"""
数据预处理模块
负责文本清理、标签编码和BERT tokenization
"""

import re
import pandas as pd
import numpy as np
from typing import Dict, List, Tuple
from sklearn.preprocessing import LabelEncoder
from transformers import AutoTokenizer
import logging

logger = logging.getLogger(__name__)


class TextPreprocessor:
    """Text preprocessing utilities: cleaning, de-duplication and length filtering."""

    # Characters that render like whitespace but are not all matched by \s
    # (NBSP, zero-width spaces/marks, line/paragraph separators, BOM, ...).
    _SPECIAL_WS_RE = re.compile(r'[\u00A0\u2000-\u200F\u2028-\u202F\u205F-\u206F\uFEFF]')
    _MULTI_WS_RE = re.compile(r'\s+')

    def __init__(self):
        """Initialize the preprocessor (stateless; kept for API symmetry)."""
        pass

    def clean_text(self, text: str) -> str:
        """
        Clean a single text string.

        Replaces special Unicode whitespace with plain spaces, collapses runs
        of whitespace into a single space and strips leading/trailing blanks.
        NaN/None inputs become the empty string.

        Args:
            text: Raw text (may be NaN/None or a non-string scalar).

        Returns:
            str: Cleaned text.
        """
        if pd.isna(text):
            return ""

        # Coerce non-string scalars (e.g. numbers) to str.
        text = str(text)

        # Map exotic whitespace-like characters to plain spaces first, then
        # collapse all whitespace in one pass.  (The original ran the \s+
        # collapse twice — once before and once after the substitution; a
        # single pass after the substitution produces the same result.)
        text = self._SPECIAL_WS_RE.sub(' ', text)
        text = self._MULTI_WS_RE.sub(' ', text).strip()

        return text

    def batch_clean_text(self, texts: List[str]) -> List[str]:
        """
        Clean a list of texts.

        Args:
            texts: Raw texts.

        Returns:
            List[str]: Cleaned texts, same order and length as the input.
        """
        return [self.clean_text(text) for text in texts]

    def remove_duplicates(self, df: pd.DataFrame, text_column: str = 'text') -> pd.DataFrame:
        """
        Drop rows whose text is an exact duplicate, keeping the first occurrence.

        Args:
            df: Input data frame.
            text_column: Name of the column holding the text (default 'text').

        Returns:
            pd.DataFrame: De-duplicated data frame.
        """
        original_len = len(df)
        df_clean = df.drop_duplicates(subset=[text_column], keep='first')
        removed_count = original_len - len(df_clean)

        if removed_count > 0:
            logger.info(f"去除了 {removed_count} 条重复数据")

        return df_clean

    def filter_by_length(self, df: pd.DataFrame,
                        min_length: int = 1,
                        max_length: int = 512,
                        text_column: str = 'text') -> pd.DataFrame:
        """
        Keep only rows whose text character length lies within the bounds.

        Args:
            df: Input data frame.
            min_length: Minimum allowed length (inclusive).
            max_length: Maximum allowed length (inclusive).
            text_column: Name of the column holding the text (default 'text').

        Returns:
            pd.DataFrame: Filtered copy of the data frame.
        """
        original_len = len(df)

        # Character length per row (token length is handled later by the tokenizer).
        text_lengths = df[text_column].str.len()

        mask = (text_lengths >= min_length) & (text_lengths <= max_length)
        df_filtered = df[mask].copy()

        removed_count = original_len - len(df_filtered)
        if removed_count > 0:
            logger.info(f"根据长度过滤，去除了 {removed_count} 条数据")

        return df_filtered


class LabelProcessor:
    """Encodes intent strings to integer labels and back."""

    def __init__(self):
        """Initialize the encoder and empty label<->intent mappings."""
        self.label_encoder = LabelEncoder()
        # label (int) -> intent (str), and the inverse; populated by fit_transform.
        self.label_to_intent = {}
        self.intent_to_label = {}
        self.fitted = False

    def fit_transform(self, intents: List[str]) -> np.ndarray:
        """
        Fit the encoder on intents and return their encoded labels.

        Args:
            intents: Intent names (one per sample).

        Returns:
            np.ndarray: Integer labels aligned with the input order.
        """
        labels = self.label_encoder.fit_transform(intents)

        # LabelEncoder assigns labels 0..n-1 in sorted order of classes_, so
        # both mappings follow directly from enumerate.  Using enumerate also
        # yields plain int keys (instead of np.int64), keeping them JSON-safe.
        self.label_to_intent = {idx: intent for idx, intent in enumerate(self.label_encoder.classes_)}
        self.intent_to_label = {intent: idx for idx, intent in enumerate(self.label_encoder.classes_)}

        self.fitted = True

        logger.info(f"标签编码完成，共 {len(self.label_encoder.classes_)} 个类别")
        logger.info(f"标签映射: {self.intent_to_label}")

        return labels

    def transform(self, intents: List[str]) -> np.ndarray:
        """
        Encode intents with the already-fitted encoder.

        Args:
            intents: Intent names.

        Returns:
            np.ndarray: Integer labels.

        Raises:
            ValueError: If the encoder has not been fitted yet.
        """
        if not self.fitted:
            raise ValueError("标签编码器尚未训练，请先调用 fit_transform")

        return self.label_encoder.transform(intents)

    def inverse_transform(self, labels: np.ndarray) -> List[str]:
        """
        Decode integer labels back to intent names.

        Args:
            labels: Encoded labels.

        Returns:
            List[str]: Intent names.

        Raises:
            ValueError: If the encoder has not been fitted yet.
        """
        if not self.fitted:
            raise ValueError("标签编码器尚未训练")

        # Convert the ndarray to a plain list so the return value matches the
        # declared List[str] contract (previously an ndarray leaked out).
        return list(self.label_encoder.inverse_transform(labels))


class BertTokenizer:
    """Thin wrapper around a HuggingFace tokenizer with fallback loading."""

    def __init__(self, model_name: str = "bert-base-chinese", max_length: int = 128):
        """
        Load the tokenizer, trying fallback models if the preferred one fails.

        Args:
            model_name: Preferred HuggingFace model name.
            max_length: Maximum sequence length used by tokenize_texts.

        Raises:
            RuntimeError: If neither the requested model nor any fallback loads.
        """
        self.model_name = model_name
        self.max_length = max_length

        try:
            logger.info(f"尝试加载 BERT tokenizer: {model_name}")
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            logger.info(f"BERT tokenizer 加载成功: {model_name}")
        except Exception as e:
            logger.warning(f"加载 {model_name} 失败: {e}")
            logger.info("尝试使用备用方案...")

            # Fallback models tried in order; chosen because they may already
            # be present in the local HF cache (the last one is the smallest).
            fallback_models = [
                "hfl/chinese-bert-wwm-ext",
                "bert-base-multilingual-cased",
                "distilbert-base-multilingual-cased",
            ]

            loaded = False
            for fallback_model in fallback_models:
                try:
                    logger.info(f"尝试备用模型: {fallback_model}")
                    self.tokenizer = AutoTokenizer.from_pretrained(fallback_model)
                    # Record which model actually backs this tokenizer.
                    self.model_name = fallback_model
                    logger.info(f"成功加载备用模型: {fallback_model}")
                    loaded = True
                    break
                except Exception as fallback_e:
                    logger.warning(f"备用模型 {fallback_model} 也失败: {fallback_e}")
                    continue

            if not loaded:
                # The original log claimed a character-level tokenizer would be
                # used as a last resort, but no such fallback exists — we raise.
                logger.error("所有模型都无法加载")
                raise RuntimeError(f"无法加载任何BERT模型，请检查网络连接或下载模型到本地。原错误: {e}")

    def analyze_text_lengths(self, texts: List[str]) -> Dict:
        """
        Compute token-length statistics for a corpus.

        Args:
            texts: Texts to analyze (must be non-empty).

        Returns:
            Dict: mean/max/min lengths, 95th/99th percentiles and raw lengths.

        Raises:
            ValueError: If texts is empty (previously np.max raised an opaque
                error on the empty list).
        """
        if not texts:
            raise ValueError("analyze_text_lengths requires a non-empty list of texts")

        token_lengths = [len(self.tokenizer.tokenize(text)) for text in texts]

        stats = {
            'mean_length': np.mean(token_lengths),
            'max_length': np.max(token_lengths),
            'min_length': np.min(token_lengths),
            'percentile_95': np.percentile(token_lengths, 95),
            'percentile_99': np.percentile(token_lengths, 99),
            'lengths': token_lengths
        }

        logger.info(f"Token长度统计: {stats}")
        return stats

    def tokenize_texts(self, texts: List[str]) -> Dict:
        """
        Tokenize a batch of texts into fixed-length PyTorch tensors.

        Args:
            texts: Texts to tokenize.

        Returns:
            Dict: Encoding with input_ids / attention_mask, padded and
                truncated to self.max_length.
        """
        logger.info(f"开始tokenization，文本数量: {len(texts)}")

        encodings = self.tokenizer(
            texts,
            truncation=True,
            padding='max_length',
            max_length=self.max_length,
            return_tensors='pt'
        )

        logger.info(f"Tokenization完成，序列长度: {self.max_length}")
        return encodings

    def get_vocab_size(self) -> int:
        """Return the size of the underlying tokenizer's vocabulary."""
        return self.tokenizer.vocab_size


class DataPreprocessor:
    """End-to-end pipeline: clean -> dedupe/filter -> encode labels -> tokenize."""

    def __init__(self, model_name: str = "bert-base-chinese", max_length: int = 128):
        """
        Initialize the sub-processors.

        Args:
            model_name: HuggingFace model name for the tokenizer.
            max_length: Initial maximum sequence length; may be adjusted
                automatically from the observed token-length distribution.
        """
        self.text_processor = TextPreprocessor()
        self.label_processor = LabelProcessor()
        self.bert_tokenizer = BertTokenizer(model_name, max_length)

        self.max_length = max_length

    def preprocess_data(self, df: pd.DataFrame) -> Tuple[Dict, np.ndarray, Dict]:
        """
        Run the full preprocessing pipeline.

        Args:
            df: Raw data frame with 'text' and 'intent' columns.

        Returns:
            Tuple: (tokenized_data, labels, mappings) where mappings carries
                the label<->intent dicts, vocab size, max length and class count.
        """
        logger.info("开始数据预处理流程")

        # Step 1: clean the raw texts (work on a copy, never mutate the input).
        logger.info("步骤1: 文本清理")
        df_clean = df.copy()
        df_clean['text'] = self.text_processor.batch_clean_text(df_clean['text'].tolist())

        # Step 2: drop duplicates and out-of-range lengths.
        logger.info("步骤2: 去重和长度过滤")
        df_clean = self.text_processor.remove_duplicates(df_clean)
        df_clean = self.text_processor.filter_by_length(df_clean)

        # Step 3: encode intent strings to integer labels.
        logger.info("步骤3: 标签编码")
        labels = self.label_processor.fit_transform(df_clean['intent'].tolist())

        # Step 4: inspect token lengths to pick a tight max_length.
        logger.info("步骤4: 分析文本长度")
        length_stats = self.bert_tokenizer.analyze_text_lengths(df_clean['text'].tolist())

        # Use the 95th percentile (+2 for [CLS]/[SEP]) capped at BERT's 512 limit.
        recommended_length = min(int(length_stats['percentile_95']) + 2, 512)
        if recommended_length != self.max_length:
            logger.info(f"建议调整最大长度为: {recommended_length}")
            self.bert_tokenizer.max_length = recommended_length
            self.max_length = recommended_length

        # Step 5: tokenize the cleaned texts.
        logger.info("步骤5: BERT tokenization")
        tokenized_data = self.bert_tokenizer.tokenize_texts(df_clean['text'].tolist())

        # Step 6: collect metadata needed by training/inference code.
        mappings = {
            'label_to_intent': self.label_processor.label_to_intent,
            'intent_to_label': self.label_processor.intent_to_label,
            'vocab_size': self.bert_tokenizer.get_vocab_size(),
            'max_length': self.max_length,
            'num_classes': len(self.label_processor.intent_to_label)
        }

        logger.info("数据预处理完成")
        logger.info(f"处理后数据量: {len(df_clean)}")
        logger.info(f"类别数量: {mappings['num_classes']}")

        return tokenized_data, labels, mappings

    @staticmethod
    def _json_safe(value):
        """Convert numpy scalars/arrays to plain Python values for JSON."""
        if isinstance(value, (np.integer, np.floating)):
            return value.item()
        if isinstance(value, np.ndarray):
            # Arrays were previously passed through unchanged and would have
            # crashed json.dump; serialize them as nested lists instead.
            return value.tolist()
        return value

    def save_mappings(self, mappings: Dict, file_path: str):
        """
        Save the mappings dict as UTF-8 JSON, creating parent dirs as needed.

        Args:
            mappings: Mapping metadata (may contain numpy scalar/array values).
            file_path: Destination path for the JSON file.
        """
        import json
        from pathlib import Path

        # Convert numpy types (including those nested one level deep in dicts)
        # to JSON-serializable Python values; dict keys become strings.
        mappings_serializable = {}
        for key, value in mappings.items():
            if isinstance(value, dict):
                mappings_serializable[key] = {
                    str(self._json_safe(k)): self._json_safe(v)
                    for k, v in value.items()
                }
            else:
                mappings_serializable[key] = self._json_safe(value)

        Path(file_path).parent.mkdir(parents=True, exist_ok=True)

        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(mappings_serializable, f, ensure_ascii=False, indent=2)

        logger.info(f"映射信息已保存到: {file_path}")


if __name__ == "__main__":
    # 测试数据预处理
    from data_loader import create_sample_data
    
    # 创建示例数据
    df = create_sample_data()
    
    # 初始化预处理器
    preprocessor = DataPreprocessor()
    
    # 预处理数据
    tokenized_data, labels, mappings = preprocessor.preprocess_data(df)
    
    print(f"预处理完成:")
    print(f"- 数据形状: input_ids {tokenized_data['input_ids'].shape}")
    print(f"- 标签形状: {labels.shape}")
    print(f"- 类别数量: {mappings['num_classes']}")
    print(f"- 最大长度: {mappings['max_length']}")
    
    # 保存映射
    preprocessor.save_mappings(mappings, "models/mappings.json") 