#!/usr/bin/env python3
"""
数据预处理脚本 - QwenGuard 全量训练
功能：
1. 合并所有数据集
2. 数据清洗和去重
3. 按照 8:2 分割训练集和验证集
4. 保存统计信息
"""

import pandas as pd
import numpy as np
from pathlib import Path
import json
from sklearn.model_selection import train_test_split
from collections import Counter
import warnings

# Globally silence ALL warnings (e.g. pandas chained-assignment noise) so the
# console output stays readable.
# NOTE(review): this also hides deprecation and dtype warnings that may matter
# during upgrades — consider narrowing to specific categories.
warnings.filterwarnings('ignore')


class DataPreprocessor:
    """Data preprocessor for QwenGuard full training.

    Pipeline: load the configured raw CSV dataset(s), clean and deduplicate
    them, compute statistics, perform a stratified train/validation split,
    and persist the results (CSV files plus a JSON stats file).
    """

    def __init__(self, data_dir: str, output_dir: str):
        """
        Initialize the preprocessor.

        Args:
            data_dir: directory containing the raw CSV files.
            output_dir: directory where processed files are written
                (created if it does not exist).
        """
        self.data_dir = Path(data_dir)
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Input file list — the final merged dataset.
        self.data_files = [
            "final_train_data_v3_simplified.csv"
        ]

    def load_all_datasets(self) -> pd.DataFrame:
        """
        Load and concatenate all configured datasets.

        Missing or unreadable files are reported and skipped (best effort).

        Returns:
            Merged DataFrame with a fresh RangeIndex.

        Raises:
            ValueError: if none of the configured files could be loaded.
        """
        print("=" * 70)
        print("📂 加载数据集")
        print("=" * 70)

        all_dfs = []

        for file_name in self.data_files:
            file_path = self.data_dir / file_name

            if not file_path.exists():
                print(f"⚠️  警告: {file_name} 不存在，跳过")
                continue

            try:
                df = pd.read_csv(file_path)
                print(f"✅ {file_name}: {len(df):,} 条数据")
                all_dfs.append(df)
            except Exception as e:
                # Deliberate best-effort: report the failure and keep going
                # with the remaining files.
                print(f"❌ 加载 {file_name} 失败: {e}")
                continue

        if not all_dfs:
            raise ValueError("没有成功加载任何数据集！")

        # ignore_index=True gives the merged frame a contiguous index.
        merged_df = pd.concat(all_dfs, ignore_index=True)
        print(f"\n📊 合并后总数据量: {len(merged_df):,} 条")

        return merged_df

    def clean_data(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Clean the raw data.

        Steps: drop rows with null text/label, drop empty or
        whitespace-only texts, keep only rows whose label is 0 or 1
        (non-numeric labels are coerced to NaN and dropped instead of
        raising), deduplicate on the text column, and reset the index.

        Args:
            df: raw DataFrame; must contain 'text' and 'label' columns.

        Returns:
            Cleaned DataFrame with an integer 'label' column.

        Raises:
            ValueError: if the required columns are missing.
        """
        print("\n" + "=" * 70)
        print("🧹 数据清洗")
        print("=" * 70)

        original_count = len(df)

        # Validate the schema up front with a clear error message.
        if 'text' not in df.columns or 'label' not in df.columns:
            raise ValueError("数据集必须包含 'text' 和 'label' 列！")

        # 1. Drop rows where text or label is null.
        df = df.dropna(subset=['text', 'label'])
        null_removed = original_count - len(df)
        print(f"移除空值: {null_removed:,} 条")

        # 2. Drop empty / whitespace-only texts. Take a copy here so the
        # column assignment below writes to an owned frame, not a view.
        df = df[df['text'].astype(str).str.strip() != ''].copy()
        empty_removed = original_count - null_removed - len(df)
        print(f"移除空文本: {empty_removed:,} 条")

        # 3. Keep only valid binary labels. to_numeric(errors='coerce')
        # turns non-numeric labels into NaN (then dropped by isin) instead
        # of letting astype(int) raise on dirty input.
        df['label'] = pd.to_numeric(df['label'], errors='coerce')
        df = df[df['label'].isin([0, 1])].copy()
        df['label'] = df['label'].astype(int)
        invalid_label_removed = original_count - null_removed - empty_removed - len(df)
        print(f"移除无效标签: {invalid_label_removed:,} 条")

        # 4. Deduplicate on text, keeping the first occurrence.
        before_dedup = len(df)
        df = df.drop_duplicates(subset=['text'], keep='first')
        duplicates_removed = before_dedup - len(df)
        print(f"移除重复数据: {duplicates_removed:,} 条")

        # 5. Reset the index so downstream positional access is contiguous.
        df = df.reset_index(drop=True)

        print(f"\n✅ 清洗完成，保留数据: {len(df):,} 条")
        # Guard the percentage print against an originally-empty input.
        if original_count > 0:
            print(f"📉 清洗率: {(original_count - len(df)) / original_count * 100:.2f}%")

        return df

    def analyze_data(self, df: pd.DataFrame) -> dict:
        """
        Compute label-distribution and text-length statistics.

        Text lengths are computed on a standalone Series, so the input
        DataFrame is never mutated.

        Args:
            df: cleaned DataFrame with 'text' and 'label' columns.

        Returns:
            dict with 'label_distribution' and 'text_length_stats' keys.

        Raises:
            ValueError: if df is empty (statistics would be undefined).
        """
        print("\n" + "=" * 70)
        print("📊 数据统计分析")
        print("=" * 70)

        # Fail loudly with a clear message instead of hitting
        # ZeroDivisionError / empty-Series errors below.
        if df.empty:
            raise ValueError("数据集为空，无法进行统计分析！")

        # Label distribution (cast to plain int for JSON serialization).
        label_counts = df['label'].value_counts().to_dict()
        label_dist = {
            'label_0_safe': int(label_counts.get(0, 0)),
            'label_1_toxic': int(label_counts.get(1, 0)),
            'total': len(df)
        }

        print(f"总样本数: {label_dist['total']:,}")
        print(f"  - 安全样本 (label=0): {label_dist['label_0_safe']:,} "
              f"({label_dist['label_0_safe']/label_dist['total']*100:.2f}%)")
        print(f"  - 有毒样本 (label=1): {label_dist['label_1_toxic']:,} "
              f"({label_dist['label_1_toxic']/label_dist['total']*100:.2f}%)")

        # Text-length statistics on a temporary Series (no column added to
        # df). std() is NaN for a single row — report 0.0 instead so the
        # JSON stats file stays valid.
        lengths = df['text'].astype(str).str.len()
        length_stats = {
            'min': int(lengths.min()),
            'max': int(lengths.max()),
            'mean': float(lengths.mean()),
            'median': float(lengths.median()),
            'std': float(lengths.std()) if len(lengths) > 1 else 0.0
        }

        print(f"\n文本长度统计:")
        print(f"  - 最小长度: {length_stats['min']}")
        print(f"  - 最大长度: {length_stats['max']}")
        print(f"  - 平均长度: {length_stats['mean']:.2f}")
        print(f"  - 中位数长度: {length_stats['median']:.2f}")
        print(f"  - 标准差: {length_stats['std']:.2f}")

        return {
            'label_distribution': label_dist,
            'text_length_stats': length_stats
        }

    def split_data(self, df: pd.DataFrame, train_ratio: float = 0.8,
                   random_state: int = 42) -> tuple:
        """
        Split the data into train and validation sets.

        Uses stratified sampling on 'label' so both splits keep the same
        class balance as the full dataset.

        Args:
            df: cleaned DataFrame.
            train_ratio: fraction assigned to the training set.
            random_state: seed for a reproducible split.

        Returns:
            (train_df, val_df) tuple of DataFrames.
        """
        print("\n" + "=" * 70)
        print(f"✂️  数据分割 (训练集:{int(train_ratio*100)}% / 验证集:{int((1-train_ratio)*100)}%)")
        print("=" * 70)

        # Stratify to preserve the label distribution in both splits.
        train_df, val_df = train_test_split(
            df,
            train_size=train_ratio,
            random_state=random_state,
            stratify=df['label']
        )

        print(f"训练集: {len(train_df):,} 条")
        print(f"  - 安全样本: {(train_df['label']==0).sum():,}")
        print(f"  - 有毒样本: {(train_df['label']==1).sum():,}")

        print(f"\n验证集: {len(val_df):,} 条")
        print(f"  - 安全样本: {(val_df['label']==0).sum():,}")
        print(f"  - 有毒样本: {(val_df['label']==1).sum():,}")

        return train_df, val_df

    def save_data(self, train_df: pd.DataFrame, val_df: pd.DataFrame,
                  stats: dict):
        """
        Persist the processed splits and statistics.

        Writes train.csv / val.csv (utf-8-sig so Excel opens them
        correctly) and data_stats.json into the output directory. The
        stats dict is extended in place with sample counts and ratios.

        Args:
            train_df: training split.
            val_df: validation split.
            stats: statistics dict from analyze_data (mutated).
        """
        print("\n" + "=" * 70)
        print("💾 保存数据")
        print("=" * 70)

        train_path = self.output_dir / 'train.csv'
        val_path = self.output_dir / 'val.csv'

        # utf-8-sig adds a BOM so spreadsheet tools detect the encoding.
        train_df.to_csv(train_path, index=False, encoding='utf-8-sig')
        val_df.to_csv(val_path, index=False, encoding='utf-8-sig')

        print(f"✅ 训练集已保存: {train_path}")
        print(f"✅ 验证集已保存: {val_path}")

        # Record the realized split sizes/ratios alongside the data stats.
        total = len(train_df) + len(val_df)
        stats['train_samples'] = len(train_df)
        stats['val_samples'] = len(val_df)
        stats['train_ratio'] = len(train_df) / total
        stats['val_ratio'] = len(val_df) / total

        stats_path = self.output_dir / 'data_stats.json'
        with open(stats_path, 'w', encoding='utf-8') as f:
            # ensure_ascii=False keeps any non-ASCII text readable in the file.
            json.dump(stats, f, indent=2, ensure_ascii=False)

        print(f"✅ 统计信息已保存: {stats_path}")

    def run(self):
        """Execute the full preprocessing pipeline: load → clean → analyze
        → split → save, printing a summary at the end."""
        print("\n" + "=" * 70)
        print("🚀 QwenGuard 数据预处理")
        print("=" * 70)

        # 1. Load the raw dataset(s).
        df = self.load_all_datasets()

        # 2. Clean and deduplicate.
        df = self.clean_data(df)

        # 3. Compute statistics (before splitting, on the full data).
        stats = self.analyze_data(df)

        # 4. Stratified 80/20 split.
        train_df, val_df = self.split_data(df, train_ratio=0.8)

        # 5. Persist splits and stats.
        self.save_data(train_df, val_df, stats)

        print("\n" + "=" * 70)
        print("✅ 数据预处理完成！")
        print("=" * 70)
        print(f"\n📁 输出目录: {self.output_dir}")
        print(f"📊 训练集: {len(train_df):,} 条")
        print(f"📊 验证集: {len(val_df):,} 条")
        print(f"📊 总计: {len(train_df) + len(val_df):,} 条")


def main():
    """Entry point: build a preprocessor with the default paths and run it."""
    # Paths are resolved relative to the current working directory.
    DataPreprocessor("../data", "../processed_data").run()


# Run the pipeline only when executed as a script (safe to import as a module).
if __name__ == "__main__":
    main()
