#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
整合所有数据集并划分为训练集、验证集、测试集
"""
import pandas as pd
from sklearn.model_selection import train_test_split
import os

def prepare_final_dataset(data_dir="../data", train_frac=0.8, random_state=42):
    """Merge all source datasets, clean them, and split into train/val/test.

    Reads ``expanded_train_clear.csv``, ``total_train.csv`` and
    ``toxicn_hard_samples.csv`` from *data_dir*, concatenates them,
    removes duplicate / empty texts and rows without a usable label,
    then performs a stratified split (train_frac for training, the
    remainder split 50/50 into validation and test) and writes
    ``final_train.csv`` / ``final_val.csv`` / ``final_test.csv`` back
    into *data_dir*.

    Args:
        data_dir: Directory holding the input CSVs and receiving the
            output CSVs. Defaults to the original hard-coded "../data".
        train_frac: Fraction of rows assigned to the training set; the
            rest is halved into validation and test. Default 0.8 (8:1:1).
        random_state: Seed forwarded to sklearn's ``train_test_split``
            so the split is reproducible. Default 42.

    Returns:
        Tuple ``(train_path, val_path, test_path)`` of the written files.
    """

    print("📊 开始整合数据集...")

    # 1. Read the main dataset.
    print("\n1️⃣ 读取 expanded_train_clear.csv...")
    df_expanded = pd.read_csv(os.path.join(data_dir, "expanded_train_clear.csv"))
    print(f"   expanded_train_clear: {len(df_expanded)} 条")
    print(f"   标签分布: {df_expanded['label'].value_counts().to_dict()}")

    # 2. Read the supplementary datasets.
    print("\n2️⃣ 读取其他数据集...")

    # Original training data.
    df_total_train = pd.read_csv(os.path.join(data_dir, "total_train.csv"))
    print(f"   total_train: {len(df_total_train)} 条")

    # Hard samples.
    df_hard = pd.read_csv(os.path.join(data_dir, "toxicn_hard_samples.csv"))
    print(f"   toxicn_hard_samples: {len(df_hard)} 条")

    # 3. Concatenate everything.
    print("\n3️⃣ 合并数据...")
    all_data = pd.concat([
        df_expanded,
        df_total_train,
        df_hard
    ], ignore_index=True)
    print(f"   合并后总计: {len(all_data)} 条")

    # 4. Deduplicate on text; keep='first' prefers rows from the main
    # (expanded) dataset because it is concatenated first.
    print("\n4️⃣ 去除重复数据...")
    original_len = len(all_data)
    all_data = all_data.drop_duplicates(subset=['text'], keep='first')
    removed = original_len - len(all_data)
    print(f"   去重后: {len(all_data)} 条 (移除 {removed} 条重复)")

    # 5. Clean the data.
    print("\n5️⃣ 清理数据...")
    # Drop missing texts, then cast to str so .str.strip() behaves even
    # when a CSV column was parsed as numeric; drop whitespace-only rows.
    all_data = all_data[all_data['text'].notna()]
    all_data['text'] = all_data['text'].astype(str)
    all_data = all_data[all_data['text'].str.strip() != '']
    # Rows whose label is NaN (e.g. column mismatch between source CSVs
    # after concat) would crash astype(int) below — drop them first.
    all_data = all_data[all_data['label'].notna()]
    print(f"   清理后: {len(all_data)} 条")

    # Labels as plain ints (CSV may have parsed them as float/str).
    all_data['label'] = all_data['label'].astype(int)

    # 6. Report the final label distribution.
    print("\n6️⃣ 最终数据标签分布:")
    label_counts = all_data['label'].value_counts().sort_index()
    for label, count in label_counts.items():
        percentage = count / len(all_data) * 100
        label_name = "安全" if label == 0 else "危险"
        print(f"   标签 {label} ({label_name}): {count} 条 ({percentage:.2f}%)")

    # 7. Split into train/val/test (default 80% / 10% / 10%).
    print("\n7️⃣ 划分数据集 (训练:验证:测试 = 8:1:1)...")

    # First carve off the training set; the remainder is a temp pool.
    train_df, temp_df = train_test_split(
        all_data,
        test_size=1.0 - train_frac,
        random_state=random_state,
        stratify=all_data['label']  # preserve label proportions
    )

    # Halve the temp pool into validation and test sets.
    val_df, test_df = train_test_split(
        temp_df,
        test_size=0.5,
        random_state=random_state,
        stratify=temp_df['label']
    )

    print(f"   训练集: {len(train_df)} 条")
    print(f"   验证集: {len(val_df)} 条")
    print(f"   测试集: {len(test_df)} 条")

    # 8. Write the three splits to disk.
    print("\n8️⃣ 保存数据集...")
    train_path = os.path.join(data_dir, "final_train.csv")
    val_path = os.path.join(data_dir, "final_val.csv")
    test_path = os.path.join(data_dir, "final_test.csv")

    train_df.to_csv(train_path, index=False, encoding='utf-8')
    val_df.to_csv(val_path, index=False, encoding='utf-8')
    test_df.to_csv(test_path, index=False, encoding='utf-8')

    print(f"   ✅ 训练集保存到: {train_path}")
    print(f"   ✅ 验证集保存到: {val_path}")
    print(f"   ✅ 测试集保存到: {test_path}")

    # 9. Per-split label distribution for a sanity check of stratification.
    print("\n9️⃣ 各数据集标签分布:")
    for name, df in [("训练集", train_df), ("验证集", val_df), ("测试集", test_df)]:
        print(f"\n   {name}:")
        label_counts = df['label'].value_counts().sort_index()
        for label, count in label_counts.items():
            percentage = count / len(df) * 100
            label_name = "安全" if label == 0 else "危险"
            print(f"     标签 {label} ({label_name}): {count} 条 ({percentage:.2f}%)")

    print("\n✅ 数据集准备完成！")

    return train_path, val_path, test_path

if __name__ == "__main__":
    # Run the full merge/clean/split pipeline when executed as a script.
    prepare_final_dataset()
