#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
准备 V6 训练数据集 - 整合长文本数据
"""
import pandas as pd
from sklearn.model_selection import train_test_split
import os

def prepare_v6_dataset(data_dir="../data", random_state=42):
    """Build the V6 training dataset by merging long-text LLM answers.

    Reads ``final_train.csv`` and ``toxic_llm_answers.csv`` from *data_dir*,
    concatenates them together with an equal-sized sample of safe (label=0)
    rows, deduplicates on text, cleans empty/NaN rows, performs a stratified
    80/10/10 train/val/test split, and writes ``v6_train.csv``,
    ``v6_val.csv`` and ``v6_test.csv`` back into *data_dir*.

    Args:
        data_dir: Directory holding the input CSVs and receiving the output
            splits. Defaults to ``"../data"`` (the original hard-coded path).
        random_state: Seed used for the balancing sample and both split
            calls. Defaults to 42 (the original hard-coded seed).

    Returns:
        Tuple ``(train_path, val_path, test_path)`` of the written CSV paths.

    Raises:
        FileNotFoundError: If either input CSV is missing from *data_dir*.
    """
    print("=" * 80)
    print("📊 准备 V6 数据集（专门优化长文本处理）")
    print("=" * 80)

    # 1. Load the existing base training set.
    print("\n1️⃣ 读取现有数据集...")
    df_final_train = pd.read_csv(os.path.join(data_dir, "final_train.csv"))
    print(f"   final_train: {len(df_final_train)} 条")

    # 2. Load the long-text LLM-answer set (all toxic, label=1 per its stats).
    print("\n2️⃣ 读取长文本数据集 (toxic_llm_answers)...")
    df_long = pd.read_csv(os.path.join(data_dir, "toxic_llm_answers.csv"))
    print(f"   toxic_llm_answers: {len(df_long)} 条")
    print(f"   标签分布: {df_long['label'].value_counts().to_dict()}")

    # Length diagnostics, kept in a local Series so no throwaway column is
    # added to the frame (the original added/dropped a 'text_length' column).
    long_lengths = df_long['text'].str.len()
    print(f"   平均长度: {long_lengths.mean():.0f} 字符")
    print(f"   中位数: {long_lengths.median():.0f} 字符")
    print(f"   最长: {long_lengths.max()} 字符")
    # case=False for consistency with the <think> counts in steps 8 and 12.
    print(f"   包含<think>: {df_long['text'].str.contains('<think>', case=False).sum()} 条")

    # 3. Draw an equal number of safe samples to balance the new toxic rows.
    print("\n3️⃣ 为toxic_llm_answers生成平衡样本...")
    safe_pool = df_final_train[df_final_train['label'] == 0]
    # Robustness: never request more rows than the pool actually holds
    # (DataFrame.sample raises ValueError when n > len without replacement).
    df_safe_samples = safe_pool.sample(
        n=min(len(df_long), len(safe_pool)),
        random_state=random_state,
    )
    print(f"   抽取 {len(df_safe_samples)} 条安全样本进行平衡")

    # 4. Concatenate everything.
    # NOTE(review): df_safe_samples is drawn from df_final_train, which is
    # included in full right below — so the text-based dedup in step 5 drops
    # these sampled rows again (keep='first' retains the final_train copy).
    # The "balancing" is therefore a no-op on the final dataset; confirm
    # whether duplicate oversampling (i.e. skipping dedup for these rows)
    # was actually intended.
    print("\n4️⃣ 合并数据...")
    all_data = pd.concat([
        df_final_train,  # existing training data
        df_long,         # long-text toxic samples
        df_safe_samples  # balancing safe samples (see NOTE above)
    ], ignore_index=True)

    print(f"   合并后总计: {len(all_data)} 条")

    # 5. Deduplicate on the text column, keeping the first occurrence.
    print("\n5️⃣ 去除重复数据...")
    original_len = len(all_data)
    all_data = all_data.drop_duplicates(subset=['text'], keep='first')
    removed = original_len - len(all_data)
    print(f"   去重后: {len(all_data)} 条 (移除 {removed} 条重复)")

    # 6. Drop NaN / whitespace-only texts and normalize labels to int.
    print("\n6️⃣ 清理数据...")
    all_data = all_data[all_data['text'].notna()]
    all_data = all_data[all_data['text'].str.strip() != '']
    all_data['label'] = all_data['label'].astype(int)
    print(f"   清理后: {len(all_data)} 条")

    # 7. Report the final label distribution.
    print("\n7️⃣ 最终标签分布:")
    label_counts = all_data['label'].value_counts().sort_index()
    for label, count in label_counts.items():
        percentage = count / len(all_data) * 100
        label_name = "安全" if label == 0 else "危险"
        print(f"   标签 {label} ({label_name}): {count:,} 条 ({percentage:.2f}%)")

    # 8. Text-length distribution (local Series only — never persisted).
    print("\n8️⃣ 文本长度分布:")
    text_lengths = all_data['text'].str.len()
    print(f"   平均长度: {text_lengths.mean():.0f} 字符")
    print(f"   中位数: {text_lengths.median():.0f} 字符")
    print(f"   最短: {text_lengths.min()} 字符")
    print(f"   最长: {text_lengths.max()} 字符")

    # Share of long texts (>500 chars).
    long_text_count = int((text_lengths > 500).sum())
    print(f"   长文本(>500字符): {long_text_count:,} 条 ({long_text_count/len(all_data)*100:.2f}%)")

    # Share of texts carrying a <think> tag.
    think_count = all_data['text'].str.contains('<think>', case=False).sum()
    print(f"   包含<think>标签: {think_count:,} 条 ({think_count/len(all_data)*100:.2f}%)")

    # 9. Stratified 80/10/10 split. No 'text_length' column was ever added,
    # so no drop is needed before splitting (output files are unchanged).
    print("\n9️⃣ 划分数据集 (训练:验证:测试 = 8:1:1)...")

    # First carve off 80% train vs 20% temp.
    train_df, temp_df = train_test_split(
        all_data,
        test_size=0.2,
        random_state=random_state,
        stratify=all_data['label']
    )

    # Then split temp evenly into validation and test.
    val_df, test_df = train_test_split(
        temp_df,
        test_size=0.5,
        random_state=random_state,
        stratify=temp_df['label']
    )

    print(f"   训练集: {len(train_df):,} 条")
    print(f"   验证集: {len(val_df):,} 条")
    print(f"   测试集: {len(test_df):,} 条")

    # 10. Persist the three splits.
    print("\n🔟 保存数据集...")
    train_path = os.path.join(data_dir, "v6_train.csv")
    val_path = os.path.join(data_dir, "v6_val.csv")
    test_path = os.path.join(data_dir, "v6_test.csv")

    train_df.to_csv(train_path, index=False, encoding='utf-8')
    val_df.to_csv(val_path, index=False, encoding='utf-8')
    test_df.to_csv(test_path, index=False, encoding='utf-8')

    print(f"   ✅ 训练集保存到: {train_path}")
    print(f"   ✅ 验证集保存到: {val_path}")
    print(f"   ✅ 测试集保存到: {test_path}")

    # 11. Per-split label distributions.
    print("\n1️⃣1️⃣ 各数据集标签分布:")
    for name, df in [("训练集", train_df), ("验证集", val_df), ("测试集", test_df)]:
        print(f"\n   {name}:")
        label_counts = df['label'].value_counts().sort_index()
        for label, count in label_counts.items():
            percentage = count / len(df) * 100
            label_name = "安全" if label == 0 else "危险"
            print(f"     标签 {label} ({label_name}): {count:,} 条 ({percentage:.2f}%)")

    # 12. Long-text share in the test split. Computed into locals instead of
    # mutating test_df after it has been saved (the original triggered a
    # potential SettingWithCopyWarning here and mutated a saved frame).
    print("\n1️⃣2️⃣ 测试集长文本分布:")
    test_lengths = test_df['text'].str.len()
    long_in_test = int((test_lengths > 500).sum())
    think_in_test = test_df['text'].str.contains('<think>', case=False).sum()
    print(f"   测试集长文本(>500字符): {long_in_test} 条 ({long_in_test/len(test_df)*100:.2f}%)")
    print(f"   测试集包含<think>: {think_in_test} 条 ({think_in_test/len(test_df)*100:.2f}%)")

    print("\n" + "=" * 80)
    print("✅ V6 数据集准备完成！")
    print("=" * 80)

    print("\n📝 数据集特点:")
    print("   - 整合了3042条长文本LLM输出（包含<think>标签）")
    print("   - 增加了对应的安全样本以保持平衡")
    print("   - 总数据量从169,361增至约175,000条")
    print("   - 长文本占比显著提升，有助于模型学习长文本特征")

    print("\n🎯 下一步:")
    print("   python train_v6_long_text.py")

    return train_path, val_path, test_path

if __name__ == "__main__":
    prepare_v6_dataset()
