"""
训练 v3 模型 - 使用 ToxiCN 漏检样本进行针对性优化

基于 v2 模型，使用包含 680 条漏检样本的 continued_train_v2.csv 训练
"""
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import (
    BertTokenizer,
    BertForSequenceClassification,
    get_linear_schedule_with_warmup
)
from torch.optim import AdamW
import pandas as pd
from tqdm import tqdm
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, classification_report
import os
import json
from datetime import datetime

class SafetyDataset(Dataset):
    """Torch dataset pairing raw texts with integer safety labels.

    Tokenization happens lazily in ``__getitem__``; each item comes back
    as fixed-length 1-D tensors ready for BertForSequenceClassification.
    """

    def __init__(self, texts, labels, tokenizer, max_length=128):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        sample_text = str(self.texts[idx])
        sample_label = int(self.labels[idx])

        encoded = self.tokenizer.encode_plus(
            sample_text,
            add_special_tokens=True,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',
        )

        # encode_plus returns (1, max_length) tensors; flatten to 1-D.
        return {
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'labels': torch.tensor(sample_label, dtype=torch.long),
        }

def get_device():
    """Select the best available torch device, preferring MPS, then CUDA, then CPU."""
    for name, is_available in (
        ("mps", torch.backends.mps.is_available),
        ("cuda", torch.cuda.is_available),
    ):
        if is_available():
            return torch.device(name)
    return torch.device("cpu")

def _evaluate(model, loader, device, desc):
    """Run one no-grad evaluation pass over *loader*.

    Puts the model in eval mode, accumulates the mean per-batch loss and
    collects argmax predictions alongside gold labels.

    Args:
        model: a sequence-classification model returning .loss and .logits.
        loader: DataLoader yielding dicts with input_ids/attention_mask/labels.
        device: torch.device to move batches to.
        desc: progress-bar label for tqdm.

    Returns:
        (avg_loss, preds, labels) — float mean loss and two flat lists of ints.
    """
    model.eval()
    total_loss = 0
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for batch in tqdm(loader, desc=desc):
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels
            )

            total_loss += outputs.loss.item()
            preds = torch.argmax(outputs.logits, dim=1)
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    return total_loss / len(loader), all_preds, all_labels


def train_v3_model():
    """Fine-tune the v2 safety classifier into v3 on hard ToxiCN samples.

    Loads ./safety_classifier_v2, continues training on
    ../data/continued_train_v2.csv (which includes 680 previously-missed
    samples), validates after every epoch, evaluates on the held-out test
    set, then saves model, tokenizer and a JSON training report to
    ./safety_classifier_v3.
    """
    print("=" * 70)
    print("🚀 训练 Safety Classifier v3")
    print("=" * 70)
    print("优化目标: 提升隐晦歧视内容检测能力")
    print("训练数据: continued_train_v2.csv (含 680 条 ToxiCN 漏检样本)")
    print("基础模型: safety_classifier_v2")
    print("=" * 70)

    # Configuration
    base_model_path = "./safety_classifier_v2"
    train_path = "../data/continued_train_v2.csv"
    val_path = "../data/continued_val.csv"
    test_path = "../data/total_test.csv"
    output_dir = "./safety_classifier_v3"
    epochs = 3  # bumped to 3 because the new hard samples need extra passes
    batch_size = 32
    learning_rate = 3e-6  # small LR to limit catastrophic forgetting of v2
    max_length = 128

    # 1. Load datasets
    print("\n📂 加载数据集...")
    train_df = pd.read_csv(train_path)
    val_df = pd.read_csv(val_path)
    test_df = pd.read_csv(test_path)

    print(f"训练集: {len(train_df)} 条")
    print(f"  - label=0 (安全): {len(train_df[train_df['label']==0])} 条")
    print(f"  - label=1 (危险): {len(train_df[train_df['label']==1])} 条")
    print(f"验证集: {len(val_df)} 条")
    print(f"测试集: {len(test_df)} 条")

    # 2. Load the v2 checkpoint as the starting point
    print(f"\n🔧 加载 v2 模型: {base_model_path}")
    tokenizer = BertTokenizer.from_pretrained(base_model_path)
    model = BertForSequenceClassification.from_pretrained(
        base_model_path,
        num_labels=2
    )

    device = get_device()
    model.to(device)
    print(f"✅ 使用设备: {device}")

    # 3. Build datasets and loaders
    print("\n📦 准备数据集...")
    train_dataset = SafetyDataset(
        train_df['text'].values,
        train_df['label'].values,
        tokenizer,
        max_length
    )

    val_dataset = SafetyDataset(
        val_df['text'].values,
        val_df['label'].values,
        tokenizer,
        max_length
    )

    test_dataset = SafetyDataset(
        test_df['text'].values,
        test_df['label'].values,
        tokenizer,
        max_length
    )

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)

    # 4. Optimizer and LR schedule (10% linear warmup)
    optimizer = AdamW(model.parameters(), lr=learning_rate)
    total_steps = len(train_loader) * epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=int(0.1 * total_steps),
        num_training_steps=total_steps
    )

    # 5. Training loop
    print(f"\n🏋️ 开始训练 (共 {epochs} 轮)...")
    training_stats = []

    for epoch in range(epochs):
        print(f"\n{'='*70}")
        print(f"Epoch {epoch + 1}/{epochs}")
        print(f"{'='*70}")

        # Training pass
        model.train()
        train_loss = 0
        train_preds = []
        train_labels = []

        for batch in tqdm(train_loader, desc="训练"):
            optimizer.zero_grad()

            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels
            )

            loss = outputs.loss
            loss.backward()

            # Clip gradients to stabilize fine-tuning at this small LR
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()

            train_loss += loss.item()

            preds = torch.argmax(outputs.logits, dim=1)
            train_preds.extend(preds.cpu().numpy())
            train_labels.extend(labels.cpu().numpy())

        avg_train_loss = train_loss / len(train_loader)
        train_acc = accuracy_score(train_labels, train_preds)
        train_precision, train_recall, train_f1, _ = precision_recall_fscore_support(
            train_labels, train_preds, average='binary'
        )

        # Validation pass (shared helper also used for the test set)
        avg_val_loss, val_preds, val_labels = _evaluate(model, val_loader, device, "验证")
        val_acc = accuracy_score(val_labels, val_preds)
        val_precision, val_recall, val_f1, _ = precision_recall_fscore_support(
            val_labels, val_preds, average='binary'
        )

        # Record per-epoch statistics for the JSON report
        epoch_stats = {
            'epoch': epoch + 1,
            'train_loss': avg_train_loss,
            'train_acc': train_acc,
            'train_f1': train_f1,
            'val_loss': avg_val_loss,
            'val_acc': val_acc,
            'val_f1': val_f1,
            'val_precision': val_precision,
            'val_recall': val_recall
        }
        training_stats.append(epoch_stats)

        print(f"\n训练: Loss={avg_train_loss:.4f}, Acc={train_acc:.4f}, F1={train_f1:.4f}")
        print(f"验证: Loss={avg_val_loss:.4f}, Acc={val_acc:.4f}, F1={val_f1:.4f}")
        print(f"      Precision={val_precision:.4f}, Recall={val_recall:.4f}")

    # 6. Save model + tokenizer
    print(f"\n💾 保存 v3 模型到: {output_dir}")
    os.makedirs(output_dir, exist_ok=True)
    model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)

    # 7. Final evaluation on the held-out test set
    print("\n🧪 测试集评估...")
    test_loss, test_preds, test_labels = _evaluate(model, test_loader, device, "测试")
    test_acc = accuracy_score(test_labels, test_preds)
    test_precision, test_recall, test_f1, _ = precision_recall_fscore_support(
        test_labels, test_preds, average='binary'
    )

    print(f"\n{'='*70}")
    print("📊 测试集结果")
    print(f"{'='*70}")
    print(f"Loss: {test_loss:.4f}")
    print(f"Accuracy: {test_acc:.4f}")
    print(f"Precision: {test_precision:.4f}")
    print(f"Recall: {test_recall:.4f}")
    print(f"F1-Score: {test_f1:.4f}")

    print("\n📋 详细分类报告:")
    print(classification_report(
        test_labels,
        test_preds,
        target_names=['安全(0)', '危险(1)']
    ))

    # 8. Persist the training report next to the saved model
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    stats_path = os.path.join(output_dir, f'training_stats_{timestamp}.json')
    with open(stats_path, 'w', encoding='utf-8') as f:
        json.dump({
            'version': 'v3',
            'base_model': 'safety_classifier_v2',
            'improvement': 'Added 680 ToxiCN hard samples for implicit discrimination detection',
            'training_stats': training_stats,
            'test_results': {
                'loss': test_loss,
                'accuracy': test_acc,
                'precision': test_precision,
                'recall': test_recall,
                'f1': test_f1
            },
            'training_config': {
                'epochs': epochs,
                'batch_size': batch_size,
                'learning_rate': learning_rate,
                'max_length': max_length,
                'train_samples': len(train_df),
                'hard_samples': 680
            }
        }, f, indent=2, ensure_ascii=False)

    print(f"\n💾 训练统计已保存到: {stats_path}")
    print("\n✅ v3 模型训练完成！")
    print("\n下一步:")
    print("1. 更新 API 使用 v3 模型: api/main.py")
    print("2. 重新测试 ToxiCN 数据集验证召回率提升")

# Entry point: run the full v3 fine-tuning pipeline when executed as a script.
if __name__ == "__main__":
    train_v3_model()
