#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
V5 最终版本: 使用 hfl/chinese-roberta-wwm-ext-large + 完整整合数据集
"""
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModelForSequenceClassification, get_linear_schedule_with_warmup
from torch.optim import AdamW
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report
from tqdm import tqdm
import json
import os
from datetime import datetime

class SafetyDataset(Dataset):
    """Torch dataset pairing raw texts with integer labels for safety detection.

    Tokenization happens lazily in ``__getitem__``; each item comes back as
    fixed-length tensors ready for a HuggingFace sequence-classification model.
    """

    def __init__(self, texts, labels, tokenizer, max_length=128):
        # Keep the raw inputs as provided; nothing is tokenized up front.
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        sample_text = str(self.texts[idx])
        sample_label = int(self.labels[idx])

        encoded = self.tokenizer(
            sample_text,
            add_special_tokens=True,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        # flatten() drops the leading batch dim the tokenizer adds (1, L) -> (L,).
        item = {
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'labels': torch.tensor(sample_label, dtype=torch.long),
        }
        return item

def get_device():
    """Return the best available torch device, preferring MPS, then CUDA, then CPU."""
    preferences = (
        ("mps", torch.backends.mps.is_available),
        ("cuda", torch.cuda.is_available),
    )
    for device_name, is_available in preferences:
        if is_available():
            return torch.device(device_name)
    return torch.device("cpu")

def train_epoch(model, data_loader, optimizer, scheduler, device):
    """Run one training epoch over ``data_loader``.

    Args:
        model: sequence-classification model; its output must expose
            ``.loss`` and ``.logits`` (HuggingFace convention).
        data_loader: yields dicts with 'input_ids', 'attention_mask', 'labels'.
        optimizer: stepped once per batch.
        scheduler: LR scheduler, stepped once per batch (per-step schedule).
        device: target torch device for the batch tensors.

    Returns:
        Tuple ``(mean_loss, accuracy)`` as plain Python floats. (Previously
        the accuracy was a 0-dim device tensor, inconsistent with
        ``eval_model`` which returns floats; an empty loader also raised
        ZeroDivisionError.)
    """
    model.train()
    losses = []
    correct = 0  # running count of correct predictions (plain int, not a tensor)
    total = 0    # running count of samples seen

    progress_bar = tqdm(data_loader, desc='Training')

    for batch in progress_bar:
        optimizer.zero_grad()

        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)

        outputs = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels
        )

        loss = outputs.loss
        logits = outputs.logits

        preds = torch.argmax(logits, dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)

        loss.backward()
        # Clip gradients to stabilize fine-tuning of the large model.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()

        losses.append(loss.item())

        progress_bar.set_postfix({
            'loss': f'{loss.item():.4f}',
            'acc': f'{correct / total:.4f}'
        })

    # Guard against an empty loader instead of dividing by zero.
    if not losses:
        return 0.0, 0.0
    return sum(losses) / len(losses), correct / total

def eval_model(model, data_loader, device):
    """Evaluate the model over ``data_loader`` without gradient tracking.

    Returns a dict with the mean loss, binary accuracy/precision/recall/F1
    (``zero_division=0`` avoids NaN warnings when a class is never
    predicted), plus the raw predictions and ground-truth labels.
    """
    model.eval()
    all_preds = []
    all_labels = []
    batch_losses = []

    with torch.no_grad():
        for batch in tqdm(data_loader, desc='Evaluating'):
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels
            )

            batch_losses.append(outputs.loss.item())

            batch_preds = outputs.logits.argmax(dim=1)
            all_preds.extend(batch_preds.cpu().tolist())
            all_labels.extend(labels.cpu().tolist())

    return {
        'loss': sum(batch_losses) / len(batch_losses),
        'accuracy': accuracy_score(all_labels, all_preds),
        'precision': precision_score(all_labels, all_preds, zero_division=0),
        'recall': recall_score(all_labels, all_preds, zero_division=0),
        'f1': f1_score(all_labels, all_preds, zero_division=0),
        'predictions': all_preds,
        'true_labels': all_labels
    }

def train_v5_model():
    """Fine-tune hfl/chinese-roberta-wwm-ext-large on the merged safety dataset.

    Pipeline: load the prepared train/val/test CSVs -> build tokenized
    datasets -> train for a fixed number of epochs -> checkpoint the best
    model by validation F1 -> reload that best checkpoint and evaluate it on
    the test set -> write training statistics to JSON in the output dir.
    """

    print("=" * 60)
    print("🚀 开始训练 V5 模型")
    print("=" * 60)

    # --- Paths and model choice ---
    model_name = "hfl/chinese-roberta-wwm-ext-large"
    train_path = "../data/final_train.csv"
    val_path = "../data/final_val.csv"
    test_path = "../data/final_test.csv"
    output_dir = "./safety_classifier_v5_roberta"

    # --- Hyperparameters ---
    max_length = 128
    batch_size = 16  # RoBERTa-large is memory-hungry; keep the batch small
    learning_rate = 2e-5
    epochs = 3
    warmup_steps = 500

    # Abort early with a hint if the prepared dataset files are missing.
    if not all(os.path.exists(p) for p in [train_path, val_path, test_path]):
        print("❌ 数据文件不存在，请先运行 prepare_final_dataset.py")
        print("   python prepare_final_dataset.py")
        return

    # Device selection (MPS > CUDA > CPU).
    device = get_device()
    print(f"\n📱 使用设备: {device}")

    # --- Load data ---
    print(f"\n📂 加载数据...")
    train_df = pd.read_csv(train_path)
    val_df = pd.read_csv(val_path)
    test_df = pd.read_csv(test_path)

    print(f"   训练集: {len(train_df)} 条")
    print(f"   验证集: {len(val_df)} 条")
    print(f"   测试集: {len(test_df)} 条")

    # --- Tokenizer and model with a binary classification head ---
    print(f"\n🔧 加载模型: {model_name}")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(
        model_name,
        num_labels=2
    )
    model = model.to(device)

    # --- Datasets and loaders ---
    print(f"\n📊 创建数据集...")
    train_dataset = SafetyDataset(
        train_df['text'].values,
        train_df['label'].values,
        tokenizer,
        max_length
    )
    val_dataset = SafetyDataset(
        val_df['text'].values,
        val_df['label'].values,
        tokenizer,
        max_length
    )
    test_dataset = SafetyDataset(
        test_df['text'].values,
        test_df['label'].values,
        tokenizer,
        max_length
    )

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)

    # --- Optimizer and linear warmup/decay schedule (stepped per batch) ---
    optimizer = AdamW(model.parameters(), lr=learning_rate)
    total_steps = len(train_loader) * epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=warmup_steps,
        num_training_steps=total_steps
    )

    # Create the output directory up front so the stats file can always be
    # written at the end, even if no epoch ever improves validation F1
    # (previously the dir was only created inside the save branch).
    os.makedirs(output_dir, exist_ok=True)

    # --- Training loop ---
    print(f"\n🎯 开始训练 (epochs={epochs}, batch_size={batch_size}, lr={learning_rate})...")
    print(f"   总训练步数: {total_steps}")

    best_val_f1 = 0
    best_saved = False  # whether a best checkpoint has been written to disk
    training_stats = []

    for epoch in range(epochs):
        print(f"\n{'=' * 60}")
        print(f"Epoch {epoch + 1}/{epochs}")
        print(f"{'=' * 60}")

        # Train one epoch.
        train_loss, train_acc = train_epoch(model, train_loader, optimizer, scheduler, device)
        print(f"\n训练 - Loss: {train_loss:.4f}, Accuracy: {train_acc:.4f}")

        # Validate.
        val_metrics = eval_model(model, val_loader, device)
        print(f"验证 - Loss: {val_metrics['loss']:.4f}, "
              f"Accuracy: {val_metrics['accuracy']:.4f}, "
              f"Precision: {val_metrics['precision']:.4f}, "
              f"Recall: {val_metrics['recall']:.4f}, "
              f"F1: {val_metrics['f1']:.4f}")

        # Record per-epoch statistics.
        training_stats.append({
            'epoch': epoch + 1,
            'train_loss': train_loss,
            'train_acc': float(train_acc),
            'val_loss': val_metrics['loss'],
            'val_acc': val_metrics['accuracy'],
            'val_precision': val_metrics['precision'],
            'val_recall': val_metrics['recall'],
            'val_f1': val_metrics['f1']
        })

        # Checkpoint whenever validation F1 improves.
        if val_metrics['f1'] > best_val_f1:
            best_val_f1 = val_metrics['f1']
            print(f"💾 保存最佳模型 (F1={best_val_f1:.4f})...")
            model.save_pretrained(output_dir)
            tokenizer.save_pretrained(output_dir)
            best_saved = True

    # --- Test the BEST checkpoint ---
    # Reload the saved best weights before testing; previously the
    # last-epoch in-memory weights were evaluated here, which could differ
    # from the "best model" the banner below claims to test.
    if best_saved:
        model = AutoModelForSequenceClassification.from_pretrained(output_dir)
        model = model.to(device)

    print(f"\n{'=' * 60}")
    print("🧪 测试最佳模型")
    print(f"{'=' * 60}")

    test_metrics = eval_model(model, test_loader, device)
    print(f"\n测试集结果:")
    print(f"  Accuracy:  {test_metrics['accuracy']:.4f}")
    print(f"  Precision: {test_metrics['precision']:.4f}")
    print(f"  Recall:    {test_metrics['recall']:.4f}")
    print(f"  F1-Score:  {test_metrics['f1']:.4f}")

    # Per-class breakdown (label 0 = safe, label 1 = dangerous).
    print(f"\n详细分类报告:")
    print(classification_report(
        test_metrics['true_labels'],
        test_metrics['predictions'],
        target_names=['安全', '危险']
    ))

    # --- Persist training statistics alongside the model ---
    stats_file = os.path.join(output_dir, 'training_stats.json')
    with open(stats_file, 'w', encoding='utf-8') as f:
        json.dump({
            'model_name': model_name,
            'training_stats': training_stats,
            'test_metrics': {
                'accuracy': test_metrics['accuracy'],
                'precision': test_metrics['precision'],
                'recall': test_metrics['recall'],
                'f1': test_metrics['f1']
            },
            'hyperparameters': {
                'max_length': max_length,
                'batch_size': batch_size,
                'learning_rate': learning_rate,
                'epochs': epochs,
                'warmup_steps': warmup_steps
            },
            'data_info': {
                'train_samples': len(train_df),
                'val_samples': len(val_df),
                'test_samples': len(test_df)
            },
            'timestamp': datetime.now().isoformat()
        }, f, ensure_ascii=False, indent=2)

    print(f"\n✅ 训练统计已保存到: {stats_file}")
    print(f"✅ 模型已保存到: {output_dir}")
    print(f"\n{'=' * 60}")
    print("🎉 训练完成！")
    print(f"{'=' * 60}")

# Script entry point: run the full V5 training pipeline.
if __name__ == "__main__":
    train_v5_model()
