"""
BERT模型训练脚本 - 支持Mac M4 MPS加速
"""
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import (
    BertTokenizer,
    BertForSequenceClassification,
    get_linear_schedule_with_warmup
)
from torch.optim import AdamW
import pandas as pd
from tqdm import tqdm
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, classification_report
import os
import json

class SafetyDataset(Dataset):
    """Torch dataset pairing raw texts with integer safety labels.

    Each item is tokenized lazily in ``__getitem__`` into fixed-length
    input IDs plus an attention mask, ready for BERT-style sequence
    classification.
    """

    def __init__(self, texts, labels, tokenizer, max_length=128):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        sample = str(self.texts[idx])
        target = int(self.labels[idx])

        # Pad/truncate every example to exactly max_length tokens so
        # batches stack without a collate function.
        encoded = self.tokenizer.encode_plus(
            sample,
            add_special_tokens=True,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',
        )

        # encode_plus returns (1, max_length) tensors; flatten to 1-D
        # so the DataLoader's default collation adds the batch dim.
        return {
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'labels': torch.tensor(target, dtype=torch.long),
        }

def get_device():
    """Pick the fastest available backend, preferring MPS, then CUDA, then CPU."""
    if torch.backends.mps.is_available():
        print("🚀 使用 Mac M4 MPS 加速训练")
        return torch.device("mps")
    if torch.cuda.is_available():
        print("🚀 使用 CUDA GPU 加速训练")
        return torch.device("cuda")
    print("⚠️  使用 CPU 训练（速度较慢）")
    return torch.device("cpu")

def train_epoch(model, data_loader, optimizer, scheduler, device):
    """Run one training epoch and return (mean batch loss, accuracy).

    Steps both the optimizer and the LR scheduler once per batch, and
    accumulates argmax predictions for an epoch-level accuracy score.
    """
    model.train()
    running_loss = 0.0
    all_preds = []
    all_targets = []

    bar = tqdm(data_loader, desc="训练中")

    for batch in bar:
        optimizer.zero_grad()

        ids = batch['input_ids'].to(device)
        mask = batch['attention_mask'].to(device)
        targets = batch['labels'].to(device)

        # Passing labels makes the HF model compute the loss internally.
        outputs = model(input_ids=ids, attention_mask=mask, labels=targets)
        batch_loss = outputs.loss
        running_loss += batch_loss.item()

        batch_loss.backward()
        optimizer.step()
        scheduler.step()

        # Collect per-batch predictions for the epoch accuracy.
        all_preds.extend(torch.argmax(outputs.logits, dim=1).cpu().numpy())
        all_targets.extend(targets.cpu().numpy())

        bar.set_postfix({'loss': batch_loss.item()})

    mean_loss = running_loss / len(data_loader)
    return mean_loss, accuracy_score(all_targets, all_preds)

def evaluate(model, data_loader, device):
    """Run inference over a loader and compute binary classification metrics.

    Returns:
        (avg_loss, accuracy, precision, recall, f1, predictions, true_labels)
        where predictions and true_labels are flat lists over the whole set.
    """
    model.eval()
    loss_sum = 0.0
    all_preds = []
    all_targets = []

    with torch.no_grad():
        for batch in tqdm(data_loader, desc="评估中"):
            ids = batch['input_ids'].to(device)
            mask = batch['attention_mask'].to(device)
            targets = batch['labels'].to(device)

            outputs = model(input_ids=ids, attention_mask=mask, labels=targets)
            loss_sum += outputs.loss.item()

            all_preds.extend(torch.argmax(outputs.logits, dim=1).cpu().numpy())
            all_targets.extend(targets.cpu().numpy())

    avg_loss = loss_sum / len(data_loader)
    accuracy = accuracy_score(all_targets, all_preds)
    # average='binary' reports metrics for the positive class (label 1).
    precision, recall, f1, _ = precision_recall_fscore_support(
        all_targets, all_preds, average='binary'
    )

    return avg_loss, accuracy, precision, recall, f1, all_preds, all_targets

def train_model(
    train_path: str,
    val_path: str,
    test_path: str,
    output_dir: str,
    model_name: str = 'bert-base-chinese',
    epochs: int = 3,
    batch_size: int = 32,
    learning_rate: float = 2e-5,
    max_length: int = 128
):
    """Fine-tune a BERT binary safety classifier and evaluate it.

    Loads train/val/test CSV splits (each must have 'text' and 'label'
    columns), fine-tunes `model_name` with AdamW + linear LR decay,
    checkpoints the model with the best validation accuracy into
    `output_dir`, then reloads that checkpoint for the test-set report
    and writes per-epoch stats to `output_dir`/training_stats.json.

    Args:
        train_path: CSV file for the training split.
        val_path: CSV file for the validation split.
        test_path: CSV file for the test split.
        output_dir: Directory receiving the best model, tokenizer and stats.
        model_name: Hugging Face model id to fine-tune.
        epochs: Number of passes over the training data.
        batch_size: Batch size shared by all three DataLoaders.
        learning_rate: AdamW learning rate.
        max_length: Token pad/truncate length per example.
    """

    print("="*70)
    print("🤖 BERT 内容安全检测模型训练")
    print("="*70)

    # 1. Load the three data splits from CSV.
    print("\n📂 加载数据集...")
    train_df = pd.read_csv(train_path)
    val_df = pd.read_csv(val_path)
    test_df = pd.read_csv(test_path)

    print(f"训练集: {len(train_df)} 条")
    print(f"验证集: {len(val_df)} 条")
    print(f"测试集: {len(test_df)} 条")

    # 2. Initialise the tokenizer and the 2-label classification head.
    print(f"\n🔧 加载预训练模型: {model_name}")
    tokenizer = BertTokenizer.from_pretrained(model_name)
    model = BertForSequenceClassification.from_pretrained(
        model_name,
        num_labels=2
    )

    device = get_device()
    model.to(device)

    # 3. Wrap each split in a SafetyDataset.
    print("\n📦 准备数据集...")
    train_dataset = SafetyDataset(
        train_df['text'].values,
        train_df['label'].values,
        tokenizer,
        max_length
    )

    val_dataset = SafetyDataset(
        val_df['text'].values,
        val_df['label'].values,
        tokenizer,
        max_length
    )

    test_dataset = SafetyDataset(
        test_df['text'].values,
        test_df['label'].values,
        tokenizer,
        max_length
    )

    # Only the training loader is shuffled; eval order doesn't matter.
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)

    # 4. Optimizer and LR schedule: num_warmup_steps=0 means a pure
    # linear decay from learning_rate down to 0 over all training steps.
    optimizer = AdamW(model.parameters(), lr=learning_rate)
    total_steps = len(train_loader) * epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=0,
        num_training_steps=total_steps
    )

    # 5. Training loop with best-checkpoint selection on validation accuracy.
    print(f"\n🚀 开始训练 ({epochs} epochs)...")
    best_val_acc = 0
    training_stats = []

    for epoch in range(epochs):
        print(f"\n{'='*70}")
        print(f"Epoch {epoch + 1}/{epochs}")
        print(f"{'='*70}")

        # Train for one epoch.
        train_loss, train_acc = train_epoch(
            model, train_loader, optimizer, scheduler, device
        )

        # Validate.
        val_loss, val_acc, val_precision, val_recall, val_f1, _, _ = evaluate(
            model, val_loader, device
        )

        print(f"\n📊 Epoch {epoch + 1} 结果:")
        print(f"训练 - Loss: {train_loss:.4f}, Acc: {train_acc:.4f}")
        print(f"验证 - Loss: {val_loss:.4f}, Acc: {val_acc:.4f}, "
              f"P: {val_precision:.4f}, R: {val_recall:.4f}, F1: {val_f1:.4f}")

        # Record per-epoch statistics for the JSON report.
        training_stats.append({
            'epoch': epoch + 1,
            'train_loss': train_loss,
            'train_acc': train_acc,
            'val_loss': val_loss,
            'val_acc': val_acc,
            'val_precision': val_precision,
            'val_recall': val_recall,
            'val_f1': val_f1
        })

        # Checkpoint whenever validation accuracy improves.
        # NOTE(review): if val_acc stays at exactly 0 for every epoch,
        # nothing is saved and the from_pretrained(output_dir) reload
        # below will fail — unlikely in practice, but worth confirming.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            print(f"✨ 新的最佳模型！验证准确率: {val_acc:.4f}")

            os.makedirs(output_dir, exist_ok=True)
            model.save_pretrained(output_dir)
            tokenizer.save_pretrained(output_dir)

    # 6. Evaluate on the held-out test set.
    print(f"\n{'='*70}")
    print("📈 在测试集上评估最佳模型")
    print(f"{'='*70}")

    # Reload the best checkpoint (the in-memory model may be a later,
    # worse epoch).
    model = BertForSequenceClassification.from_pretrained(output_dir)
    model.to(device)

    test_loss, test_acc, test_precision, test_recall, test_f1, predictions, true_labels = evaluate(
        model, test_loader, device
    )

    print(f"\n🎯 测试集结果:")
    print(f"Loss: {test_loss:.4f}")
    print(f"Accuracy: {test_acc:.4f}")
    print(f"Precision: {test_precision:.4f}")
    print(f"Recall: {test_recall:.4f}")
    print(f"F1-Score: {test_f1:.4f}")

    print(f"\n📋 详细分类报告:")
    print(classification_report(
        true_labels,
        predictions,
        target_names=['安全(0)', '危险(1)']
    ))

    # 7. Persist training history and final test metrics as JSON.
    stats_path = os.path.join(output_dir, 'training_stats.json')
    with open(stats_path, 'w', encoding='utf-8') as f:
        json.dump({
            'training_stats': training_stats,
            'test_results': {
                'loss': test_loss,
                'accuracy': test_acc,
                'precision': test_precision,
                'recall': test_recall,
                'f1': test_f1
            }
        }, f, indent=2, ensure_ascii=False)

    print(f"\n💾 模型和统计信息已保存到: {output_dir}")
    print(f"\n✅ 训练完成！")

if __name__ == "__main__":
    # Default experiment: 2-epoch fine-tune of the binary safety classifier.
    run_config = {
        "train_path": "../data/total_train.csv",
        "val_path": "../data/verify.csv",
        "test_path": "../data/total_test.csv",
        "output_dir": "./safety_classifier",
        "epochs": 2,
        "batch_size": 32,
        "learning_rate": 2e-5,
        "max_length": 128,
    }
    train_model(**run_config)
