#!/usr/bin/env python3
"""
QwenGuard-0.6B 全量微调训练脚本
支持 Mac M4 (MPS) 和 A100 (CUDA)
"""

import os
import sys
import torch
import pandas as pd
import numpy as np
from pathlib import Path
from dataclasses import dataclass, field
from typing import Optional, Dict, List
import json
from datetime import datetime

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
    EarlyStoppingCallback
)
from datasets import Dataset
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import warnings

warnings.filterwarnings('ignore')


@dataclass
class QwenGuardTrainingConfig:
    """Hyper-parameters and file paths for QwenGuard fine-tuning.

    The batch-size fields carry MPS-friendly defaults; QwenGuardTrainer
    overwrites them at runtime based on the detected device.
    """

    # --- model ---
    model_name: str = "Qwen/Qwen3Guard-Gen-0.6B"  # HF Hub id of the base model
    max_length: int = 512                         # max tokens per sample (longer is truncated)

    # --- data paths ---
    train_data_path: str = "../processed_data/train.csv"
    val_data_path: str = "../processed_data/val.csv"

    # --- output path ---
    output_dir: str = "../outputs"

    # --- optimization ---
    num_epochs: int = 3
    learning_rate: float = 2e-5
    warmup_ratio: float = 0.1
    weight_decay: float = 0.01

    # --- batching (defaults target MPS; adjusted per device at runtime) ---
    per_device_train_batch_size: int = 2
    per_device_eval_batch_size: int = 4
    gradient_accumulation_steps: int = 8

    # --- logging / checkpointing ---
    logging_steps: int = 50
    save_steps: int = 500
    eval_steps: int = 500
    save_total_limit: int = 3

    # --- early stopping ---
    early_stopping_patience: int = 3
    early_stopping_threshold: float = 0.001

    # --- reproducibility ---
    seed: int = 42


class QwenGuardDataset:
    """Converts (text, label) CSV rows into tokenized causal-LM training samples."""

    def __init__(self, tokenizer, max_length: int = 512):
        """
        Args:
            tokenizer: HF tokenizer; must provide ``apply_chat_template`` and
                ``eos_token`` (true for Qwen chat tokenizers).
            max_length: maximum token length; longer samples are truncated.
        """
        self.tokenizer = tokenizer
        self.max_length = max_length

    def format_prompt(self, text: str, label: int) -> str:
        """Build the full training string: chat prompt + safety verdict + EOS.

        Args:
            text: user input to classify.
            label: 0 = safe, anything else = toxic/unsafe.

        Returns:
            The complete training text (prompt and target answer concatenated).
            FIX: the original annotated this as ``Dict[str, str]`` but it has
            always returned a plain string.
        """
        messages = [{"role": "user", "content": text}]

        # Target completion the model must learn to generate.
        assistant_message = "Safety: Safe" if label == 0 else "Safety: Unsafe"

        # add_generation_prompt=True appends the assistant-turn header so the
        # verdict text directly continues the prompt.
        prompt = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )

        # NOTE(review): eos_token is appended explicitly; if the chat template
        # already emits an end-of-turn token this may duplicate it — confirm
        # against the tokenizer's chat template.
        return prompt + assistant_message + self.tokenizer.eos_token

    def tokenize_function(self, examples: Dict) -> Dict:
        """Batched ``datasets.map`` callback: format then tokenize a batch.

        Args:
            examples: batch dict with 'text' and 'label' columns.

        Returns:
            Tokenized batch with a 'labels' field for the LM loss.
        """
        formatted_texts = [
            self.format_prompt(text, label)
            for text, label in zip(examples['text'], examples['label'])
        ]

        tokenized = self.tokenizer(
            formatted_texts,
            truncation=True,
            max_length=self.max_length,
            padding=False,  # dynamic padding is done by the DataCollator
            return_tensors=None
        )

        # Causal-LM labels. FIX: copy each id list individually — the original
        # `.copy()` was shallow, so labels and input_ids shared the same inner
        # lists and any in-place edit to one would corrupt the other.
        # NOTE(review): loss is computed over the whole sequence, prompt
        # included — a common simplification; mask prompt tokens with -100
        # here if prompt loss is undesirable.
        tokenized['labels'] = [list(ids) for ids in tokenized['input_ids']]

        return tokenized

    def load_dataset(self, file_path: str) -> "Dataset":
        """Load a CSV with 'text' and 'label' columns and tokenize it.

        Args:
            file_path: path to the CSV file.

        Returns:
            A tokenized HuggingFace Dataset (original columns removed).
        """
        df = pd.read_csv(file_path)

        dataset = Dataset.from_pandas(df)

        tokenized_dataset = dataset.map(
            self.tokenize_function,
            batched=True,
            remove_columns=dataset.column_names,
            desc="分词中"
        )

        return tokenized_dataset


class QwenGuardTrainer:
    """End-to-end fine-tuning driver: device setup, data prep, training, saving."""

    def __init__(self, config: "QwenGuardTrainingConfig"):
        """
        Args:
            config: training configuration; its batch-size/precision-related
                fields are overwritten according to the detected device.
        """
        self.config = config
        self.device_type = self._get_device_type()
        self._adjust_config_for_device()

        # Reproducibility across python / numpy / torch RNGs.
        self._set_seed(config.seed)

        # Output layout: checkpoints/, best_model/, logs/ under output_dir.
        self.output_dir = Path(config.output_dir)
        self.checkpoint_dir = self.output_dir / "checkpoints"
        self.best_model_dir = self.output_dir / "best_model"
        self.log_dir = self.output_dir / "logs"

        for dir_path in [self.checkpoint_dir, self.best_model_dir, self.log_dir]:
            dir_path.mkdir(parents=True, exist_ok=True)

    def _get_device_type(self) -> str:
        """Return the best available accelerator: 'cuda' > 'mps' > 'cpu'."""
        if torch.cuda.is_available():
            return "cuda"
        elif torch.backends.mps.is_available():
            return "mps"
        else:
            return "cpu"

    def _adjust_config_for_device(self):
        """Overwrite batch sizes and precision flags for the detected device."""
        if self.device_type == "cuda":
            # A100: larger batches, BF16 mixed precision.
            self.config.per_device_train_batch_size = 8
            self.config.per_device_eval_batch_size = 16
            self.config.gradient_accumulation_steps = 4
            self.use_fp16 = False
            self.use_bf16 = True
            print("🚀 使用 A100 GPU (CUDA) 训练")
            print(f"   - Batch Size: {self.config.per_device_train_batch_size}")
            print(f"   - Gradient Accumulation: {self.config.gradient_accumulation_steps}")
            print(f"   - 有效 Batch Size: {self.config.per_device_train_batch_size * self.config.gradient_accumulation_steps}")
            print(f"   - 精度: BF16")
        elif self.device_type == "mps":
            # Mac M4: small batches with gradient accumulation.
            self.config.per_device_train_batch_size = 2
            self.config.per_device_eval_batch_size = 4
            self.config.gradient_accumulation_steps = 8
            # FIX: fp16 mixed precision is rejected by Trainer/accelerate on
            # MPS ("fp16 mixed precision requires a GPU"), so the original
            # fp16=True crashed at training start. Train in full FP32 instead.
            self.use_fp16 = False
            self.use_bf16 = False
            print("🚀 使用 Mac M4 (MPS) 训练")
            print(f"   - Batch Size: {self.config.per_device_train_batch_size}")
            print(f"   - Gradient Accumulation: {self.config.gradient_accumulation_steps}")
            print(f"   - 有效 Batch Size: {self.config.per_device_train_batch_size * self.config.gradient_accumulation_steps}")
            print(f"   - 精度: FP32")
        else:
            # CPU fallback: tiny batches, no mixed precision.
            self.config.per_device_train_batch_size = 1
            self.config.per_device_eval_batch_size = 2
            self.config.gradient_accumulation_steps = 16
            self.use_fp16 = False
            self.use_bf16 = False
            print("⚠️  使用 CPU 训练（速度较慢）")

    def _set_seed(self, seed: int):
        """Seed python, numpy and torch RNGs (CUDA/MPS included when present)."""
        import random
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)
        # Seed the MPS generator too, where this torch version exposes it.
        if hasattr(torch, "mps") and torch.backends.mps.is_available():
            torch.mps.manual_seed(seed)

    def load_model_and_tokenizer(self):
        """Load tokenizer and model, pick dtype/device, enable checkpointing."""
        print("\n" + "=" * 70)
        print(f"🔧 加载模型: {self.config.model_name}")
        print("=" * 70)

        self.tokenizer = AutoTokenizer.from_pretrained(
            self.config.model_name,
            trust_remote_code=True,
            use_fast=False
        )

        # Reuse EOS as the padding token when none is defined.
        # NOTE(review): with pad_token == eos_token, DataCollatorForLanguageModeling
        # masks padding (= EOS) positions to -100, so the model may never get a
        # loss signal for emitting EOS — confirm this is acceptable.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        model_kwargs = {
            "trust_remote_code": True,
        }

        if self.device_type == "cuda":
            model_kwargs["torch_dtype"] = torch.bfloat16
            model_kwargs["device_map"] = "auto"
        else:
            # FIX: on MPS/CPU we train without mixed precision, so load FP32
            # weights. The original loaded float16 on MPS, which makes the
            # optimizer update fp16 master weights directly — numerically
            # unstable for full fine-tuning. 0.6B params in FP32 is ~2.4 GB.
            model_kwargs["torch_dtype"] = torch.float32

        self.model = AutoModelForCausalLM.from_pretrained(
            self.config.model_name,
            **model_kwargs
        )

        # MPS models are not auto-placed; move explicitly.
        if self.device_type == "mps":
            self.model = self.model.to("mps")

        # Save memory by recomputing activations in backward.
        self.model.gradient_checkpointing_enable()
        # FIX: KV-caching is incompatible with gradient checkpointing during
        # training; disable it to avoid warnings/incorrect recomputation.
        self.model.config.use_cache = False

        print(f"✅ 模型加载完成")
        print(f"   - 参数量: {self.model.num_parameters() / 1e6:.2f}M")
        print(f"   - 设备: {self.device_type}")

    def prepare_datasets(self):
        """Load and tokenize the train/validation CSVs into HF Datasets."""
        print("\n" + "=" * 70)
        print("📂 准备数据集")
        print("=" * 70)

        dataset_processor = QwenGuardDataset(
            self.tokenizer,
            max_length=self.config.max_length
        )

        print(f"加载训练集: {self.config.train_data_path}")
        self.train_dataset = dataset_processor.load_dataset(self.config.train_data_path)
        print(f"✅ 训练集: {len(self.train_dataset):,} 条")

        print(f"加载验证集: {self.config.val_data_path}")
        self.val_dataset = dataset_processor.load_dataset(self.config.val_data_path)
        print(f"✅ 验证集: {len(self.val_dataset):,} 条")

    def get_training_arguments(self) -> "TrainingArguments":
        """Build TrainingArguments from the (device-adjusted) config."""
        return TrainingArguments(
            output_dir=str(self.checkpoint_dir),

            # Schedule / batching.
            num_train_epochs=self.config.num_epochs,
            per_device_train_batch_size=self.config.per_device_train_batch_size,
            per_device_eval_batch_size=self.config.per_device_eval_batch_size,
            gradient_accumulation_steps=self.config.gradient_accumulation_steps,

            # Optimizer.
            learning_rate=self.config.learning_rate,
            weight_decay=self.config.weight_decay,
            warmup_ratio=self.config.warmup_ratio,

            # Mixed precision (both False on MPS/CPU).
            fp16=self.use_fp16,
            bf16=self.use_bf16,

            # Logging / checkpointing. save_strategy is explicit because
            # load_best_model_at_end requires it to match evaluation_strategy
            # (and save_steps to be a multiple of eval_steps).
            logging_dir=str(self.log_dir),
            logging_steps=self.config.logging_steps,
            save_strategy="steps",
            save_steps=self.config.save_steps,
            save_total_limit=self.config.save_total_limit,

            # Evaluation / best-model selection by lowest eval_loss.
            # NOTE(review): `evaluation_strategy` was renamed `eval_strategy`
            # in transformers >= 4.46 — confirm the pinned version.
            evaluation_strategy="steps",
            eval_steps=self.config.eval_steps,
            load_best_model_at_end=True,
            metric_for_best_model="eval_loss",
            greater_is_better=False,

            # Misc.
            seed=self.config.seed,
            report_to="none",  # no external experiment trackers
            save_safetensors=True,
            remove_unused_columns=False,
        )

    def train(self):
        """Run training with early stopping; save the best model and stats.

        Returns:
            The fitted ``transformers.Trainer`` instance.
        """
        print("\n" + "=" * 70)
        print("🚀 开始训练")
        print("=" * 70)

        # Causal-LM collator: dynamic padding; pad positions in labels are
        # masked to -100 (it rebuilds labels from input_ids when mlm=False).
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer,
            mlm=False
        )

        training_args = self.get_training_arguments()

        # Stop when eval_loss stops improving by more than the threshold.
        early_stopping = EarlyStoppingCallback(
            early_stopping_patience=self.config.early_stopping_patience,
            early_stopping_threshold=self.config.early_stopping_threshold
        )

        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=self.train_dataset,
            eval_dataset=self.val_dataset,
            data_collator=data_collator,
            callbacks=[early_stopping]
        )

        print(f"\n开始时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        train_result = trainer.train()

        # Persist the best checkpoint (load_best_model_at_end=True means the
        # in-memory model is already the best one) plus the tokenizer.
        print("\n" + "=" * 70)
        print("💾 保存最佳模型")
        print("=" * 70)

        trainer.save_model(str(self.best_model_dir))
        self.tokenizer.save_pretrained(str(self.best_model_dir))

        print(f"✅ 模型已保存到: {self.best_model_dir}")

        # Dump a compact summary of the run next to the model.
        train_stats = {
            "train_runtime": train_result.metrics.get("train_runtime", 0),
            "train_samples_per_second": train_result.metrics.get("train_samples_per_second", 0),
            "train_steps_per_second": train_result.metrics.get("train_steps_per_second", 0),
            "total_flos": train_result.metrics.get("total_flos", 0),
            "train_loss": train_result.metrics.get("train_loss", 0),
            "epoch": train_result.metrics.get("epoch", 0),
        }

        stats_path = self.best_model_dir / "training_stats.json"
        with open(stats_path, 'w', encoding='utf-8') as f:
            json.dump(train_stats, f, indent=2, ensure_ascii=False)

        print(f"✅ 训练统计已保存到: {stats_path}")

        print("\n" + "=" * 70)
        print("📊 训练摘要")
        print("=" * 70)
        print(f"训练轮数: {train_stats['epoch']:.2f}")
        print(f"训练时长: {train_stats['train_runtime']:.2f} 秒")
        print(f"训练速度: {train_stats['train_samples_per_second']:.2f} samples/s")
        print(f"训练损失: {train_stats['train_loss']:.4f}")

        return trainer

    def run(self):
        """Full pipeline: load model → prepare data → train → report paths."""
        print("\n" + "=" * 70)
        print("🤖 QwenGuard-0.6B 全量微调训练")
        print("=" * 70)
        print(f"开始时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

        self.load_model_and_tokenizer()
        self.prepare_datasets()
        trainer = self.train()

        print("\n" + "=" * 70)
        print("✅ 训练完成！")
        print("=" * 70)
        print(f"结束时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"\n📁 输出目录: {self.output_dir}")
        print(f"📁 最佳模型: {self.best_model_dir}")
        print(f"📁 训练日志: {self.log_dir}")


def main():
    """Entry point: build the default config and run the full training pipeline."""
    trainer = QwenGuardTrainer(QwenGuardTrainingConfig())
    trainer.run()


if __name__ == "__main__":
    main()
