import os
import time
import wandb
import torch
import pynvml
import argparse
import numpy as np
from tqdm import tqdm
from pathlib import Path
from typing import Tuple
from loguru import logger
from datetime import datetime
from dataclasses import dataclass

from cs336_basics.transformer import model as Model
from cs336_basics.transformer import optimizer as Optimizer
from cs336_basics.transformer import utils

ROOT_DIR = Path(__file__).resolve().parent.parent.parent


@dataclass
class ModelConfig:
    """Transformer language-model architecture hyper-parameters."""

    vocab_size: int  # vocabulary size
    context_length: int  # context (sequence) length
    d_model: int  # model / embedding dimension
    d_ff: int  # feed-forward hidden dimension
    num_layers: int  # number of Transformer blocks
    num_heads: int  # number of attention heads
    rope_theta: float  # RoPE base angle (theta)


@dataclass
class OptimConfig:
    """Optimizer (AdamW) hyper-parameters."""

    lr: float  # initial (peak) learning rate; also the max of the cosine schedule
    weight_decay: float  # AdamW weight-decay coefficient
    betas: Tuple[float, float]  # AdamW (beta1, beta2) moment coefficients
    max_l2_norm: float  # gradient-clipping threshold on the global L2 norm


@dataclass
class TrainConfig:
    """Training-run configuration."""

    train_batch_size: int  # batch size used during training
    epochs: float  # passes over the training tokens (may be fractional)
    validate_batch_size: int  # batch size used during validation
    validate_eval_times: int  # number of batches evaluated per validation run
    log_freq: int  # log metrics every N iterations
    validate_freq: int  # run validation every N iterations
    checkpoint_freq: int  # save a checkpoint every N iterations
    train_dataset_path: str | os.PathLike  # path to the training set (.npy)
    validate_dataset_path: str | os.PathLike | None  # path to the validation set (optional)
    final_model_path: str | os.PathLike  # where the final model is saved
    checkpoint_save_dir: str | os.PathLike  # directory for periodic checkpoints
    checkpoint_load_path: str | os.PathLike | None  # checkpoint to resume from (optional)


if __name__ == "__main__":
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

    # Add a file sink for loguru (rotated daily, retained 7 days).
    logger.add(
        ROOT_DIR.joinpath(f"log/train_{timestamp}.log"),
        rotation="1 day",
        retention="7 day",
        level="INFO",
    )

    # Command-line argument parsing.
    parser = argparse.ArgumentParser(description="Train a transformer model.")
    parser.add_argument(
        "--model",
        type=str,
        help="模型名称",
        default="TransformerLM",
    )
    parser.add_argument(
        "--final-model-path",
        type=str,
        help="最终模型保存路径",
        default=ROOT_DIR.joinpath("output/model/model.pt"),
    )
    parser.add_argument(
        "--train-dataset-path",
        type=str,
        help="训练集路径",
        default=ROOT_DIR.joinpath("data/TinyStoriesV2-GPT4-train.npy"),
    )
    parser.add_argument(
        "--validate-dataset-path",
        type=str,
        help="验证集路径",
        default=ROOT_DIR.joinpath("data/TinyStoriesV2-GPT4-valid.npy"),
    )
    parser.add_argument(
        "--checkpoint-save-dir",
        type=str,
        help="检查点保存目录",
        default=ROOT_DIR.joinpath("output/model/checkpoints"),
    )
    parser.add_argument(
        "--checkpoint-load-path",
        type=str,
        help="检查点加载路径",
        default=None,
    )

    # Argument validation: only the TransformerLM model is supported.
    args = parser.parse_args()
    if args.model != "TransformerLM":
        raise ValueError(
            f"model {args.model} is not supported now. Supported model is 'TransformerLM'"
        )

    if not args.train_dataset_path:
        raise ValueError("训练集路径 (--train-dataset-path) 不能为空")

    # Assemble all hyper-parameters.
    model_config = ModelConfig(
        vocab_size=10000,
        context_length=256,
        d_model=512,
        d_ff=1344,
        num_layers=4,
        num_heads=16,
        rope_theta=10000,
    )

    optim_config = OptimConfig(
        lr=3e-4,
        weight_decay=0.1,
        betas=(0.9, 0.95),
        max_l2_norm=1.0,
    )

    train_config = TrainConfig(
        train_batch_size=64,
        epochs=0.6,
        validate_batch_size=16,
        validate_eval_times=20,
        log_freq=100,
        validate_freq=1000,
        checkpoint_freq=5000,
        train_dataset_path=args.train_dataset_path,
        validate_dataset_path=args.validate_dataset_path,
        final_model_path=args.final_model_path,
        checkpoint_save_dir=args.checkpoint_save_dir,
        checkpoint_load_path=args.checkpoint_load_path,
    )

    # Device selection: prefer the first idle CUDA GPU, then Apple MPS, then CPU.
    if torch.cuda.is_available():

        def is_cuda_free(device_id):
            # A GPU counts as idle when its utilization is below 20%.
            # NOTE(review): NVML is initialized and shut down once per probed
            # device, and any NVML failure makes the device look busy
            # (returns False) instead of raising.
            try:
                pynvml.nvmlInit()
                handle = pynvml.nvmlDeviceGetHandleByIndex(device_id)
                util = pynvml.nvmlDeviceGetUtilizationRates(handle)
                pynvml.nvmlShutdown()
                return int(util.gpu) < 20  # utilization below 20% counts as idle
            except Exception:
                return False

        selected_device = None
        for i in range(torch.cuda.device_count()):
            if is_cuda_free(i):
                selected_device = f"cuda:{i}"
                break
        if selected_device is None:
            raise ValueError("无可用的 cuda 设备")

        device = torch.device(selected_device)
    elif torch.backends.mps.is_available():
        device = torch.device("mps")
    else:
        device = torch.device("cpu")
    logger.info(f"使用设备 {device} 🖥️")

    # Initialize wandb experiment tracking (API key read from the environment).
    # NOTE(review): the project name contains a typo ("assignement"); changing
    # it would move runs into a different wandb project, so it is left as-is.
    wandb.login(key=os.environ.get("WANDB_API_KEY"))
    wandb.init(
        project="cs336-assignement1-basics",
        name=f"TFLM_{timestamp}_{device.type}",
        config={
            "model": vars(model_config),
            "optimizer": vars(optim_config),
            "training": vars(train_config),
        },
    )

    # Model initialization.
    logger.info("开始初始化模型 🚀")
    model = Model.TransformerLM(
        vocab_size=model_config.vocab_size,
        context_length=model_config.context_length,
        num_layers=model_config.num_layers,
        d_model=model_config.d_model,
        num_heads=model_config.num_heads,
        theta=model_config.rope_theta,
        d_ff=model_config.d_ff,
        device=device,
    )
    logger.info(
        f"模型初始化成功 ✅  模型参数量: {sum(p.numel() for p in model.parameters() if p.requires_grad)}"
    )

    # Optimizer initialization.
    logger.info("开始初始化优化器 🚀")
    optim = Optimizer.AdamW(
        params=model.parameters(),
        lr=optim_config.lr,
        weight_decay=optim_config.weight_decay,
        betas=optim_config.betas,
    )
    logger.info("优化器初始化成功 ✅")

    # Resume from a checkpoint when one was provided.
    if train_config.checkpoint_load_path:
        logger.info(f"开始加载模型 checkpoint 🚀: {train_config.checkpoint_load_path}")
        start_iter = utils.load_checkpoint(
            src=train_config.checkpoint_load_path, model=model, optimizer=optim
        )
        # Continue from the iteration after the one stored in the checkpoint.
        start_iter += 1
        logger.info("模型 checkpoint 加载成功，将继续训练！➡️")
    else:
        start_iter = 1
        logger.info("未提供模型 checkpoint，开始从头训练 🆕")

    # Load the training set.
    logger.info(f"开始加载数据集 📚：{train_config.train_dataset_path}")
    train_dataset: np.ndarray = np.load(
        train_config.train_dataset_path, mmap_mode="r+"
    )  # memory-mapped so the full array is never read into RAM
    # NOTE(review): mmap_mode="r+" opens the file read-write; "r" would be
    # safer if nothing ever writes back to the dataset — confirm.
    logger.info("数据集加载成功 ✅")

    # Load the validation set if a path was given. The name is initialized to
    # None first: a bare annotation (`validate_dataset: np.ndarray`) does not
    # assign anything, so without this initialization the training loop's
    # `validate_dataset is not None` check raised NameError whenever no
    # validation path was provided.
    validate_dataset: np.ndarray | None = None
    if train_config.validate_dataset_path:
        logger.info(f"开始加载验证集 📚：{train_config.validate_dataset_path}")
        validate_dataset = np.load(
            train_config.validate_dataset_path, mmap_mode="r+"
        )  # memory-mapped so the full array is never read into RAM
        # NOTE(review): mmap_mode="r+" opens the file read-write; "r" would be
        # safer if nothing ever writes back to the dataset — confirm.
        logger.info("验证集加载成功 ✅")
    else:
        logger.info("未指定验证集路径，跳过验证集加载 ⏭️")

    # Pre-training estimates: each iteration consumes
    # train_batch_size * context_length tokens; `epochs` scales how many of
    # the dataset's tokens are consumed in total.
    total_iters = int(train_dataset.shape[0] * train_config.epochs) // (
        train_config.train_batch_size * model_config.context_length
    )
    # Round total_iters up to a multiple of validate_freq so the run ends
    # exactly on a validation boundary.
    if total_iters % train_config.validate_freq != 0:
        total_iters = (
            (total_iters + train_config.validate_freq - 1)
            // train_config.validate_freq
            * train_config.validate_freq
        )
    tokens_per_iter = (
        train_config.train_batch_size * model_config.context_length
    )  # tokens consumed per iteration
    logger.info(
        f"训练的总token数: {total_iters * tokens_per_iter}, 训练总迭代数: {total_iters}, batch_size: {train_config.train_batch_size}, 上下文长度: {model_config.context_length}"
    )

    # Training loop.
    logger.info("开始模型训练 🚀")
    start_time = time.time()
    for step in tqdm(range(start_iter, total_iters + 1), desc="训练进度", unit="step"):
        # Reset gradients from the previous step.
        optim.zero_grad()

        # Cosine-annealed learning rate for this step (with linear warmup).
        lr_now = utils.lr_cosine_schedule(
            t=step,
            a_max=optim_config.lr,
            a_min=optim_config.lr * 0.1,  # floor the LR at 10% of the peak
            t_w=2000,  # first 2000 iterations are linear warmup
            t_c=total_iters,
        )
        for param_group in optim.param_groups:
            param_group["lr"] = lr_now

        # Sample a training batch.
        inputs, targets = utils.get_batch(
            dataset=train_dataset,
            batch_size=train_config.train_batch_size,
            context_length=model_config.context_length,
            device=device,
        )

        # Parameter update: forward, loss, backward, clip, step.
        logits = model(inputs)  # forward pass
        loss = utils.cross_entropy(logits, targets)  # compute the loss
        loss.backward()  # backward pass to compute gradients
        utils.gradient_clipping(
            model.parameters(), max_l2_norm=optim_config.max_l2_norm
        )  # clip gradients by global L2 norm
        optim.step()  # apply the optimizer update

        # Periodic metric logging.
        if step % train_config.log_freq == 0:
            grad_norm = utils.compute_grad_norm(model.parameters())
            logger.info(
                f"Step {step}:  Loss={loss.item()},  lr={lr_now},  Grad L2 Norm: {grad_norm}"
            )
            wandb.log(
                {
                    "train_loss": loss.item(),
                    "lr": lr_now,
                    "grad_l2_norm": grad_norm,
                    "wallclock_time": time.time() - start_time,
                },
                step=step * tokens_per_iter,  # wandb x-axis = tokens trained so far
            )

        # Periodic validation.
        # NOTE(review): no model.eval()/train() toggling is visible here —
        # presumably utils.evaluate_model handles it; confirm.
        if validate_dataset is not None and step % train_config.validate_freq == 0:
            val_loss = utils.evaluate_model(
                model=model,
                validate_dataset=validate_dataset,
                batch_size=train_config.validate_batch_size,
                context_length=model_config.context_length,
                device=device,
                eval_times=train_config.validate_eval_times,
            )
            logger.info(f"验证集 Loss: {val_loss}")
            wandb.log(
                {"validate_loss": val_loss, "wallclock_time": time.time() - start_time},
                step=step * tokens_per_iter,  # wandb x-axis = tokens trained so far
            )

        # Periodic checkpointing.
        if step % train_config.checkpoint_freq == 0:
            save_dir = Path(train_config.checkpoint_save_dir)
            if not save_dir.exists():
                save_dir.mkdir(parents=True, exist_ok=True)

            checkpoint_path = save_dir.joinpath(f"checkpoint_{timestamp}_{step}.pt")
            logger.info(f"正在保存模型 checkpoint 到 {checkpoint_path} 💾")
            utils.save_checkpoint(
                model=model, optimizer=optim, iteration=step, out=checkpoint_path
            )
            logger.info("模型 checkpoint 保存成功 ✅")
    logger.info("模型训练成功 🎉")

    # Ensure the destination directory exists before the final save. The
    # checkpoint directory is created inside the loop above, but the final
    # model path's parent never was — a missing directory here would make the
    # very last save fail and lose the run's result.
    Path(train_config.final_model_path).parent.mkdir(parents=True, exist_ok=True)

    # Persist the final model (weights + optimizer state + iteration count).
    utils.save_checkpoint(
        model=model,
        optimizer=optim,
        iteration=total_iters,
        out=train_config.final_model_path,
    )
    logger.info(f"模型已保存到: {train_config.final_model_path} ✨")

    wandb.finish()  # close the wandb run