# train.py
import os
import torch
from tqdm import tqdm
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from config.config import model_cfg,train_cfg,paths_cfg
from dataset.dataset import StreamTokenDataset
from dataset.sampler import RandomSubsetSampler
from scripts.load_model import load_model_and_optimizer

# os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

def save_checkpoint_safely(model,optimizer,global_step,save_dir,filename):
    """
    Safely save a checkpoint: write to a temporary file first, then
    atomically replace the target, so a crash mid-write can never leave
    a truncated/corrupt checkpoint at ``save_dir/filename``.

    Args:
        model: module whose ``state_dict()`` is saved.
        optimizer: optimizer whose ``state_dict()`` is saved.
        global_step: training-step counter stored alongside the weights.
        save_dir: checkpoint directory (created if missing); ``str`` or
            UTF-8 ``bytes``.
        filename: checkpoint file name; ``str`` or UTF-8 ``bytes``.

    Raises:
        Exception: whatever ``torch.save`` / ``os.replace`` raised is
            re-raised after the temporary file has been cleaned up.
    """
    # Normalize bytes -> str, and strip stray whitespace from str paths.
    # (The isinstance guards also let path-like objects pass through.)
    if isinstance(save_dir, bytes):
        save_dir = save_dir.decode('utf-8')
    if isinstance(filename, bytes):
        filename = filename.decode('utf-8')
    if isinstance(save_dir, str):
        save_dir = save_dir.strip()
    if isinstance(filename, str):
        filename = filename.strip()

    # Make sure the target directory exists.
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, filename)
    temp_path = save_path + ".tmp"
    # NOTE: temp_path is always str here (os.path.join + str concat), so no
    # extra type check is needed.

    try:
        # 1. Write the full checkpoint to the temporary file first.
        torch.save({
            'global_step': global_step,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
        }, temp_path)

        # 2. Atomic swap: os.replace overwrites the destination in one step
        #    (atomic or near-atomic on both POSIX and Windows).
        os.replace(temp_path, save_path)

        tqdm.write(f"✅ 安全保存检查点: {save_path}")

    except Exception as e:
        tqdm.write(f"❌ 保存检查点失败: {e}")
        # Never leave a partial .tmp file behind.
        if os.path.exists(temp_path):
            os.remove(temp_path)
        raise  # propagate so the caller knows the save failed

def main():
    """
    Run the full training loop.

    For each of ``train_cfg.epochs`` epochs: rebuild the streaming dataset
    and random-subset sampler, train for up to ``train_cfg.steps_per_epoch``
    batches, log loss and learning rate to TensorBoard, checkpoint twice per
    epoch, and save a per-epoch loss-curve PNG.
    """
    os.makedirs(paths_cfg.model_output_path, exist_ok=True)
    os.makedirs(paths_cfg.figures_path, exist_ok=True)

    # Load model, optimizer, loss function, resume step, and LR scheduler.
    model, optimizer, loss_fn, start_step, scheduler = load_model_and_optimizer()

    global_step = start_step # global step count (one step == one batch)

    writer = SummaryWriter(log_dir=paths_cfg.log_dir) # TensorBoard logging

    for epoch in range(train_cfg.epochs):
        print(f"🔄 开始第 {epoch + 1} 轮训练...")
        losses = []  # per-epoch loss history (for the loss-curve plot below)

        # Rebuild the dataset each epoch. max_tokens caps the stream at just
        # enough tokens for steps_per_epoch full batches (+1 for the shifted
        # label token).
        dataset = StreamTokenDataset(
            paths_cfg.encoded_ids_path,
            model_cfg.block_size,
            max_tokens=train_cfg.steps_per_epoch * train_cfg.batch_size * model_cfg.block_size+1,
            start_offset=0,  # random offset each time, or a fixed start position
        )
        sampler = RandomSubsetSampler(
            dataset,
            num_samples=train_cfg.steps_per_epoch * train_cfg.batch_size * model_cfg.block_size,
        )
        dataloader = DataLoader(
            dataset,
            sampler=sampler,
            batch_size=train_cfg.batch_size,
            drop_last=True,
            num_workers=0, # load data in the main process
            pin_memory=True,
        )
        print(f"📊 数据集大小: {len(dataset)}，实际训练样本: {len(sampler)}，batch_size: {train_cfg.batch_size}")
        print(f"📈 每 epoch 最多 {train_cfg.steps_per_epoch} 步")

        progress_bar = tqdm(dataloader, desc=f"Epoch {epoch+1}/{train_cfg.epochs}", leave=False)

        for step,(input_ids, labels) in enumerate(progress_bar): # [B,T]
            input_ids: torch.Tensor # [B,T] token ids (indices into the vocab)
            labels: torch.Tensor    # [B,T] token ids (indices into the vocab)
            input_ids, labels = input_ids.to(train_cfg.device), labels.to(train_cfg.device) # move to the training device

            optimizer.zero_grad() # reset gradients

            logits = model(input_ids) # forward pass -> logits: [B,T,vocab_size]

            # logits.flatten(0, 1) merges the first two dims -> [B*T,vocab_size]
            # labels.flatten() merges all dims -> [B*T]
            loss = loss_fn(logits.flatten(0, 1), labels.flatten()) # cross-entropy loss

            loss.backward()   # backward pass
            # Clip gradients to prevent explosion — after backward, before the
            # optimizer update.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()  # update parameters
            scheduler.step()  # advance the LR scheduler

            global_step += 1  # advance the global training step
            losses.append(loss.item())  # record loss

            progress_bar.set_postfix(loss=f"{loss.item():.4f}", )

            current_lr = optimizer.param_groups[0]['lr']  # LR of the first param group
            writer.add_scalar("Learning Rate", current_lr, global_step)
            writer.add_scalar("Loss/train", loss.item(), global_step)

            # Save twice per epoch: once at the halfway mark, once at the end.
            # NOTE(review): ZeroDivisionError if steps_per_epoch < 2 — confirm
            # config guarantees steps_per_epoch >= 2.
            if (step + 1) % (train_cfg.steps_per_epoch//2) == 0:
                save_checkpoint_safely(
                    model=model,
                    optimizer=optimizer,
                    global_step=global_step,
                    save_dir=paths_cfg.model_output_path,
                    filename=paths_cfg.checkpoint_file,
                )

        print(f"✅ 第 {epoch + 1} 轮训练完成 ")

        # 📊 Plot the loss curve and save to a file (non-blocking).
        plt.figure(figsize=(10, 5))
        plt.plot(losses, color='blue', linewidth=1)
        plt.title("Training Loss")
        plt.xlabel("Step")
        plt.ylabel("Loss")
        plt.grid(True, alpha=0.3)
        plt.tight_layout()

        plot_path = os.path.join(paths_cfg.figures_path, f"training_loss_epoch_{epoch}.png")
        plt.savefig(plot_path)
        plt.close()  # must close the figure to avoid a memory leak
        tqdm.write(f"📊 损失曲线已保存: {plot_path}")

    writer.close()

if __name__ == "__main__":
    main()
