# scripts/load_model.py
import os
import torch
from torch import nn
from torch.optim.lr_scheduler import CosineAnnealingLR

from config.config import model_cfg,train_cfg,paths_cfg
from models.llama_moe import LLaMA4MoEModel

def load_model_and_optimizer():
    """Build the MoE model, optimizer, loss, and cosine LR scheduler,
    resuming from a checkpoint on disk if one exists.

    Returns:
        tuple: ``(model, optimizer, loss_fn, start_step, scheduler)`` where
        ``start_step`` is 0 for a fresh run, or the saved global step when
        a checkpoint was loaded.
    """
    model = LLaMA4MoEModel(
        vocab_size=model_cfg.vocab_size,
        d_model=model_cfg.d_model,
        n_layers=model_cfg.n_layers,
        n_heads=model_cfg.n_heads,
        block_size=model_cfg.block_size,
        rope_theta=model_cfg.rope_theta,
        num_experts=model_cfg.num_experts,
        experts_per_tok=model_cfg.experts_per_tok,
        expert_hidden_size=model_cfg.expert_hidden_size,
        shared_hidden_size=model_cfg.shared_hidden_size,
    ).to(train_cfg.device)

    optimizer = torch.optim.AdamW(model.parameters(), lr=train_cfg.learning_rate)
    loss_fn = nn.CrossEntropyLoss()

    start_step = 0

    # Total number of optimizer steps over the whole run; the cosine
    # schedule anneals over this horizon.
    total_steps = train_cfg.epochs * train_cfg.steps_per_epoch
    scheduler = CosineAnnealingLR(
        optimizer,
        T_max=total_steps,
        eta_min=1e-8,  # small non-zero floor so the LR never collapses to 0
    )

    checkpoint_path = os.path.join(paths_cfg.model_output_path,
                                   paths_cfg.checkpoint_file)

    if os.path.exists(checkpoint_path):
        print(f"🔁 加载检查点: {checkpoint_path}")

        ckpt = torch.load(checkpoint_path, map_location=train_cfg.device, weights_only=True)

        # strict=False tolerates architecture drift, but silently dropped
        # keys are a classic source of "resumed" models that train badly --
        # surface any mismatch instead of swallowing it.
        load_result = model.load_state_dict(ckpt['model_state_dict'], strict=False)
        if load_result.missing_keys:
            print(f"⚠️ checkpoint missing keys: {load_result.missing_keys}")
        if load_result.unexpected_keys:
            print(f"⚠️ checkpoint unexpected keys: {load_result.unexpected_keys}")
        optimizer.load_state_dict(ckpt['optimizer_state_dict'])  # must be restored

        # Older checkpoints may not record 'global_step'; treat them as step 0
        # instead of raising KeyError.
        start_step = ckpt.get('global_step', 0)

        if 'scheduler_state_dict' in ckpt:
            # Resume the cosine schedule exactly where it left off so the
            # LR curve is continuous across restarts.
            scheduler.load_state_dict(ckpt['scheduler_state_dict'])
        else:
            # Legacy checkpoints carry no scheduler state: fall back to the
            # previous behavior of restarting the schedule from the initial
            # learning rate.
            for param_group in optimizer.param_groups:
                param_group['lr'] = train_cfg.learning_rate

        print(f"✅ 恢复训练，global_step = {start_step}")
    else:
        print("🆕 未找到检查点，从头开始训练")

    return model, optimizer, loss_fn, start_step, scheduler