"""
训练引擎：封装训练循环、优化、日志、保存
"""
import os
import json
from typing import Dict, Any, Optional
from tqdm.auto import tqdm

import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from torch.nn.utils import clip_grad_norm_

from src.model import JiaboForCausalLM, JiaboModelConfig
from src.dataset import JiaboDataset
from src.tokenizer import JiaboTokenizer

class JiaboTrainer:
    """
    Generic trainer supporting:
    - gradient accumulation
    - gradient clipping
    - learning-rate warmup (linear warmup + linear decay)
    - resumable training state (persisted in every checkpoint)
    - MPS acceleration (device taken from the config)
    """

    def __init__(self, train_cfg_path: str):
        """Load training hyperparameters and initialize training state.

        Args:
            train_cfg_path: path to a JSON training config. Keys read by this
                class: device, vocab_path, data_path, output_dir,
                learning_rate, weight_decay, warmup_steps, batch_size,
                max_seq_length, num_epochs, gradient_accumulation_steps,
                logging_steps, save_steps.
        """
        with open(train_cfg_path, "r", encoding="utf-8") as f:
            self.config = json.load(f)

        self.device = torch.device(self.config["device"])
        self.tokenizer = JiaboTokenizer(self.config["vocab_path"])
        self.model = None
        self.optimizer = None
        self.scheduler = None  # built in _setup_scheduler once total steps are known

        # Training state (persisted by _save_checkpoint so runs can resume)
        self.global_step = 0
        self.epoch = 0

    def setup_model(self, model_cfg_path: str):
        """Build the model from a JSON config and move it to the target device."""
        # JiaboModelConfig is imported at module level; no local import needed.
        with open(model_cfg_path, "r", encoding="utf-8") as f:
            model_cfg = json.load(f)

        config = JiaboModelConfig(**model_cfg)
        self.model = JiaboForCausalLM(config).to(self.device)

        print(f"✅ 模型加载完成 | 参数量: {self._count_parameters():,}")

    def _count_parameters(self) -> int:
        """Return the number of trainable (requires_grad) parameters."""
        return sum(p.numel() for p in self.model.parameters() if p.requires_grad)

    def _setup_optimizer(self):
        """Create the AdamW optimizer from config hyperparameters."""
        self.optimizer = AdamW(
            self.model.parameters(),
            lr=self.config["learning_rate"],
            weight_decay=self.config["weight_decay"],
        )

    def _setup_scheduler(self, num_training_steps: int):
        """Attach a linear-warmup + linear-decay LR scheduler to the optimizer.

        Args:
            num_training_steps: total number of optimizer updates planned.
        """
        from torch.optim.lr_scheduler import LambdaLR

        warmup_steps = self.config["warmup_steps"]

        def lr_lambda(current_step: int):
            # Ramp 0 -> 1 over warmup_steps, then decay linearly to 0.
            if current_step < warmup_steps:
                return float(current_step) / float(max(1, warmup_steps))
            return max(
                0.0,
                float(num_training_steps - current_step)
                / float(max(1, num_training_steps - warmup_steps)),
            )

        self.scheduler = LambdaLR(self.optimizer, lr_lambda)

    def train(self, model_cfg_path: str):
        """Run the full training loop.

        Correctness fixes over the previous version:
        - a partial accumulation window at the end of an epoch is now flushed
          (clip + step + zero_grad) instead of leaking gradients into the
          next epoch or being dropped entirely;
        - the final reported loss is the true mean per-batch loss (it was
          previously divided by gradient_accumulation_steps twice);
        - self.epoch is kept current so checkpoints record the real epoch;
        - zero epochs or an empty dataloader no longer raise
          NameError / ZeroDivisionError at the final report.
        """
        # 1. Initialization
        self.setup_model(model_cfg_path)
        self._setup_optimizer()

        # 2. Data
        dataset = JiaboDataset(
            self.config["data_path"],
            self.tokenizer,
            max_length=self.config["max_seq_length"],
        )
        dataloader = DataLoader(
            dataset,
            batch_size=self.config["batch_size"],
            shuffle=True,
            drop_last=True,
        )

        # 3. LR schedule — ceil-divide so the flushed partial window at each
        # epoch end is counted as an optimizer update too.
        grad_accum = self.config["gradient_accumulation_steps"]
        num_batches = len(dataloader)
        updates_per_epoch = (num_batches + grad_accum - 1) // grad_accum
        num_training_steps = updates_per_epoch * self.config["num_epochs"]
        self._setup_scheduler(num_training_steps)

        # 4. Training loop
        self.model.train()
        self.optimizer.zero_grad()  # start from a clean gradient state
        total_loss = 0.0  # scaled loss accumulated between log lines
        epoch_loss = 0.0  # UNSCALED loss accumulated over the current epoch

        for epoch in range(self.config["num_epochs"]):
            self.epoch = epoch  # keep checkpoint metadata accurate
            print(f"\n🚀 Epoch {epoch + 1}/{self.config['num_epochs']}")
            epoch_loss = 0.0

            pbar = tqdm(dataloader, desc="Training", leave=False)
            for step, batch in enumerate(pbar):
                # Move the batch to the target device
                input_ids = batch["input_ids"].to(self.device)
                labels = batch["labels"].to(self.device)

                # Forward pass (model returns logits and the LM loss)
                logits, loss = self.model(input_ids=input_ids, labels=labels)
                raw_loss = loss.item()

                # Scale for gradient accumulation before backward
                loss = loss / grad_accum
                loss.backward()

                total_loss += raw_loss / grad_accum
                epoch_loss += raw_loss

                # Optimizer update: on a full window, or on the epoch's final
                # (possibly partial) window so no gradients carry over.
                if (step + 1) % grad_accum == 0 or (step + 1) == num_batches:
                    # Gradient clipping
                    clip_grad_norm_(self.model.parameters(), max_norm=1.0)

                    # Optimizer / scheduler step
                    self.optimizer.step()
                    self.scheduler.step()
                    self.optimizer.zero_grad()

                    self.global_step += 1

                    # Logging: mean scaled loss per optimizer update, which
                    # equals the mean raw micro-batch loss over the window.
                    if self.global_step % self.config["logging_steps"] == 0:
                        avg_loss = total_loss / self.config["logging_steps"]
                        print(f"Step {self.global_step} | Loss: {avg_loss:.4f} | LR: {self.scheduler.get_last_lr()[0]:.6f}")
                        total_loss = 0.0

                    # Periodic checkpoint
                    if self.global_step % self.config["save_steps"] == 0:
                        self._save_checkpoint()

        print(f"\n✅ 训练完成！最终Loss: {epoch_loss / max(1, num_batches):.4f}")
        self._save_checkpoint("final_model.pt")

    def _save_checkpoint(self, filename: Optional[str] = None):
        """Save model weights plus full training state for resuming.

        Args:
            filename: checkpoint file name inside config["output_dir"];
                defaults to a step-numbered name.
        """
        os.makedirs(self.config["output_dir"], exist_ok=True)

        if filename is None:
            filename = f"checkpoint_step_{self.global_step}.pt"

        save_path = os.path.join(self.config["output_dir"], filename)

        torch.save({
            "model_state_dict": self.model.state_dict(),
            "optimizer_state_dict": self.optimizer.state_dict(),
            # scheduler is None until train() builds it; guard so early
            # saves don't crash with AttributeError.
            "scheduler_state_dict": self.scheduler.state_dict() if self.scheduler is not None else None,
            "global_step": self.global_step,
            "epoch": self.epoch,
            "config": self.config,
        }, save_path)

        print(f"💾 Checkpoint saved: {save_path}")
