"""
训练器实现
支持预训练、SFT、RLHF等不同阶段的训练
"""

import os
import time
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader
from tqdm import tqdm
from typing import Optional, Dict, Any, List
import wandb
from contextlib import nullcontext

from model_config import LLMConfig
from transformer_model import LLMForCausalLM
from tokenizer import LLMTokenizer
from dataset import create_pretrain_dataloader, create_sft_dataloader, create_dpo_dataloader


class Trainer:
    """Base trainer: device placement, optional wandb logging, and
    checkpoint save/load shared by all training stages."""

    def __init__(
        self,
        model: LLMForCausalLM,
        tokenizer: LLMTokenizer,
        config: LLMConfig,
        device: str = "cuda",
        use_wandb: bool = False,
        project_name: str = "llm_training"
    ):
        self.model = model
        self.tokenizer = tokenizer
        self.config = config
        self.device = device
        self.use_wandb = use_wandb

        # Place the model on the target device.
        self.model.to(device)

        # Start a wandb run when experiment tracking is enabled.
        if use_wandb:
            wandb.init(project=project_name, config=config.__dict__)

    def save_model(self, save_path: str):
        """Persist model weights, config and tokenizer under `save_path`."""
        os.makedirs(save_path, exist_ok=True)
        weights_file = os.path.join(save_path, "pytorch_model.bin")
        torch.save(self.model.state_dict(), weights_file)
        self.config.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    def load_model(self, load_path: str):
        """Restore model weights and config previously written by save_model."""
        weights_file = os.path.join(load_path, "pytorch_model.bin")
        state_dict = torch.load(weights_file, map_location=self.device)
        self.model.load_state_dict(state_dict)
        # Refresh the in-memory config from the checkpoint directory.
        self.config = LLMConfig.from_pretrained(load_path)


class PretrainTrainer(Trainer):
    """Trainer for the pre-training stage.

    Runs causal-LM training with mixed precision (AMP), gradient
    clipping, and a cosine learning-rate schedule stepped once per epoch.
    """

    def __init__(
        self,
        model: LLMForCausalLM,
        tokenizer: LLMTokenizer,
        config: LLMConfig,
        **kwargs
    ):
        super().__init__(model, tokenizer, config, **kwargs)

        # Optimizer: AdamW with modest weight decay; fall back to a
        # default LR when the config does not define one.
        self.optimizer = AdamW(
            self.model.parameters(),
            lr=getattr(config, 'learning_rate', 1e-4),
            weight_decay=0.01
        )

        # Cosine schedule. T_max is expressed in *epochs*, so the scheduler
        # must be stepped once per epoch (see train()); stepping it per
        # batch would complete the cosine cycle after T_max batches and
        # then make the LR oscillate back upward.
        self.scheduler = CosineAnnealingLR(
            self.optimizer,
            T_max=getattr(config, 'num_epochs', 100)
        )

        # Gradient scaler for mixed-precision (AMP) training.
        self.scaler = torch.cuda.amp.GradScaler()

    def train_epoch(self, dataloader: DataLoader, epoch: int) -> Dict[str, float]:
        """Train for one epoch; returns {'loss': mean loss over batches}."""
        self.model.train()
        total_loss = 0.0
        num_batches = len(dataloader)

        progress_bar = tqdm(dataloader, desc=f"Epoch {epoch}")

        for batch_idx, batch in enumerate(progress_bar):
            # Move batch tensors to the training device.
            input_ids = batch['input_ids'].to(self.device)
            labels = batch['labels'].to(self.device)
            loss_mask = batch['loss_mask'].to(self.device)

            # Forward pass under autocast (mixed precision).
            with torch.cuda.amp.autocast():
                outputs = self.model(input_ids=input_ids, labels=labels)
                loss = outputs.loss

                # Apply the loss mask.
                # NOTE(review): this only has an effect if the model returns
                # a per-token (unreduced) loss; if `outputs.loss` is already
                # a scalar mean, the expression reduces back to `loss` —
                # confirm against LLMForCausalLM.
                if loss_mask is not None:
                    loss = (loss * loss_mask).sum() / loss_mask.sum()

                # Add the MoE load-balancing auxiliary loss when present.
                if hasattr(outputs, 'aux_loss') and outputs.aux_loss > 0:
                    loss = loss + outputs.aux_loss

            # Backward pass with gradient scaling.
            self.scaler.scale(loss).backward()

            # Unscale before clipping so the threshold is in true units.
            self.scaler.unscale_(self.optimizer)
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)

            # Optimizer step (skipped internally on inf/NaN gradients).
            self.scaler.step(self.optimizer)
            self.scaler.update()
            self.optimizer.zero_grad()

            # Track running loss.
            total_loss += loss.item()
            avg_loss = total_loss / (batch_idx + 1)

            # Update the progress bar.
            progress_bar.set_postfix({
                'loss': f'{loss.item():.4f}',
                'avg_loss': f'{avg_loss:.4f}',
                'lr': f'{self.optimizer.param_groups[0]["lr"]:.2e}'
            })

            # Log to wandb.
            if self.use_wandb:
                wandb.log({
                    'train/loss': loss.item(),
                    'train/avg_loss': avg_loss,
                    'train/learning_rate': self.optimizer.param_groups[0]['lr'],
                    'train/epoch': epoch
                })

        return {'loss': total_loss / num_batches}

    def train(
        self,
        train_dataloader: DataLoader,
        num_epochs: int = 10,
        save_interval: int = 1000,
        save_path: str = "./checkpoints"
    ):
        """Full training loop.

        Args:
            train_dataloader: batches of pre-training data.
            num_epochs: number of epochs to run.
            save_interval: save a checkpoint every N epochs.
            save_path: directory for checkpoints; a final checkpoint is
                always written under `<save_path>/final`.
        """
        for epoch in range(num_epochs):
            print(f"\n开始训练 Epoch {epoch + 1}/{num_epochs}")

            # Train one epoch.
            metrics = self.train_epoch(train_dataloader, epoch)

            # Advance the cosine schedule once per epoch (T_max is in epochs).
            self.scheduler.step()

            print(f"Epoch {epoch + 1} 完成，平均损失: {metrics['loss']:.4f}")

            # Periodic checkpoint.
            if (epoch + 1) % save_interval == 0:
                checkpoint_path = os.path.join(save_path, f"epoch_{epoch + 1}")
                self.save_model(checkpoint_path)
                print(f"模型已保存到: {checkpoint_path}")

        # Always persist the final model: with the default save_interval
        # the periodic condition may never trigger within num_epochs.
        final_path = os.path.join(save_path, "final")
        self.save_model(final_path)
        print(f"模型已保存到: {final_path}")


class SFTTrainer(Trainer):
    """Trainer for supervised fine-tuning (SFT).

    Same loop as pre-training, but the loss mask is meant to restrict
    the loss to assistant-response tokens. The cosine LR schedule is
    stepped once per epoch.
    """

    def __init__(
        self,
        model: LLMForCausalLM,
        tokenizer: LLMTokenizer,
        config: LLMConfig,
        **kwargs
    ):
        super().__init__(model, tokenizer, config, **kwargs)

        # Optimizer: AdamW with a lower default LR than pre-training.
        self.optimizer = AdamW(
            self.model.parameters(),
            lr=getattr(config, 'learning_rate', 5e-5),
            weight_decay=0.01
        )

        # Cosine schedule. T_max is in *epochs*, so the scheduler is
        # stepped once per epoch (see train()); per-batch stepping would
        # finish the cycle after T_max batches and oscillate the LR.
        self.scheduler = CosineAnnealingLR(
            self.optimizer,
            T_max=getattr(config, 'num_epochs', 10)
        )

        # Gradient scaler for mixed-precision (AMP) training.
        self.scaler = torch.cuda.amp.GradScaler()

    def train_epoch(self, dataloader: DataLoader, epoch: int) -> Dict[str, float]:
        """Train for one epoch; returns {'loss': mean loss over batches}."""
        self.model.train()
        total_loss = 0.0
        num_batches = len(dataloader)

        progress_bar = tqdm(dataloader, desc=f"SFT Epoch {epoch}")

        for batch_idx, batch in enumerate(progress_bar):
            # Move batch tensors to the training device.
            input_ids = batch['input_ids'].to(self.device)
            labels = batch['labels'].to(self.device)
            loss_mask = batch['loss_mask'].to(self.device)

            # Forward pass under autocast (mixed precision).
            with torch.cuda.amp.autocast():
                outputs = self.model(input_ids=input_ids, labels=labels)
                loss = outputs.loss

                # Restrict the loss to assistant-response tokens.
                # NOTE(review): effective only if the model returns a
                # per-token (unreduced) loss; if `outputs.loss` is already
                # a scalar mean this reduces back to `loss` — confirm
                # against LLMForCausalLM.
                if loss_mask is not None:
                    loss = (loss * loss_mask).sum() / loss_mask.sum()

                # Add the MoE load-balancing auxiliary loss when present.
                if hasattr(outputs, 'aux_loss') and outputs.aux_loss > 0:
                    loss = loss + outputs.aux_loss

            # Backward pass with gradient scaling.
            self.scaler.scale(loss).backward()

            # Unscale before clipping so the threshold is in true units.
            self.scaler.unscale_(self.optimizer)
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)

            # Optimizer step (skipped internally on inf/NaN gradients).
            self.scaler.step(self.optimizer)
            self.scaler.update()
            self.optimizer.zero_grad()

            # Track running loss.
            total_loss += loss.item()
            avg_loss = total_loss / (batch_idx + 1)

            # Update the progress bar.
            progress_bar.set_postfix({
                'loss': f'{loss.item():.4f}',
                'avg_loss': f'{avg_loss:.4f}',
                'lr': f'{self.optimizer.param_groups[0]["lr"]:.2e}'
            })

            # Log to wandb.
            if self.use_wandb:
                wandb.log({
                    'sft/loss': loss.item(),
                    'sft/avg_loss': avg_loss,
                    'sft/learning_rate': self.optimizer.param_groups[0]['lr'],
                    'sft/epoch': epoch
                })

        return {'loss': total_loss / num_batches}

    def train(
        self,
        train_dataloader: DataLoader,
        num_epochs: int = 5,
        save_interval: int = 500,
        save_path: str = "./checkpoints"
    ):
        """Full SFT loop.

        Args:
            train_dataloader: batches of SFT data.
            num_epochs: number of epochs to run.
            save_interval: save a checkpoint every N epochs.
            save_path: directory for checkpoints; a final checkpoint is
                always written under `<save_path>/sft_final`.
        """
        for epoch in range(num_epochs):
            print(f"\n开始SFT训练 Epoch {epoch + 1}/{num_epochs}")

            # Train one epoch.
            metrics = self.train_epoch(train_dataloader, epoch)

            # Advance the cosine schedule once per epoch (T_max is in epochs).
            self.scheduler.step()

            print(f"SFT Epoch {epoch + 1} 完成，平均损失: {metrics['loss']:.4f}")

            # Periodic checkpoint.
            if (epoch + 1) % save_interval == 0:
                checkpoint_path = os.path.join(save_path, f"sft_epoch_{epoch + 1}")
                self.save_model(checkpoint_path)
                print(f"模型已保存到: {checkpoint_path}")

        # Always persist the final model: with the default save_interval
        # the periodic condition may never trigger within num_epochs.
        final_path = os.path.join(save_path, "sft_final")
        self.save_model(final_path)
        print(f"模型已保存到: {final_path}")


class DPOTrainer(Trainer):
    """Trainer for preference optimization with a DPO-style objective.

    NOTE(review): canonical DPO subtracts per-sequence log-probs under a
    frozen *reference* model; no reference model is used here, so this is
    the reference-free variant — confirm that is intended.
    """

    def __init__(
        self,
        model: LLMForCausalLM,
        tokenizer: LLMTokenizer,
        config: LLMConfig,
        beta: float = 0.1,
        **kwargs
    ):
        super().__init__(model, tokenizer, config, **kwargs)
        # Temperature of the preference margin in the DPO objective.
        self.beta = beta

        # Optimizer: AdamW with a small LR, typical for preference tuning.
        self.optimizer = AdamW(
            self.model.parameters(),
            lr=getattr(config, 'learning_rate', 1e-5),
            weight_decay=0.01
        )

        # Cosine schedule. T_max is in *epochs*, so the scheduler is
        # stepped once per epoch (see train()); per-batch stepping would
        # finish the cycle after T_max batches and oscillate the LR.
        self.scheduler = CosineAnnealingLR(
            self.optimizer,
            T_max=getattr(config, 'num_epochs', 5)
        )

        # Gradient scaler for mixed-precision (AMP) training.
        self.scaler = torch.cuda.amp.GradScaler()

    def compute_dpo_loss(
        self,
        chosen_logits: torch.Tensor,
        rejected_logits: torch.Tensor,
        chosen_labels: torch.Tensor,
        rejected_labels: torch.Tensor,
        chosen_mask: torch.Tensor,
        rejected_mask: torch.Tensor
    ) -> torch.Tensor:
        """Compute the (reference-free) DPO loss for one batch.

        Args:
            chosen_logits / rejected_logits: (batch, seq, vocab) logits.
            chosen_labels / rejected_labels: (batch, seq) target token ids
                used as gather indices — assumed non-negative (no -100
                ignore ids; excluded positions must be zeroed by the
                masks instead). TODO confirm against the DPO dataset.
            chosen_mask / rejected_mask: (batch, seq) 0/1 masks selecting
                the response tokens that contribute to the loss.

        Returns:
            Scalar loss tensor.
        """
        # Per-token log-probabilities over the vocabulary.
        chosen_lp = F.log_softmax(chosen_logits, dim=-1)
        rejected_lp = F.log_softmax(rejected_logits, dim=-1)

        # Log-probability of each target token.
        chosen_tok = chosen_lp.gather(-1, chosen_labels.unsqueeze(-1)).squeeze(-1)
        rejected_tok = rejected_lp.gather(-1, rejected_labels.unsqueeze(-1)).squeeze(-1)

        # Zero out positions outside the response span.
        chosen_tok = chosen_tok * chosen_mask
        rejected_tok = rejected_tok * rejected_mask

        # Sequence-level log-probabilities.
        chosen_seq_lp = chosen_tok.sum(dim=-1)
        rejected_seq_lp = rejected_tok.sum(dim=-1)

        # Maximize the margin between chosen and rejected sequences.
        margin = self.beta * (chosen_seq_lp - rejected_seq_lp)
        return -F.logsigmoid(margin).mean()

    def train_epoch(self, dataloader: DataLoader, epoch: int) -> Dict[str, float]:
        """Train for one epoch; returns {'loss': mean loss over batches}."""
        self.model.train()
        total_loss = 0.0
        num_batches = len(dataloader)

        progress_bar = tqdm(dataloader, desc=f"DPO Epoch {epoch}")

        for batch_idx, batch in enumerate(progress_bar):
            # Move preference-pair tensors to the training device.
            x_chosen = batch['x_chosen'].to(self.device)
            y_chosen = batch['y_chosen'].to(self.device)
            mask_chosen = batch['mask_chosen'].to(self.device)
            x_rejected = batch['x_rejected'].to(self.device)
            y_rejected = batch['y_rejected'].to(self.device)
            mask_rejected = batch['mask_rejected'].to(self.device)

            # Forward pass under autocast (mixed precision).
            with torch.cuda.amp.autocast():
                # Logits for the chosen and rejected completions.
                # NOTE(review): `labels` also triggers the model's internal
                # CE loss, which is unused here — verify whether passing
                # labels is required by LLMForCausalLM or can be dropped.
                chosen_outputs = self.model(input_ids=x_chosen, labels=y_chosen)
                rejected_outputs = self.model(input_ids=x_rejected, labels=y_rejected)

                chosen_logits = chosen_outputs.logits
                rejected_logits = rejected_outputs.logits

                # Preference loss over the pair.
                loss = self.compute_dpo_loss(
                    chosen_logits, rejected_logits,
                    y_chosen, y_rejected,
                    mask_chosen, mask_rejected
                )

                # Add MoE load-balancing auxiliary losses when present.
                if hasattr(chosen_outputs, 'aux_loss') and chosen_outputs.aux_loss > 0:
                    loss = loss + chosen_outputs.aux_loss
                if hasattr(rejected_outputs, 'aux_loss') and rejected_outputs.aux_loss > 0:
                    loss = loss + rejected_outputs.aux_loss

            # Backward pass with gradient scaling.
            self.scaler.scale(loss).backward()

            # Unscale before clipping so the threshold is in true units.
            self.scaler.unscale_(self.optimizer)
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)

            # Optimizer step (skipped internally on inf/NaN gradients).
            self.scaler.step(self.optimizer)
            self.scaler.update()
            self.optimizer.zero_grad()

            # Track running loss.
            total_loss += loss.item()
            avg_loss = total_loss / (batch_idx + 1)

            # Update the progress bar.
            progress_bar.set_postfix({
                'loss': f'{loss.item():.4f}',
                'avg_loss': f'{avg_loss:.4f}',
                'lr': f'{self.optimizer.param_groups[0]["lr"]:.2e}'
            })

            # Log to wandb.
            if self.use_wandb:
                wandb.log({
                    'dpo/loss': loss.item(),
                    'dpo/avg_loss': avg_loss,
                    'dpo/learning_rate': self.optimizer.param_groups[0]['lr'],
                    'dpo/epoch': epoch
                })

        return {'loss': total_loss / num_batches}

    def train(
        self,
        train_dataloader: DataLoader,
        num_epochs: int = 3,
        save_interval: int = 100,
        save_path: str = "./checkpoints"
    ):
        """Full DPO loop.

        Args:
            train_dataloader: batches of preference-pair data.
            num_epochs: number of epochs to run.
            save_interval: save a checkpoint every N epochs.
            save_path: directory for checkpoints; a final checkpoint is
                always written under `<save_path>/dpo_final`.
        """
        for epoch in range(num_epochs):
            print(f"\n开始DPO训练 Epoch {epoch + 1}/{num_epochs}")

            # Train one epoch.
            metrics = self.train_epoch(train_dataloader, epoch)

            # Advance the cosine schedule once per epoch (T_max is in epochs).
            self.scheduler.step()

            print(f"DPO Epoch {epoch + 1} 完成，平均损失: {metrics['loss']:.4f}")

            # Periodic checkpoint.
            if (epoch + 1) % save_interval == 0:
                checkpoint_path = os.path.join(save_path, f"dpo_epoch_{epoch + 1}")
                self.save_model(checkpoint_path)
                print(f"模型已保存到: {checkpoint_path}")

        # Always persist the final model: with the default save_interval
        # the periodic condition may never trigger within num_epochs.
        final_path = os.path.join(save_path, "dpo_final")
        self.save_model(final_path)
        print(f"模型已保存到: {final_path}")