# -*- coding: utf-8 -*-
"""
训练脚本 - 训练Transformer语言模型
包含训练循环、验证、模型保存、日志记录等功能
"""

import os
import time
import json
import logging
from datetime import datetime
from typing import Dict, Optional

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, LinearLR
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import coloredlogs

from config import config, ConfigTemplates
from data_loader import DataManager
from model import TransformerLanguageModel

# Configure colored console logging for the whole module; a per-experiment
# file handler is attached later in Trainer.setup_logging().
coloredlogs.install(level='INFO', fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class Trainer:
    """Trainer -- manages the training process of a Transformer language model.

    Responsibilities: device selection, optimizer/scheduler construction,
    the main epoch loop (with optional fp16 mixed precision and gradient
    accumulation), validation with early stopping, checkpointing, and
    TensorBoard/file logging.
    """

    def __init__(self, model: TransformerLanguageModel, data_manager: DataManager,
                 config_obj: Optional[object] = None):
        """Initialize the trainer.

        Args:
            model: language model to train (moved onto the selected device).
            data_manager: supplies the train/val dataloaders and tokenizer.
            config_obj: configuration object; the module-level ``config``
                is used when None.
        """
        self.model = model
        self.data_manager = data_manager
        self.config = config_obj or config

        # Device selection and model placement
        self.device = self._setup_device()
        self.model.to(self.device)

        # Optimizer and learning-rate scheduler
        self.optimizer = self._setup_optimizer()
        self.scheduler = self._setup_scheduler()

        # NOTE(review): this criterion is never used below -- the model
        # computes its own loss in forward(); kept for backward compatibility.
        self.criterion = nn.CrossEntropyLoss(ignore_index=-100)

        # Training state
        self.current_epoch = 0
        self.global_step = 0
        self.best_val_loss = float('inf')
        self.early_stopping_counter = 0

        # Logging and output directories (creates the experiment dir)
        self.setup_logging()
        self.setup_directories()

        # Mixed-precision gradient scaler (only when fp16 is enabled).
        # NOTE(review): torch.cuda.amp.GradScaler assumes a CUDA device; on a
        # CPU-only host it disables itself with a warning -- confirm fp16 is
        # only enabled together with CUDA.
        self.scaler = torch.cuda.amp.GradScaler() if self.config.training.use_fp16 else None

        logger.info(f"训练器初始化完成，使用设备: {self.device}")
        logger.info(f"模型参数数量: {self.model.get_model_size():,}")

    def _setup_device(self) -> torch.device:
        """Resolve the training device from config ("auto" prefers CUDA)."""
        if self.config.system.device == "auto":
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            device = torch.device(self.config.system.device)

        if device.type == "cuda":
            logger.info(f"使用GPU: {torch.cuda.get_device_name()}")
            logger.info(f"GPU内存: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
        else:
            logger.info("使用CPU训练")

        return device

    def _setup_optimizer(self) -> optim.Optimizer:
        """Build the optimizer named in config (adam / adamw / sgd).

        Raises:
            ValueError: if the configured optimizer name is not supported.
        """
        if self.config.training.optimizer.lower() == "adam":
            optimizer = optim.Adam(
                self.model.parameters(),
                lr=self.config.training.learning_rate,
                betas=(self.config.training.beta1, self.config.training.beta2),
                eps=self.config.training.eps
            )
        elif self.config.training.optimizer.lower() == "adamw":
            optimizer = optim.AdamW(
                self.model.parameters(),
                lr=self.config.training.learning_rate,
                betas=(self.config.training.beta1, self.config.training.beta2),
                eps=self.config.training.eps,
                weight_decay=self.config.training.weight_decay
            )
        elif self.config.training.optimizer.lower() == "sgd":
            optimizer = optim.SGD(
                self.model.parameters(),
                lr=self.config.training.learning_rate,
                momentum=0.9,
                weight_decay=self.config.training.weight_decay
            )
        else:
            raise ValueError(f"不支持的优化器: {self.config.training.optimizer}")

        logger.info(f"优化器: {self.config.training.optimizer}, 学习率: {self.config.training.learning_rate}")
        return optimizer

    def _setup_scheduler(self) -> Optional[object]:
        """Build the LR scheduler named in config (cosine / linear / constant).

        Both schedulers are stepped once per epoch (see train()), so
        ``num_epochs`` is used as the horizon. Returns None for "constant".

        Raises:
            ValueError: if the configured scheduler name is not supported.
        """
        if self.config.training.scheduler.lower() == "cosine":
            scheduler = CosineAnnealingLR(
                self.optimizer,
                T_max=self.config.training.num_epochs,
                eta_min=self.config.training.learning_rate * 0.01
            )
        elif self.config.training.scheduler.lower() == "linear":
            scheduler = LinearLR(
                self.optimizer,
                start_factor=1.0,
                end_factor=0.01,
                total_iters=self.config.training.num_epochs
            )
        elif self.config.training.scheduler.lower() == "constant":
            scheduler = None
        else:
            raise ValueError(f"不支持的调度器: {self.config.training.scheduler}")

        logger.info(f"学习率调度器: {self.config.training.scheduler}")
        return scheduler

    def setup_logging(self):
        """Create the timestamped experiment dir, TensorBoard writer and file log."""
        # Unique experiment directory: <output_dir>/<experiment_name>_<timestamp>
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.experiment_dir = os.path.join(
            self.config.system.output_dir,
            f"{self.config.system.experiment_name}_{timestamp}"
        )
        os.makedirs(self.experiment_dir, exist_ok=True)

        # TensorBoard event files live alongside the experiment artifacts
        self.writer = SummaryWriter(os.path.join(self.experiment_dir, "tensorboard"))

        # Mirror log records to a file inside the experiment directory
        log_file = os.path.join(self.experiment_dir, "training.log")
        file_handler = logging.FileHandler(log_file, encoding='utf-8')
        file_handler.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

        logger.info(f"实验目录: {self.experiment_dir}")

    def setup_directories(self):
        """Create the model and checkpoint output directories."""
        self.model_save_dir = os.path.join(self.experiment_dir, "models")
        self.checkpoint_dir = os.path.join(self.experiment_dir, "checkpoints")

        os.makedirs(self.model_save_dir, exist_ok=True)
        os.makedirs(self.checkpoint_dir, exist_ok=True)

    def save_config(self):
        """Persist the training configuration to the experiment directory."""
        config_path = os.path.join(self.experiment_dir, "config.json")
        self.config.save_config(config_path)
        logger.info(f"配置已保存到: {config_path}")

    def save_checkpoint(self, epoch: int, is_best: bool = False):
        """Save a training checkpoint.

        Always overwrites ``latest_checkpoint.pth``; additionally writes
        ``best_model.pth`` when ``is_best`` and a per-epoch snapshot every
        5 epochs.

        Args:
            epoch: current epoch number (stored for resuming).
            is_best: whether this epoch achieved the best validation loss.
        """
        checkpoint = {
            'epoch': epoch,
            'global_step': self.global_step,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict() if self.scheduler else None,
            'best_val_loss': self.best_val_loss,
            'config': self.config.__dict__
        }

        # Latest checkpoint (rolling)
        checkpoint_path = os.path.join(self.checkpoint_dir, "latest_checkpoint.pth")
        torch.save(checkpoint, checkpoint_path)

        # Best-so-far model
        if is_best:
            best_model_path = os.path.join(self.model_save_dir, "best_model.pth")
            torch.save(checkpoint, best_model_path)
            logger.info(f"保存最佳模型: {best_model_path}")

        # Periodic per-epoch snapshot
        if epoch % 5 == 0:
            epoch_checkpoint_path = os.path.join(self.checkpoint_dir, f"checkpoint_epoch_{epoch}.pth")
            torch.save(checkpoint, epoch_checkpoint_path)

    def load_checkpoint(self, checkpoint_path: str):
        """Restore model/optimizer/scheduler and training state from a checkpoint.

        Logs a warning and returns silently if the file does not exist.
        """
        if not os.path.exists(checkpoint_path):
            logger.warning(f"检查点文件不存在: {checkpoint_path}")
            return

        # NOTE(review): newer PyTorch defaults torch.load to weights_only=True,
        # which would reject the embedded config dict -- confirm against the
        # pinned torch version.
        checkpoint = torch.load(checkpoint_path, map_location=self.device)

        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

        if self.scheduler and checkpoint['scheduler_state_dict']:
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])

        self.current_epoch = checkpoint['epoch']
        self.global_step = checkpoint['global_step']
        self.best_val_loss = checkpoint['best_val_loss']

        logger.info(f"检查点已加载: {checkpoint_path}")
        logger.info(f"从第 {self.current_epoch} 轮继续训练")

    def train_epoch(self, train_loader) -> Dict[str, float]:
        """Run one training epoch with gradient accumulation and optional fp16.

        Returns:
            dict with the epoch-average 'loss', the corresponding
            'perplexity', and the total number of 'tokens' processed.
        """
        self.model.train()
        total_loss = 0.0
        total_tokens = 0

        progress_bar = tqdm(train_loader, desc=f"Epoch {self.current_epoch}")

        for batch_idx, batch in enumerate(progress_bar):
            # Move batch tensors to the training device
            input_ids = batch['input_ids'].to(self.device)
            labels = batch['labels'].to(self.device)
            attention_mask = batch['attention_mask'].to(self.device)

            # Forward pass (under autocast when fp16 is enabled)
            if self.config.training.use_fp16:
                with torch.cuda.amp.autocast():
                    outputs = self.model(input_ids, attention_mask, labels)
                    raw_loss = outputs['loss']
            else:
                outputs = self.model(input_ids, attention_mask, labels)
                raw_loss = outputs['loss']

            # Scale for gradient accumulation; keep raw_loss for reporting.
            # FIX: the scaled loss was previously used for the progress bar
            # and per-step TensorBoard loss/perplexity, under-reporting them
            # by the accumulation factor when it is > 1.
            loss = raw_loss / self.config.training.gradient_accumulation_steps

            # Backward pass (through the scaler when fp16 is enabled)
            if self.config.training.use_fp16:
                self.scaler.scale(loss).backward()
            else:
                loss.backward()

            # Parameter update every `gradient_accumulation_steps` batches
            if (batch_idx + 1) % self.config.training.gradient_accumulation_steps == 0:
                if self.config.training.use_fp16:
                    # Unscale before clipping so the norm is in true units
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.training.max_grad_norm)
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
                else:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.training.max_grad_norm)
                    self.optimizer.step()

                self.optimizer.zero_grad()
                self.global_step += 1

            # Accumulate statistics on the unscaled loss
            total_loss += raw_loss.item()
            total_tokens += input_ids.numel()

            # Progress bar shows the unscaled per-batch loss
            progress_bar.set_postfix({
                'loss': f"{raw_loss.item():.4f}",
                'lr': f"{self.optimizer.param_groups[0]['lr']:.2e}"
            })

            # Periodic TensorBoard logging (per optimization step counter)
            if self.global_step % self.config.training.logging_steps == 0:
                self.writer.add_scalar('train/loss', raw_loss.item(), self.global_step)
                self.writer.add_scalar('train/learning_rate', self.optimizer.param_groups[0]['lr'], self.global_step)
                self.writer.add_scalar('train/perplexity', torch.exp(raw_loss).item(), self.global_step)

        avg_loss = total_loss / len(train_loader)
        perplexity = torch.exp(torch.tensor(avg_loss)).item()

        return {
            'loss': avg_loss,
            'perplexity': perplexity,
            'tokens': total_tokens
        }

    def validate(self, val_loader) -> Dict[str, float]:
        """Evaluate the model on the validation set (no gradients).

        Returns:
            dict with the average 'loss', 'perplexity' and total 'tokens'.
        """
        self.model.eval()
        total_loss = 0.0
        total_tokens = 0

        with torch.no_grad():
            for batch in tqdm(val_loader, desc="验证"):
                input_ids = batch['input_ids'].to(self.device)
                labels = batch['labels'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)

                outputs = self.model(input_ids, attention_mask, labels)
                loss = outputs['loss']

                total_loss += loss.item()
                total_tokens += input_ids.numel()

        avg_loss = total_loss / len(val_loader)
        perplexity = torch.exp(torch.tensor(avg_loss)).item()

        return {
            'loss': avg_loss,
            'perplexity': perplexity,
            'tokens': total_tokens
        }

    def train(self):
        """Main training loop: train, validate, checkpoint, early-stop."""
        logger.info("开始训练...")

        # Persist the config so the run is reproducible
        self.save_config()

        # Data loaders (validation is optional)
        train_loader = self.data_manager.get_dataloader('train', shuffle=True)
        val_loader = self.data_manager.get_dataloader('val', shuffle=False) if self.data_manager.val_dataset else None

        logger.info(f"训练批次数: {len(train_loader)}")
        if val_loader:
            logger.info(f"验证批次数: {len(val_loader)}")

        # Epoch loop (resumes from current_epoch after load_checkpoint)
        for epoch in range(self.current_epoch, self.config.training.num_epochs):
            self.current_epoch = epoch
            start_time = time.time()

            # Train one epoch
            train_metrics = self.train_epoch(train_loader)

            # Validate periodically. NOTE(review): eval_steps is interpreted
            # here as an epoch interval despite the "steps" name -- confirm.
            val_metrics = None
            if val_loader and epoch % self.config.training.eval_steps == 0:
                val_metrics = self.validate(val_loader)

                # Record validation metrics
                self.writer.add_scalar('val/loss', val_metrics['loss'], epoch)
                self.writer.add_scalar('val/perplexity', val_metrics['perplexity'], epoch)

                # Track the best model by validation loss
                is_best = val_metrics['loss'] < self.best_val_loss
                if is_best:
                    self.best_val_loss = val_metrics['loss']
                    self.early_stopping_counter = 0
                else:
                    self.early_stopping_counter += 1

                # Early stopping after `early_stopping_patience` stale evals
                if self.early_stopping_counter >= self.config.training.early_stopping_patience:
                    logger.info(f"早停触发，在第 {epoch} 轮停止训练")
                    break
            else:
                is_best = False

            # Step the LR scheduler once per epoch
            if self.scheduler:
                self.scheduler.step()

            # Checkpoint periodically and whenever a new best is found
            if epoch % self.config.training.save_steps == 0 or is_best:
                self.save_checkpoint(epoch, is_best)

            # Per-epoch training metrics
            self.writer.add_scalar('train/epoch_loss', train_metrics['loss'], epoch)
            self.writer.add_scalar('train/epoch_perplexity', train_metrics['perplexity'], epoch)

            # Human-readable epoch summary
            epoch_time = time.time() - start_time
            logger.info(f"Epoch {epoch}/{self.config.training.num_epochs}:")
            logger.info(f"  训练损失: {train_metrics['loss']:.4f}, 困惑度: {train_metrics['perplexity']:.2f}")
            if val_metrics:
                logger.info(f"  验证损失: {val_metrics['loss']:.4f}, 困惑度: {val_metrics['perplexity']:.2f}")
            logger.info(f"  学习率: {self.optimizer.param_groups[0]['lr']:.2e}")
            logger.info(f"  耗时: {epoch_time:.2f}秒")
            logger.info("-" * 50)

        # Save the final model weights (regardless of early stopping)
        final_model_path = os.path.join(self.model_save_dir, "final_model.pth")
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'config': self.config.__dict__,
            'model_info': self.model.get_model_info()
        }, final_model_path)

        logger.info(f"训练完成! 最终模型保存到: {final_model_path}")
        logger.info(f"最佳验证损失: {self.best_val_loss:.4f}")

        # Flush and close TensorBoard
        self.writer.close()


def main():
    """Entry point: seed RNGs, prepare data, build the model and run training."""
    # Seed CPU and GPU RNGs for reproducibility.
    torch.manual_seed(config.system.seed)
    if torch.cuda.is_available():
        # FIX: seed every visible GPU, not only the current device
        torch.cuda.manual_seed_all(config.system.seed)

    logger.info("初始化训练环境...")

    # Build the data manager and load the datasets
    data_manager = DataManager()
    data_manager.prepare_data(
        train_path=config.data.train_data_path,
        val_path=config.data.val_data_path,
        test_path=config.data.test_data_path
    )

    # Sync the model's vocab size with the tokenizer before constructing
    # the model (the model presumably reads the module-level config -- its
    # constructor takes no arguments here).
    config.model.vocab_size = data_manager.tokenizer.vocab_size

    model = TransformerLanguageModel()
    logger.info(f"模型创建成功，参数数量: {model.get_model_size():,}")

    # Build the trainer (moves the model to the device, sets up logging)
    trainer = Trainer(model, data_manager, config)

    # Optionally resume from an earlier checkpoint
    if config.system.resume_from_checkpoint:
        trainer.load_checkpoint(config.system.resume_from_checkpoint)

    trainer.train()

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Graceful message on Ctrl-C; do not re-raise
        logger.info("训练被用户中断")
    except Exception as e:
        # Log the failure, then re-raise so the process exits non-zero
        logger.error(f"训练过程中发生错误: {str(e)}")
        raise