"""
训练器 - 封装完整的训练流程
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingLR, StepLR
from tqdm import tqdm
import numpy as np
from pathlib import Path

from data.dataset import create_dataloaders
from data.utils import load_csv_data, print_dataset_statistics
from .metrics import MetricsCalculator
from utils.logger import Logger
from utils.checkpoint import save_checkpoint, load_checkpoint


class Trainer:
    """
    Trainer class.

    Encapsulates the full training pipeline, including:
    - data loading
    - model training and validation
    - learning-rate scheduling
    - model checkpointing
    - logging

    Args:
        model: model instance (forward signature: model(images, scales, features))
        config: configuration object (device, paths, hyperparameters, ...)
    """

    def __init__(self, model, config):
        self.model = model
        self.config = config
        self.device = config.device

        # Create the save directory (one sub-directory per experiment).
        self.save_dir = Path(config.save_dir) / config.experiment_name
        self.save_dir.mkdir(parents=True, exist_ok=True)

        # Load train/val data and build the dataloaders.
        self._load_data()

        # Set up the optimizer and the learning-rate scheduler.
        self._setup_optimizer()

        # Loss function and metric calculator.
        self.criterion = nn.MSELoss()
        self.metrics_calc = MetricsCalculator()

        # Logger (writes per-epoch records under save_dir).
        self.logger = Logger(self.save_dir, config.experiment_name)

        # Training history, appended to once per epoch.
        self.history = {
            'train_loss': [],
            'val_loss': [],
            'train_metrics': [],
            'val_metrics': [],
            'learning_rates': []
        }

        # Best-model tracking: higher validation R² is better,
        # so start from -inf so the first epoch always counts as "best".
        self.best_metric = -float('inf')
        self.best_epoch = 0

    def _load_data(self):
        """Load CSV data and build the train/val dataloaders."""
        print("\n" + "="*70)
        print("Loading Data".center(70))
        print("="*70)

        # Load the training and validation splits.
        train_data = load_csv_data(self.config.data_dir, self.config.train_csv)
        val_data = load_csv_data(self.config.data_dir, self.config.val_csv)

        if len(train_data) == 0 or len(val_data) == 0:
            raise ValueError("No data loaded! Check your data path and CSV files.")

        # Print dataset statistics.
        print_dataset_statistics(train_data, val_data)

        # Create the dataloaders (augmentation only on the training split).
        self.train_loader, self.val_loader = create_dataloaders(
            train_data, val_data,
            batch_size=self.config.batch_size,
            num_workers=self.config.num_workers,
            image_size=self.config.image_size,
            augment_train=self.config.augment,
            filter_negative=self.config.filter_negative
        )

    def _setup_optimizer(self):
        """Set up the optimizer and the learning-rate scheduler from config.

        Raises:
            ValueError: if config.optimizer is not 'adam', 'adamw' or 'sgd'.
        """
        optimizer_name = self.config.optimizer.lower()

        if optimizer_name == 'adam':
            self.optimizer = optim.Adam(
                self.model.parameters(),
                lr=self.config.learning_rate,
                weight_decay=self.config.weight_decay
            )
        elif optimizer_name == 'adamw':
            self.optimizer = optim.AdamW(
                self.model.parameters(),
                lr=self.config.learning_rate,
                weight_decay=self.config.weight_decay
            )
        elif optimizer_name == 'sgd':
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=self.config.learning_rate,
                momentum=0.9,
                weight_decay=self.config.weight_decay
            )
        else:
            raise ValueError(f"Unknown optimizer: {optimizer_name}")

        # Learning-rate scheduler. 'plateau' steps on validation loss;
        # the others step once per epoch (see train()).
        scheduler_type = self.config.scheduler_type

        if scheduler_type == 'plateau':
            self.scheduler = ReduceLROnPlateau(
                self.optimizer,
                mode=self.config.scheduler_mode,
                factor=self.config.scheduler_factor,
                patience=self.config.scheduler_patience,
                min_lr=self.config.scheduler_min_lr,
                # NOTE(review): `verbose` is deprecated in recent torch
                # versions — confirm against the pinned torch release.
                verbose=True
            )
        elif scheduler_type == 'cosine':
            self.scheduler = CosineAnnealingLR(
                self.optimizer,
                T_max=self.config.epochs,
                eta_min=self.config.scheduler_min_lr
            )
        elif scheduler_type == 'step':
            self.scheduler = StepLR(
                self.optimizer,
                step_size=30,
                gamma=0.1
            )
        else:
            self.scheduler = None
            print("Warning: No learning rate scheduler")

    def train_epoch(self, epoch):
        """
        Train for one epoch.

        Args:
            epoch: current epoch number (1-based, for display only)

        Returns:
            epoch_loss: sample-weighted average loss over the epoch
            metrics: dict of evaluation metrics
        """
        self.model.train()
        running_loss = 0.0
        samples_seen = 0  # count actual samples: the last batch may be partial
        all_preds = []
        all_targets = []

        # Progress bar.
        pbar = tqdm(
            enumerate(self.train_loader),
            total=len(self.train_loader),
            desc=f'Epoch {epoch}/{self.config.epochs} [Train]',
            ncols=120,
            bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}] {postfix}'
        )

        for batch_idx, (images, scales, features, targets) in pbar:
            # Move the batch to the target device.
            images = images.to(self.device)
            scales = scales.to(self.device)
            features = features.to(self.device)
            targets = targets.to(self.device)

            # Forward pass.
            self.optimizer.zero_grad()
            outputs = self.model(images, scales, features)
            loss = self.criterion(outputs, targets)

            # Backward pass.
            loss.backward()

            # Gradient clipping (guards against exploding gradients).
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=5.0)

            self.optimizer.step()

            # Accumulate statistics, weighted by batch size.
            batch_loss = loss.item()
            running_loss += batch_loss * images.size(0)
            samples_seen += images.size(0)

            preds = outputs.detach().cpu().numpy()
            targets_np = targets.cpu().numpy()
            all_preds.extend(preds)
            all_targets.extend(targets_np)

            # Update the progress bar. Divide by the number of samples
            # actually seen (not batch_idx * batch_size, which is wrong
            # for a partial final batch and breaks when batch_size is None).
            avg_loss = running_loss / samples_seen
            lr = self.optimizer.param_groups[0]['lr']
            pbar.set_postfix({
                'loss': f'{batch_loss:.4f}',
                'avg': f'{avg_loss:.4f}',
                'lr': f'{lr:.2e}'
            })

        # Epoch-level loss and metrics.
        epoch_loss = running_loss / len(self.train_loader.dataset)
        metrics = self.metrics_calc.calculate_all_metrics(all_targets, all_preds)

        return epoch_loss, metrics

    def validate_epoch(self, epoch):
        """
        Validate for one epoch (no gradient updates).

        Args:
            epoch: current epoch number (1-based, for display only)

        Returns:
            epoch_loss: sample-weighted average loss over the epoch
            metrics: dict of evaluation metrics
        """
        self.model.eval()
        running_loss = 0.0
        samples_seen = 0  # count actual samples: the last batch may be partial
        all_preds = []
        all_targets = []

        # Progress bar.
        pbar = tqdm(
            enumerate(self.val_loader),
            total=len(self.val_loader),
            desc=f'Epoch {epoch}/{self.config.epochs} [Val]',
            ncols=120,
            bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}] {postfix}'
        )

        with torch.no_grad():
            for batch_idx, (images, scales, features, targets) in pbar:
                # Move the batch to the target device.
                images = images.to(self.device)
                scales = scales.to(self.device)
                features = features.to(self.device)
                targets = targets.to(self.device)

                # Forward pass only.
                outputs = self.model(images, scales, features)
                loss = self.criterion(outputs, targets)

                # Accumulate statistics, weighted by batch size.
                batch_loss = loss.item()
                running_loss += batch_loss * images.size(0)
                samples_seen += images.size(0)

                preds = outputs.cpu().numpy()
                targets_np = targets.cpu().numpy()
                all_preds.extend(preds)
                all_targets.extend(targets_np)

                # Update the progress bar using the true sample count
                # (correct even for a partial final batch).
                avg_loss = running_loss / samples_seen
                pbar.set_postfix({
                    'loss': f'{batch_loss:.4f}',
                    'avg': f'{avg_loss:.4f}'
                })

        # Epoch-level loss and metrics.
        epoch_loss = running_loss / len(self.val_loader.dataset)
        metrics = self.metrics_calc.calculate_all_metrics(all_targets, all_preds)

        return epoch_loss, metrics

    def train(self, start_epoch=0):
        """
        Run the full training loop.

        Args:
            start_epoch: starting epoch (used when resuming from a checkpoint)
        """
        print("\n" + "="*70)
        print("Starting Training".center(70))
        print("="*70)
        print(f"Training from epoch {start_epoch + 1} to {self.config.epochs}")
        print(f"Device: {self.device}")
        print("="*70)

        for epoch in range(start_epoch + 1, self.config.epochs + 1):
            # Train.
            train_loss, train_metrics = self.train_epoch(epoch)

            # Validate.
            val_loss, val_metrics = self.validate_epoch(epoch)

            # Learning-rate scheduling: plateau steps on validation loss,
            # the other schedulers step once per epoch.
            if self.scheduler:
                if isinstance(self.scheduler, ReduceLROnPlateau):
                    self.scheduler.step(val_loss)
                else:
                    self.scheduler.step()

            # Record history.
            self.history['train_loss'].append(train_loss)
            self.history['val_loss'].append(val_loss)
            self.history['train_metrics'].append(train_metrics)
            self.history['val_metrics'].append(val_metrics)
            self.history['learning_rates'].append(self.optimizer.param_groups[0]['lr'])

            # Print the epoch summary (must run BEFORE best_metric is
            # updated so the "new best" banner compares against the old best).
            self._print_epoch_summary(epoch, train_loss, train_metrics, val_loss, val_metrics)

            # Save the best model (highest validation R² so far).
            if val_metrics['R²'] > self.best_metric:
                self.best_metric = val_metrics['R²']
                self.best_epoch = epoch
                self._save_best_model(epoch, val_metrics)

            # Save a periodic checkpoint.
            if epoch % self.config.save_period == 0:
                self._save_checkpoint(epoch)

            # Log the epoch.
            self.logger.log_epoch(epoch, train_loss, train_metrics, val_loss, val_metrics)

        # Training finished.
        self._training_complete()

    def _print_epoch_summary(self, epoch, train_loss, train_metrics, val_loss, val_metrics):
        """Print a formatted summary of one epoch's train/val results."""
        print(f"\n{'='*70}")
        print(f"Epoch {epoch}/{self.config.epochs} Summary".center(70))
        print(f"{'='*70}")

        print(f"\n[Train] Loss: {train_loss:.4f} | MAE: {train_metrics['MAE']:.4f} | "
              f"RMSE: {train_metrics['RMSE']:.4f} | R²: {train_metrics['R²']:.4f}")
        print(f"        MAPE: {train_metrics['MAPE']:.2f}% | Acc@10%: {train_metrics['Accuracy@10%']:.2f}%")

        print(f"\n[Val]   Loss: {val_loss:.4f} | MAE: {val_metrics['MAE']:.4f} | "
              f"RMSE: {val_metrics['RMSE']:.4f} | R²: {val_metrics['R²']:.4f}")
        print(f"        MAPE: {val_metrics['MAPE']:.2f}% | Acc@10%: {val_metrics['Accuracy@10%']:.2f}%")

        # Compares against the *previous* best — the caller updates
        # self.best_metric only after this summary is printed.
        if val_metrics['R²'] > self.best_metric:
            print(f"\n✓ New best model! R² improved: {self.best_metric:.4f} -> {val_metrics['R²']:.4f}")

    def _save_best_model(self, epoch, metrics):
        """Save the current model as the best checkpoint."""
        save_path = self.save_dir / f'{self.config.experiment_name}_best.pth'
        save_checkpoint(
            save_path,
            epoch=epoch,
            model=self.model,
            optimizer=self.optimizer,
            scheduler=self.scheduler,
            metrics=metrics,
            config=self.config
        )
        print(f"  Saved best model to: {save_path}")

    def _save_checkpoint(self, epoch):
        """Save a periodic training checkpoint."""
        save_path = self.save_dir / f'{self.config.experiment_name}_epoch{epoch}.pth'
        save_checkpoint(
            save_path,
            epoch=epoch,
            model=self.model,
            optimizer=self.optimizer,
            scheduler=self.scheduler,
            config=self.config
        )
        print(f"  Saved checkpoint to: {save_path}")

    def load_checkpoint(self, checkpoint_path):
        """
        Load a checkpoint and restore model/optimizer/scheduler state.

        Args:
            checkpoint_path: path to the checkpoint file

        Returns:
            start_epoch: the epoch stored in the checkpoint, to resume from
        """
        # Calls the module-level load_checkpoint helper (not this method).
        checkpoint = load_checkpoint(checkpoint_path, self.model, self.optimizer, self.scheduler)
        return checkpoint['epoch']

    def _training_complete(self):
        """Print the final summary, save history, and generate all plots."""
        print("\n" + "="*70)
        print("Training Completed!".center(70))
        print("="*70)
        print(f"\nBest model:")
        print(f"  Epoch: {self.best_epoch}")
        print(f"  R²: {self.best_metric:.4f}")
        print(f"  Path: {self.save_dir / f'{self.config.experiment_name}_best.pth'}")

        # Persist the training history.
        self.logger.save_history(self.history)
        print(f"\nTraining history saved to: {self.save_dir}")

        # ===== Generate visualizations =====
        print("\n" + "="*70)
        print("Generating Visualizations".center(70))
        print("="*70)

        # Imported lazily so plotting dependencies are only needed here.
        from utils.visualizer import Visualizer
        visualizer = Visualizer(self.save_dir)

        # 1. Plot the training history.
        visualizer.plot_training_history(self.history)

        # 2. Plot the predictions of the final (in-memory) model on the
        #    validation set. NOTE(review): this uses the model's current
        #    weights, not the saved best checkpoint — confirm that is intended.
        print("\nGenerating prediction plots...")
        self.model.eval()
        all_preds = []
        all_targets = []

        with torch.no_grad():
            for images, scales, features, targets in self.val_loader:
                images = images.to(self.device)
                scales = scales.to(self.device)
                features = features.to(self.device)

                outputs = self.model(images, scales, features)
                all_preds.extend(outputs.cpu().numpy())
                # targets stay on CPU, so .numpy() is safe here.
                all_targets.extend(targets.numpy())

        visualizer.plot_predictions(all_targets, all_preds)

        # 3. Plot metric comparisons.
        visualizer.plot_metrics_comparison(self.history)

        # 4. Generate the summary report.
        visualizer.create_summary_report(self.history, self.best_epoch)

        # ===== Additional plots =====
        visualizer.plot_loss_components(self.history)
        visualizer.plot_error_analysis(all_targets, all_preds)
        visualizer.plot_metrics_evolution(self.history)
        visualizer.plot_learning_curve(self.history)
        visualizer.plot_prediction_intervals(all_targets, all_preds)
        visualizer.create_comparison_table(self.history, self.best_epoch)
        print("\n✓ All visualizations generated successfully!")
        print("="*70 + "\n")