#!/usr/bin/env python3
"""
增强版气象预测模型训练脚本
集成ViT升级、时间序列建模和优化物理约束
"""

import os
import sys
import yaml
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.cuda.amp import GradScaler, autocast
import numpy as np
from tqdm import tqdm
import logging
from datetime import datetime
import json
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import warnings
warnings.filterwarnings('ignore')

# 导入增强模型和数据处理
from models_enhanced import EnhancedCombinedModel, create_enhanced_model
from data_utils_final import WeatherDataset, collate_fn
from physics_constraints import PhysicalLossConstraints

class ProgressiveTraining:
    """Staged ("progressive") training strategy.

    When enabled via ``config['training']['progressive_training']``, the ViT
    assimilation module and the temporal encoder start frozen and are
    unfrozen once their configured epoch thresholds are reached. Also
    provides per-component learning-rate parameter groups for the optimizer.
    """

    def __init__(self, model, config):
        self.model = model
        self.config = config
        self.current_epoch = 0

        # Progressive-training section of the config (may be missing entirely).
        self.progressive_config = config['training'].get('progressive_training', {})
        self.enabled = self.progressive_config.get('enabled', False)

        if self.enabled:
            self.vit_freeze_epochs = self.progressive_config.get('vit_freeze_epochs', 5)
            self.temporal_freeze_epochs = self.progressive_config.get('temporal_freeze_epochs', 3)
            self.unfreeze_lr = self.progressive_config.get('unfreeze_lr', 1e-5)

    def _set_trainable(self, module, trainable):
        """Toggle ``requires_grad`` on every parameter of *module*."""
        for p in module.parameters():
            p.requires_grad = trainable

    def freeze_vit(self):
        """Freeze the ViT assimilation sub-module."""
        self._set_trainable(self.model.vit_assimilation, False)
        print("✓ ViT parameters frozen")

    def unfreeze_vit(self):
        """Unfreeze the ViT assimilation sub-module."""
        self._set_trainable(self.model.vit_assimilation, True)
        print("✓ ViT parameters unfrozen")

    def freeze_temporal(self):
        """Freeze the temporal-encoder sub-module."""
        self._set_trainable(self.model.temporal_encoder, False)
        print("✓ Temporal encoder parameters frozen")

    def unfreeze_temporal(self):
        """Unfreeze the temporal-encoder sub-module."""
        self._set_trainable(self.model.temporal_encoder, True)
        print("✓ Temporal encoder parameters unfrozen")

    def get_parameter_groups(self, optimizer_type='adam'):
        """Return optimizer parameter groups with per-component learning rates.

        For ``optimizer_type == 'adam'`` (and progressive mode enabled), the
        ViT gets 0.1x and the temporal encoder 0.5x of the base learning
        rate; everything else uses the base rate. Otherwise the model's raw
        parameters are returned unchanged.
        """
        if not self.enabled:
            return self.model.parameters()

        # Bucket parameters by the sub-module they belong to (matched by name).
        buckets = {'vit': [], 'temporal': [], 'other': []}
        for name, param in self.model.named_parameters():
            if 'vit_assimilation' in name:
                buckets['vit'].append(param)
            elif 'temporal_encoder' in name:
                buckets['temporal'].append(param)
            else:
                buckets['other'].append(param)

        if optimizer_type != 'adam':
            return self.model.parameters()

        base_lr = self.config['training']['lr']
        return [
            {'params': buckets['vit'], 'lr': base_lr * 0.1},
            {'params': buckets['temporal'], 'lr': base_lr * 0.5},
            {'params': buckets['other'], 'lr': base_lr}
        ]

    def update_epoch(self, epoch):
        """Record the current epoch and unfreeze components whose freeze window ended."""
        self.current_epoch = epoch

        if not self.enabled:
            return

        if epoch == self.vit_freeze_epochs:
            self.unfreeze_vit()
            print(f"Epoch {epoch}: Unfreezing ViT parameters")

        if epoch == self.temporal_freeze_epochs:
            self.unfreeze_temporal()
            print(f"Epoch {epoch}: Unfreezing temporal encoder parameters")

class SimplePhysicsLoss:
    """Simplified physics-inspired regularizer.

    Combines a spatial-smoothness penalty (squared first differences along
    the last two dimensions, for inputs with ndim >= 3) with a temporal
    continuity penalty (squared first differences along dim -2, for inputs
    with ndim >= 2).

    Bug fixed vs. the original implementation: the horizontal and vertical
    spatial difference tensors have incompatible shapes — (..., H, W-1) and
    (..., H-1, W) — so adding them before the reduction raised a broadcasting
    RuntimeError for any realistic field. Each direction is now reduced to a
    scalar mean *before* the terms are summed. Behavior for 1-D and 2-D
    inputs is unchanged.
    """

    def __init__(self, config):
        self.config = config

    def __call__(self, predictions, satellite_data):
        """Return the combined smoothness + temporal-continuity loss.

        Args:
            predictions: model output tensor (any rank; see class docstring).
            satellite_data: unused here, kept for interface compatibility
                with richer physics-constraint losses.

        Returns:
            A scalar tensor on ``predictions``'s device.
        """
        # Scalar zero with predictions' dtype/device, used when a term
        # does not apply at this rank.
        zero = predictions.new_zeros(())

        # Spatial smoothness: reduce each difference direction separately.
        if predictions.dim() >= 3:
            smoothness_loss = (
                torch.mean(torch.diff(predictions, dim=-1) ** 2)
                + torch.mean(torch.diff(predictions, dim=-2) ** 2)
            )
        else:
            smoothness_loss = zero

        # Temporal continuity along dim -2 (the time axis for 2-D (T, F)
        # inputs). NOTE(review): for ndim >= 3 this re-penalizes the same
        # axis as the vertical spatial term — confirm the intended time axis
        # for batched spatial fields.
        if predictions.dim() >= 2:
            temporal_loss = torch.mean(torch.diff(predictions, dim=-2) ** 2)
        else:
            temporal_loss = zero

        return smoothness_loss + temporal_loss

class EnhancedTrainer:
    """Enhanced trainer.

    Wires together the enhanced model, progressive freezing, optimizer and
    LR schedulers, mixed precision, data loaders, checkpointing, early
    stopping and logging for a full training run.
    """
    def __init__(self, config):
        self.config = config
        # Falls back to CPU when CUDA is unavailable, regardless of the config value.
        self.device = torch.device(config['training']['device'] if torch.cuda.is_available() else 'cpu')
        
        # Configure logging first so later setup steps can use self.logger.
        self.setup_logging()
        
        # Build the model and move it to the target device.
        self.model = create_enhanced_model(config)
        self.model.to(self.device)
        
        # Progressive (staged) training helper.
        self.progressive_training = ProgressiveTraining(self.model, config)
        
        # Start with ViT and temporal encoder frozen when progressive mode is enabled.
        if self.progressive_training.enabled:
            self.progressive_training.freeze_vit()
            self.progressive_training.freeze_temporal()
        
        # Multi-GPU via DataParallel.
        # NOTE(review): wrapping prefixes state_dict keys with 'module.';
        # checkpoints saved here will need key-stripping to load into an
        # unwrapped model — confirm downstream loaders handle this.
        if config['training']['multi_gpu']['enabled'] and torch.cuda.device_count() > 1:
            self.model = nn.DataParallel(self.model)
            self.logger.info(f"Using {torch.cuda.device_count()} GPUs")
        
        # Optimizer.
        # NOTE(review): when progressive training is disabled,
        # get_parameter_groups returns plain parameters with no 'lr' entry,
        # so AdamW uses its default learning rate and config['training']['lr']
        # is silently ignored — confirm this is intended.
        param_groups = self.progressive_training.get_parameter_groups('adam')
        self.optimizer = optim.AdamW(
            param_groups,
            weight_decay=config['training']['weight_decay']
        )
        
        # Learning-rate scheduler (plus optional warmup).
        self.setup_scheduler()
        
        # Loss functions: MSE on targets + simplified physics regularizer.
        self.criterion = nn.MSELoss()
        self.physics_loss = SimplePhysicsLoss(config)
        
        # Mixed-precision training (GradScaler is None when disabled).
        self.scaler = GradScaler() if config['training']['mixed_precision']['enabled'] else None
        
        # Train/validation data loaders.
        self.setup_data_loaders()
        
        # Training state.
        self.current_epoch = 0
        self.best_val_loss = float('inf')
        self.train_losses = []
        self.val_losses = []
        
        # Create output directories.
        os.makedirs(config['training']['save_dir'], exist_ok=True)
        os.makedirs(config['training']['log_dir'], exist_ok=True)
    
    def setup_logging(self):
        """Configure INFO-level logging to a timestamped file and stdout."""
        log_dir = self.config['training']['log_dir']
        os.makedirs(log_dir, exist_ok=True)
        
        # Timestamped log file so repeated runs do not overwrite each other.
        log_file = os.path.join(log_dir, f"training_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log")
        
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file),
                logging.StreamHandler(sys.stdout)
            ]
        )
        self.logger = logging.getLogger(__name__)
    
    def setup_scheduler(self):
        """Build the LR scheduler ('cosine', 'step', 'exponential', or none)
        and an optional linear warmup scheduler."""
        scheduler_config = self.config['training'].get('lr_scheduler', {})
        scheduler_type = scheduler_config.get('type', 'cosine')
        
        if scheduler_type == 'cosine':
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer,
                T_max=scheduler_config.get('T_max', 100),
                eta_min=scheduler_config.get('min_lr', 1e-5)
            )
        elif scheduler_type == 'step':
            self.scheduler = optim.lr_scheduler.StepLR(
                self.optimizer,
                step_size=30,
                gamma=0.1
            )
        elif scheduler_type == 'exponential':
            self.scheduler = optim.lr_scheduler.ExponentialLR(
                self.optimizer,
                gamma=0.95
            )
        else:
            self.scheduler = None
        
        # Linear warmup: scales LR by (epoch+1)/warmup_epochs, capped at 1.0.
        if scheduler_config.get('warmup_epochs', 0) > 0:
            self.warmup_epochs = scheduler_config['warmup_epochs']
            self.warmup_scheduler = optim.lr_scheduler.LambdaLR(
                self.optimizer,
                lr_lambda=lambda epoch: min(1.0, (epoch + 1) / self.warmup_epochs)
            )
        else:
            self.warmup_scheduler = None
    
    def setup_data_loaders(self):
        """Create the train/validation datasets, split them 80/20, and build loaders."""
        # NOTE(review): two identical full datasets are constructed and then
        # subset — one dataset with two Subset views would suffice and halves
        # any load cost.
        train_dataset = WeatherDataset(
            station_csv=self.config['data']['station_csv'],
            himawari_dir=self.config['data']['himawari_dir'],
            historical_days=self.config['data']['historical_days'],
            future_hours=self.config['data']['future_hours']
        )
        
        val_dataset = WeatherDataset(
            station_csv=self.config['data']['station_csv'],
            himawari_dir=self.config['data']['himawari_dir'],
            historical_days=self.config['data']['historical_days'],
            future_hours=self.config['data']['future_hours']
        )
        
        # Simple sequential 80/20 train/validation split.
        # NOTE(review): a sequential split of time-series samples means
        # validation is strictly later in time than training — presumably
        # intentional to avoid leakage; confirm.
        total_size = len(train_dataset)
        train_size = int(0.8 * total_size)
        val_size = total_size - train_size
        
        train_indices = list(range(train_size))
        val_indices = list(range(train_size, total_size))
        
        train_dataset = torch.utils.data.Subset(train_dataset, train_indices)
        val_dataset = torch.utils.data.Subset(val_dataset, val_indices)
        
        # Data loaders.
        batch_size = self.config['training']['batch_size']
        num_workers = self.config['training']['num_workers']
        
        self.train_loader = DataLoader(
            train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            collate_fn=collate_fn,
            pin_memory=True
        )
        
        self.val_loader = DataLoader(
            val_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
            collate_fn=collate_fn,
            pin_memory=True
        )
        
        self.logger.info(f"Training samples: {len(train_dataset)}")
        self.logger.info(f"Validation samples: {len(val_dataset)}")
    
    def train_epoch(self):
        """Run one training epoch.

        Returns:
            (avg_loss, avg_physics_loss): per-batch means of the combined
            loss and of the physics term alone.
        """
        self.model.train()
        total_loss = 0
        physics_loss_total = 0
        num_batches = 0
        
        pbar = tqdm(self.train_loader, desc=f"Training Epoch {self.current_epoch}")
        
        for batch_idx, batch in enumerate(pbar):
            # Move batch tensors to the training device.
            station_data = batch['station_data'].to(self.device)
            himawari_data = batch['himawari_data'].to(self.device)
            targets = batch['target_data'].to(self.device)
            
            # Forward pass.
            self.optimizer.zero_grad()
            
            if self.scaler is not None:
                # Mixed-precision path.
                # NOTE(review): the 0.1 physics-loss weight is hard-coded
                # here and below — consider lifting it into the config.
                with autocast():
                    outputs = self.model(station_data, himawari_data)
                    data_loss = self.criterion(outputs, targets)
                    physics_loss_val = self.physics_loss(outputs, himawari_data)
                    loss = data_loss + 0.1 * physics_loss_val
                
                # Backward pass through the gradient scaler.
                self.scaler.scale(loss).backward()
                
                # Gradient clipping (requires unscaling first).
                if self.config['training'].get('gradient_clip', 0) > 0:
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(), 
                        self.config['training']['gradient_clip']
                    )
                
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                # Full-precision path (same loss composition as above).
                outputs = self.model(station_data, himawari_data)
                data_loss = self.criterion(outputs, targets)
                physics_loss_val = self.physics_loss(outputs, himawari_data)
                loss = data_loss + 0.1 * physics_loss_val
                
                loss.backward()
                
                # Gradient clipping.
                if self.config['training'].get('gradient_clip', 0) > 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(), 
                        self.config['training']['gradient_clip']
                    )
                
                self.optimizer.step()
            
            total_loss += loss.item()
            physics_loss_total += physics_loss_val.item()
            num_batches += 1
            
            # Update the progress bar with current batch losses.
            pbar.set_postfix({
                'Loss': f'{loss.item():.6f}',
                'Data Loss': f'{data_loss.item():.6f}',
                'Physics Loss': f'{physics_loss_val.item():.6f}'
            })
        
        avg_loss = total_loss / num_batches
        avg_physics_loss = physics_loss_total / num_batches
        
        return avg_loss, avg_physics_loss
    
    def validate_epoch(self):
        """Run one validation epoch.

        Returns:
            dict with keys 'loss', 'physics_loss', 'mse', 'mae', 'rmse', 'r2'
            computed over the full validation set.
        """
        self.model.eval()
        total_loss = 0
        physics_loss_total = 0
        num_batches = 0
        
        all_predictions = []
        all_targets = []
        
        with torch.no_grad():
            pbar = tqdm(self.val_loader, desc=f"Validation Epoch {self.current_epoch}")
            
            for batch in pbar:
                # Move batch tensors to the device.
                station_data = batch['station_data'].to(self.device)
                himawari_data = batch['himawari_data'].to(self.device)
                targets = batch['target_data'].to(self.device)
                
                # Forward pass (mirrors the training loss composition).
                if self.scaler is not None:
                    with autocast():
                        outputs = self.model(station_data, himawari_data)
                        data_loss = self.criterion(outputs, targets)
                        physics_loss_val = self.physics_loss(outputs, himawari_data)
                        loss = data_loss + 0.1 * physics_loss_val
                else:
                    outputs = self.model(station_data, himawari_data)
                    data_loss = self.criterion(outputs, targets)
                    physics_loss_val = self.physics_loss(outputs, himawari_data)
                    loss = data_loss + 0.1 * physics_loss_val
                
                total_loss += loss.item()
                physics_loss_total += physics_loss_val.item()
                num_batches += 1
                
                # Collect predictions and targets for the epoch-level metrics.
                all_predictions.append(outputs.cpu().numpy())
                all_targets.append(targets.cpu().numpy())
                
                # Update the progress bar.
                pbar.set_postfix({
                    'Val Loss': f'{loss.item():.6f}',
                    'Data Loss': f'{data_loss.item():.6f}',
                    'Physics Loss': f'{physics_loss_val.item():.6f}'
                })
        
        # Epoch-level regression metrics over flattened predictions/targets.
        all_predictions = np.concatenate(all_predictions, axis=0)
        all_targets = np.concatenate(all_targets, axis=0)
        
        mse = mean_squared_error(all_targets.flatten(), all_predictions.flatten())
        mae = mean_absolute_error(all_targets.flatten(), all_predictions.flatten())
        rmse = np.sqrt(mse)
        r2 = r2_score(all_targets.flatten(), all_predictions.flatten())
        
        avg_loss = total_loss / num_batches
        avg_physics_loss = physics_loss_total / num_batches
        
        metrics = {
            'loss': avg_loss,
            'physics_loss': avg_physics_loss,
            'mse': mse,
            'mae': mae,
            'rmse': rmse,
            'r2': r2
        }
        
        return metrics
    
    def save_checkpoint(self, is_best=False):
        """Save a full training checkpoint; also copy to best_model.pth when is_best."""
        checkpoint = {
            'epoch': self.current_epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict() if self.scheduler else None,
            'best_val_loss': self.best_val_loss,
            'train_losses': self.train_losses,
            'val_losses': self.val_losses,
            'config': self.config
        }
        
        # Save the latest per-epoch checkpoint.
        checkpoint_path = os.path.join(
            self.config['training']['save_dir'], 
            f'checkpoint_epoch_{self.current_epoch}.pth'
        )
        torch.save(checkpoint, checkpoint_path)
        
        # Save a copy as the best model so far.
        if is_best:
            best_path = os.path.join(
                self.config['training']['save_dir'], 
                'best_model.pth'
            )
            torch.save(checkpoint, best_path)
            self.logger.info(f"Best model saved with validation loss: {self.best_val_loss:.6f}")
    
    def train(self):
        """Main training loop: per-epoch training, periodic validation,
        LR scheduling, checkpointing and early stopping."""
        self.logger.info("Starting enhanced model training...")
        self.logger.info(f"Device: {self.device}")
        self.logger.info(f"Model parameters: {sum(p.numel() for p in self.model.parameters()):,}")
        
        epochs = self.config['training']['epochs']
        
        for epoch in range(epochs):
            self.current_epoch = epoch
            
            # Update progressive-training state (may unfreeze sub-modules).
            self.progressive_training.update_epoch(epoch)
            
            # Train for one epoch.
            train_loss, train_physics_loss = self.train_epoch()
            
            # Validate every val_frequency epochs.
            # NOTE(review): everything below — scheduler stepping, loss
            # bookkeeping, checkpointing, early stopping — happens only on
            # validation epochs, so with val_frequency > 1 the LR scheduler
            # steps less often than once per epoch. Confirm this is intended.
            val_frequency = self.config['validation'].get('val_frequency', 5)
            if epoch % val_frequency == 0:
                val_metrics = self.validate_epoch()
                val_loss = val_metrics['loss']
                
                # Record losses (only for validation epochs).
                self.train_losses.append(train_loss)
                self.val_losses.append(val_loss)
                
                # Log the epoch summary.
                self.logger.info(
                    f"Epoch {epoch}: Train Loss={train_loss:.6f}, "
                    f"Train Physics={train_physics_loss:.6f}, "
                    f"Val Loss={val_loss:.6f}, Val R2={val_metrics['r2']:.4f}"
                )
                
                # LR scheduling: warmup first, then the main scheduler.
                if self.scheduler:
                    if self.warmup_scheduler and epoch < self.warmup_epochs:
                        self.warmup_scheduler.step()
                    else:
                        self.scheduler.step()
                
                # Track the best validation loss.
                is_best = val_loss < self.best_val_loss
                if is_best:
                    self.best_val_loss = val_loss
                
                # Periodic checkpointing.
                save_frequency = self.config['logging']['checkpointing'].get('save_frequency', 10)
                if epoch % save_frequency == 0:
                    self.save_checkpoint(is_best)
                
                # Early stopping: stop when the last `patience` recorded
                # validation losses all exceed best + min_delta.
                early_stopping = self.config['validation'].get('early_stopping', {})
                if early_stopping.get('enabled', False):
                    patience = early_stopping.get('patience', 15)
                    if len(self.val_losses) >= patience:
                        recent_losses = self.val_losses[-patience:]
                        if all(loss > self.best_val_loss + early_stopping.get('min_delta', 0.001) 
                               for loss in recent_losses):
                            self.logger.info(f"Early stopping triggered at epoch {epoch}")
                            break
        
        self.logger.info("Training completed!")
        self.logger.info(f"Best validation loss: {self.best_val_loss:.6f}")
        
        # Save the final model state (not marked as best).
        self.save_checkpoint()
        
        # Plot loss / learning-rate curves.
        self.plot_training_curves()
    
    def plot_training_curves(self):
        """Save a PNG with the loss curves and the current learning rate."""
        if not self.train_losses:
            return
        
        plt.figure(figsize=(12, 4))
        
        # Loss curves (one point per recorded validation epoch).
        plt.subplot(1, 2, 1)
        epochs = range(len(self.train_losses))
        plt.plot(epochs, self.train_losses, 'b-', label='Training Loss')
        plt.plot(epochs, self.val_losses, 'r-', label='Validation Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Training and Validation Loss')
        plt.legend()
        plt.grid(True)
        
        # Learning-rate panel.
        # NOTE(review): this plots the *current* lr repeated for every epoch,
        # not the lr history — the curve is always flat. Record lr per epoch
        # during training if an actual schedule plot is wanted.
        plt.subplot(1, 2, 2)
        lrs = [param_group['lr'] for param_group in self.optimizer.param_groups]
        plt.plot(epochs, [lrs[0]] * len(epochs), 'g-', label='Learning Rate')
        plt.xlabel('Epoch')
        plt.ylabel('Learning Rate')
        plt.title('Learning Rate Schedule')
        plt.legend()
        plt.grid(True)
        
        plt.tight_layout()
        
        # Save the figure to the log directory.
        save_path = os.path.join(
            self.config['training']['log_dir'], 
            f'training_curves_{datetime.now().strftime("%Y%m%d_%H%M%S")}.png'
        )
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()
        
        self.logger.info(f"Training curves saved to {save_path}")

def main():
    """Entry point: load the YAML config, seed all RNGs, and run training.

    The config path may optionally be supplied as the first command-line
    argument; it defaults to 'config_enhanced_cpu.yaml' for backward
    compatibility with the previous hard-coded behavior.
    """
    # Load configuration (optionally overridden from the command line).
    config_path = sys.argv[1] if len(sys.argv) > 1 else 'config_enhanced_cpu.yaml'
    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Seed every RNG in play (torch CPU, torch CUDA, numpy) for reproducibility.
    torch.manual_seed(42)
    np.random.seed(42)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(42)
        torch.cuda.manual_seed_all(42)

    # Build the trainer and start training.
    trainer = EnhancedTrainer(config)
    trainer.train()

if __name__ == "__main__":
    main()
