#!/usr/bin/env python3
"""
简化版增强气象预测模型训练脚本
避免网络连接问题，使用本地模型
"""

import os
import sys
import yaml
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
import logging
from datetime import datetime
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import warnings
warnings.filterwarnings('ignore')

# 导入简化模型和数据处理
from models_enhanced_simple import EnhancedCombinedModel, create_enhanced_model
from data_utils_final import WeatherDataset, collate_fn

class SimpleTrainer:
    """Simplified trainer for the enhanced weather-prediction model.

    Orchestrates model construction, optimization, periodic validation,
    checkpointing, optional early stopping, and training-curve plotting.
    ``config`` is a nested dict (typically loaded from YAML) with sections
    ``training``, ``data``, ``validation`` and ``logging``.
    """

    def __init__(self, config):
        self.config = config
        # Honour the configured device only when CUDA is actually available;
        # otherwise silently fall back to CPU.
        self.device = torch.device(
            config['training']['device'] if torch.cuda.is_available() else 'cpu'
        )

        # Logging first, so every later setup step can report progress.
        self.setup_logging()

        # Model factory is provided by models_enhanced_simple.
        self.model = create_enhanced_model(config)
        self.model.to(self.device)

        # AdamW: Adam with decoupled weight decay.
        self.optimizer = optim.AdamW(
            self.model.parameters(),
            lr=config['training']['lr'],
            weight_decay=config['training']['weight_decay']
        )

        # Halve the learning rate every 10 epochs (stepped once per epoch
        # in train(), see BUGFIX note there).
        self.scheduler = optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=10,
            gamma=0.5
        )

        # Loss function.
        self.criterion = nn.MSELoss()

        # Data loaders.
        self.setup_data_loaders()

        # Training state and history.
        self.current_epoch = 0
        self.best_val_loss = float('inf')
        self.train_losses = []  # one entry per training epoch
        self.val_losses = []    # one entry per validation run
        self.val_epochs = []    # epoch index of each validation run
        self.lr_history = []    # learning rate recorded at each epoch end

        # Ensure output directories exist.
        os.makedirs(config['training']['save_dir'], exist_ok=True)
        os.makedirs(config['training']['log_dir'], exist_ok=True)

    def setup_logging(self):
        """Configure a timestamped log file plus a stdout echo handler."""
        log_dir = self.config['training']['log_dir']
        os.makedirs(log_dir, exist_ok=True)

        log_file = os.path.join(
            log_dir, f"training_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
        )

        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file),
                logging.StreamHandler(sys.stdout)
            ]
        )
        self.logger = logging.getLogger(__name__)

    def setup_data_loaders(self):
        """Build the train/validation DataLoaders.

        Uses a chronological 80/20 split: the first 80% of samples train,
        the remaining 20% validate.
        """
        # BUGFIX: instantiate the dataset once instead of twice with
        # identical arguments, halving load time and memory.
        dataset = WeatherDataset(
            station_csv=self.config['data']['station_csv'],
            himawari_dir=self.config['data']['himawari_dir'],
            historical_days=self.config['data']['historical_days'],
            future_hours=self.config['data']['future_hours']
        )

        total_size = len(dataset)
        train_size = int(0.8 * total_size)

        train_dataset = torch.utils.data.Subset(dataset, range(train_size))
        val_dataset = torch.utils.data.Subset(dataset, range(train_size, total_size))

        batch_size = self.config['training']['batch_size']
        num_workers = self.config['training']['num_workers']

        self.train_loader = DataLoader(
            train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            collate_fn=collate_fn,
            pin_memory=True
        )

        self.val_loader = DataLoader(
            val_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
            collate_fn=collate_fn,
            pin_memory=True
        )

        self.logger.info(f"Training samples: {len(train_dataset)}")
        self.logger.info(f"Validation samples: {len(val_dataset)}")

    def train_epoch(self):
        """Run one training epoch and return the mean batch loss."""
        self.model.train()
        total_loss = 0
        num_batches = 0

        pbar = tqdm(self.train_loader, desc=f"Training Epoch {self.current_epoch}")

        for batch in pbar:
            # Move batch tensors to the training device.
            station_data = batch['station_data'].to(self.device)
            himawari_data = batch['himawari_data'].to(self.device)
            targets = batch['target_data'].to(self.device)

            # Forward pass.
            self.optimizer.zero_grad()
            outputs = self.model(station_data, himawari_data)
            loss = self.criterion(outputs, targets)

            # Backward pass.
            loss.backward()

            # Optional gradient clipping for stability.
            if self.config['training'].get('gradient_clip', 0) > 0:
                torch.nn.utils.clip_grad_norm_(
                    self.model.parameters(),
                    self.config['training']['gradient_clip']
                )

            self.optimizer.step()

            total_loss += loss.item()
            num_batches += 1

            pbar.set_postfix({
                'Loss': f'{loss.item():.6f}',
                'LR': f'{self.optimizer.param_groups[0]["lr"]:.2e}'
            })

        # BUGFIX: guard against an empty loader (previously ZeroDivisionError).
        return total_loss / max(num_batches, 1)

    def validate_epoch(self):
        """Run one validation pass.

        Returns a dict with keys ``loss``, ``mse``, ``mae``, ``rmse``, ``r2``.
        """
        self.model.eval()
        total_loss = 0
        num_batches = 0

        all_predictions = []
        all_targets = []

        with torch.no_grad():
            pbar = tqdm(self.val_loader, desc=f"Validation Epoch {self.current_epoch}")

            for batch in pbar:
                station_data = batch['station_data'].to(self.device)
                himawari_data = batch['himawari_data'].to(self.device)
                targets = batch['target_data'].to(self.device)

                outputs = self.model(station_data, himawari_data)
                loss = self.criterion(outputs, targets)

                total_loss += loss.item()
                num_batches += 1

                # Collect predictions/targets for the sklearn metrics below.
                all_predictions.append(outputs.cpu().numpy())
                all_targets.append(targets.cpu().numpy())

                pbar.set_postfix({'Val Loss': f'{loss.item():.6f}'})

        predictions = np.concatenate(all_predictions, axis=0).flatten()
        targets_np = np.concatenate(all_targets, axis=0).flatten()

        mse = mean_squared_error(targets_np, predictions)
        mae = mean_absolute_error(targets_np, predictions)

        return {
            # BUGFIX: guard against an empty loader (previously ZeroDivisionError).
            'loss': total_loss / max(num_batches, 1),
            'mse': mse,
            'mae': mae,
            'rmse': np.sqrt(mse),
            'r2': r2_score(targets_np, predictions),
        }

    def save_checkpoint(self, is_best=False):
        """Persist model/optimizer/scheduler state and loss history.

        Always writes ``checkpoint_epoch_<N>.pth``; additionally writes
        ``best_model.pth`` when ``is_best`` is True.
        """
        checkpoint = {
            'epoch': self.current_epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'best_val_loss': self.best_val_loss,
            'train_losses': self.train_losses,
            'val_losses': self.val_losses,
            # Additive keys: let resumed runs/plots align val losses and LR
            # history with epoch indices.
            'val_epochs': self.val_epochs,
            'lr_history': self.lr_history,
            'config': self.config
        }

        checkpoint_path = os.path.join(
            self.config['training']['save_dir'],
            f'checkpoint_epoch_{self.current_epoch}.pth'
        )
        torch.save(checkpoint, checkpoint_path)

        if is_best:
            best_path = os.path.join(
                self.config['training']['save_dir'],
                'best_model.pth'
            )
            torch.save(checkpoint, best_path)
            self.logger.info(f"Best model saved with validation loss: {self.best_val_loss:.6f}")

    def train(self):
        """Main training loop with periodic validation, checkpointing,
        LR scheduling and optional early stopping."""
        self.logger.info("Starting simple enhanced model training...")
        self.logger.info(f"Device: {self.device}")
        self.logger.info(f"Model parameters: {sum(p.numel() for p in self.model.parameters()):,}")

        epochs = self.config['training']['epochs']
        val_frequency = self.config['validation'].get('val_frequency', 2)
        save_frequency = self.config['logging']['checkpointing'].get('save_frequency', 5)
        early_stopping = self.config['validation'].get('early_stopping', {})
        patience = early_stopping.get('patience', 5)
        min_delta = early_stopping.get('min_delta', 0.001)
        stagnant_validations = 0  # validations without meaningful improvement

        for epoch in range(epochs):
            self.current_epoch = epoch

            train_loss = self.train_epoch()
            # BUGFIX: record the training loss every epoch (previously only
            # on validation epochs, silently dropping intermediate losses).
            self.train_losses.append(train_loss)

            if epoch % val_frequency == 0:
                val_metrics = self.validate_epoch()
                val_loss = val_metrics['loss']
                self.val_losses.append(val_loss)
                self.val_epochs.append(epoch)

                self.logger.info(
                    f"Epoch {epoch}: Train Loss={train_loss:.6f}, "
                    f"Val Loss={val_loss:.6f}, Val R2={val_metrics['r2']:.4f}, "
                    f"Val RMSE={val_metrics['rmse']:.4f}"
                )

                # Early-stopping bookkeeping: an improvement must beat the
                # previous best by at least min_delta to reset the counter.
                if val_loss < self.best_val_loss - min_delta:
                    stagnant_validations = 0
                else:
                    stagnant_validations += 1

                is_best = val_loss < self.best_val_loss
                if is_best:
                    self.best_val_loss = val_loss

                if epoch % save_frequency == 0:
                    self.save_checkpoint(is_best)

                if early_stopping.get('enabled', False) and stagnant_validations >= patience:
                    self.logger.info(f"Early stopping triggered at epoch {epoch}")
                    break
            else:
                self.logger.info(f"Epoch {epoch}: Train Loss={train_loss:.6f}")

            # BUGFIX: step the scheduler once per epoch. Previously it was
            # stepped only on validation epochs, so StepLR(step_size=10)
            # decayed every 10 *validations* rather than every 10 epochs.
            self.scheduler.step()
            self.lr_history.append(self.optimizer.param_groups[0]['lr'])

        self.logger.info("Training completed!")
        self.logger.info(f"Best validation loss: {self.best_val_loss:.6f}")

        # Save the final model state.
        self.save_checkpoint()

        # Plot the training curves.
        self.plot_training_curves()

    def plot_training_curves(self):
        """Save a PNG with the loss curves and the actual LR schedule."""
        if not self.train_losses:
            return

        plt.figure(figsize=(12, 4))

        # Loss curves: training per epoch, validation at its recorded epochs.
        plt.subplot(1, 2, 1)
        plt.plot(range(len(self.train_losses)), self.train_losses, 'b-', label='Training Loss')
        if self.val_losses:
            plt.plot(self.val_epochs, self.val_losses, 'r-', label='Validation Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Training and Validation Loss')
        plt.legend()
        plt.grid(True)

        # BUGFIX: plot the recorded per-epoch LR history. Previously this
        # drew a flat line at the current LR, which misrepresented the
        # schedule.
        plt.subplot(1, 2, 2)
        plt.plot(range(len(self.lr_history)), self.lr_history, 'g-', label='Learning Rate')
        plt.xlabel('Epoch')
        plt.ylabel('Learning Rate')
        plt.title('Learning Rate Schedule')
        plt.legend()
        plt.grid(True)

        plt.tight_layout()

        save_path = os.path.join(
            self.config['training']['log_dir'],
            f'training_curves_{datetime.now().strftime("%Y%m%d_%H%M%S")}.png'
        )
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        self.logger.info(f"Training curves saved to {save_path}")

def main():
    """Entry point: load the YAML config, seed all RNGs, and run training.

    The config path may be supplied as the first command-line argument;
    it defaults to 'config_enhanced_cpu.yaml' for backward compatibility.
    """
    # Generalization: allow overriding the config path from the CLI.
    config_path = sys.argv[1] if len(sys.argv) > 1 else 'config_enhanced_cpu.yaml'
    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Fix random seeds for reproducibility across torch/numpy/CUDA.
    torch.manual_seed(42)
    np.random.seed(42)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(42)
        torch.cuda.manual_seed_all(42)

    # Create the trainer and start training.
    trainer = SimpleTrainer(config)
    trainer.train()

# Script entry point: run training only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
