#!/usr/bin/env python3
"""
自动GPU训练脚本 - 智能检测GPU/CPU，支持多GPU训练
针对8个RTX 4090优化，自动回退到CPU
"""

import os
import sys
import yaml
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.cuda.amp import GradScaler, autocast
import logging
import argparse
from datetime import datetime
import json
import numpy as np
from typing import Dict, Any, Optional

# 导入项目模块
from data_utils_final import WeatherDataset, load_config, collate_fn
from models_enhanced_fixed import create_enhanced_model
from physics_constraints import PhysicalLossConstraints

def setup_logging(log_dir: str = "logs_auto"):
    """Configure root logging to a timestamped file plus stdout.

    Creates *log_dir* if it does not exist and returns a module-level logger.
    """
    os.makedirs(log_dir, exist_ok=True)
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    log_path = os.path.join(log_dir, f"training_{stamp}.log")

    # One handler per sink: persistent file + live console output.
    sinks = [
        logging.FileHandler(log_path),
        logging.StreamHandler(sys.stdout),
    ]
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=sinks,
    )
    return logging.getLogger(__name__)

class DeviceManager:
    """Device manager: auto-detects CUDA availability and places models.

    Hardware facts are probed once at construction time and cached as
    plain attributes (device, num_gpus, multi_gpu, use_cuda).
    """

    def __init__(self):
        self.device = self._detect_device()
        self.num_gpus = self._count_gpus()
        self.multi_gpu = self.num_gpus > 1 and torch.cuda.is_available()
        self.use_cuda = torch.cuda.is_available()

    def _detect_device(self) -> torch.device:
        """Return the preferred torch device: 'cuda' when available, else 'cpu'."""
        return torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def _count_gpus(self) -> int:
        """Number of visible CUDA devices (0 on CPU-only hosts)."""
        return torch.cuda.device_count() if torch.cuda.is_available() else 0

    def get_device_info(self) -> Dict[str, Any]:
        """Summarize the detected hardware as a plain, loggable dict."""
        info: Dict[str, Any] = {
            'device': str(self.device),
            'use_cuda': self.use_cuda,
            'num_gpus': self.num_gpus,
            'multi_gpu': self.multi_gpu,
        }

        if self.use_cuda:
            names = []
            for idx in range(self.num_gpus):
                mem_gb = torch.cuda.get_device_properties(idx).total_memory / 1024**3
                names.append(f"{torch.cuda.get_device_name(idx)} ({mem_gb:.1f}GB)")
            info['gpu_names'] = names

        return info

    def setup_model(self, model: nn.Module) -> nn.Module:
        """Move *model* to the detected device, wrapping it in DataParallel
        when more than one GPU is available."""
        model = model.to(self.device)

        if self.multi_gpu:
            # DataParallel replicates the module across all visible GPUs.
            model = nn.DataParallel(model)
            logging.info(f"使用DataParallel进行多GPU训练，GPU数量: {self.num_gpus}")
        elif self.use_cuda:
            logging.info(f"使用单GPU训练: {torch.cuda.get_device_name(0)}")
        else:
            logging.info("使用CPU训练")

        return model

class AutoConfig:
    """Automatic configuration manager.

    Loads ``config_path`` when it exists (creating and persisting a default
    config otherwise), then tunes batch size, gradient accumulation, worker
    count and mixed precision to the hardware reported by the DeviceManager.
    """

    def __init__(self, config_path: str = "config_auto.yaml", device_manager: DeviceManager = None):
        self.config_path = config_path
        self.device_manager = device_manager or DeviceManager()
        self.config = self._load_or_create_config()

    def _load_or_create_config(self) -> Dict[str, Any]:
        """Load the YAML config from disk, or create and save the default one.

        Returns the hardware-adjusted config dict.
        """
        if os.path.exists(self.config_path):
            with open(self.config_path, 'r', encoding='utf-8') as f:
                config = yaml.safe_load(f)
            # An empty/blank YAML file parses to None; fall back to defaults
            # instead of crashing later in _auto_adjust_config.
            if not config:
                config = self._create_default_config()
            logging.info(f"加载配置文件: {self.config_path}")
        else:
            config = self._create_default_config()
            self._save_config(config)
            logging.info(f"创建默认配置文件: {self.config_path}")

        # Tune the config to the machine we are actually running on.
        config = self._auto_adjust_config(config)
        return config

    def _create_default_config(self) -> Dict[str, Any]:
        """Return the built-in default configuration (data/model/training/optimization)."""
        return {
            'data': {
                'station_data_path': 'station_data.csv',
                'satellite_data_dir': 'data/himawari',
                'sequence_length': 12,
                'prediction_horizon': 6,
                'batch_size': 16,
                'num_workers': 4,
                'train_ratio': 0.8,
                'val_ratio': 0.1,
                'test_ratio': 0.1
            },
            'model': {
                'vit_model': 'vit_base_patch16_224',
                'vit_pretrained': False,
                'hidden_dim': 768,
                'num_heads': 12,
                'num_layers': 6,
                'lstm_hidden_dim': 256,
                'lstm_num_layers': 2,
                'fusion_dim': 512,
                'physics_constraint_weight': 0.1
            },
            'training': {
                'epochs': 50,
                'lr': 1e-4,
                'weight_decay': 1e-5,
                'device': 'auto',
                'mixed_precision': True,
                'gradient_clip': 1.0,
                'early_stopping_patience': 10,
                'save_interval': 5
            },
            'optimization': {
                'accumulate_grad_batches': 1,
                'warmup_epochs': 5,
                'scheduler_type': 'cosine'
            }
        }

    def _auto_adjust_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
        """Adjust batch size / workers / mixed precision to the detected hardware."""
        device_info = self.device_manager.get_device_info()

        if device_info['use_cuda']:
            # Per-GPU batch size scales with the number of GPUs; with fewer
            # GPUs, gradient accumulation compensates for the small batches.
            if device_info['num_gpus'] >= 8:
                # 8 GPUs x batch_size 32 -> effective batch 256.
                config['data']['batch_size'] = 32
                config['optimization']['accumulate_grad_batches'] = 1
                logging.info("检测到8个GPU，设置batch_size=32 per GPU")
            elif device_info['num_gpus'] >= 4:
                # 4 GPUs x batch_size 24 -> effective batch 96.
                config['data']['batch_size'] = 24
                config['optimization']['accumulate_grad_batches'] = 1
                logging.info("检测到4个GPU，设置batch_size=24 per GPU")
            elif device_info['num_gpus'] >= 2:
                # 2 GPUs x batch_size 16 -> effective batch 32.
                config['data']['batch_size'] = 16
                config['optimization']['accumulate_grad_batches'] = 1
                logging.info("检测到2个GPU，设置batch_size=16 per GPU")
            else:
                # Single GPU: batch_size 16 with 2-step accumulation.
                config['data']['batch_size'] = 16
                config['optimization']['accumulate_grad_batches'] = 2
                logging.info("检测到1个GPU，设置batch_size=16, accumulate_grad_batches=2")

            # Mixed precision is only beneficial (and supported) on CUDA.
            config['training']['mixed_precision'] = True

            # os.cpu_count() may return None on exotic platforms; default to 1
            # so min() does not raise TypeError.
            config['data']['num_workers'] = min(8, os.cpu_count() or 1)
        else:
            # CPU fallback: small batches, more accumulation, no AMP, no workers.
            config['data']['batch_size'] = 8
            config['optimization']['accumulate_grad_batches'] = 4
            config['training']['mixed_precision'] = False
            config['data']['num_workers'] = 0
            logging.info("CPU训练，使用较小batch_size")

        return config

    def _save_config(self, config: Dict[str, Any]):
        """Persist *config* to self.config_path as human-readable YAML."""
        with open(self.config_path, 'w', encoding='utf-8') as f:
            yaml.dump(config, f, default_flow_style=False, allow_unicode=True)

class AutoTrainer:
    """End-to-end trainer: builds model, data and optimizer, then runs training.

    Uses mixed precision (GradScaler/autocast) and DataParallel when CUDA is
    available, and falls back to a plain full-precision CPU loop otherwise.
    """

    def __init__(self, config_path: str = "config_auto.yaml"):
        self.logger = setup_logging()
        self.device_manager = DeviceManager()
        self.config_manager = AutoConfig(config_path, self.device_manager)
        self.config = self.config_manager.config

        # Log the detected hardware so a run is reproducible from its log alone.
        device_info = self.device_manager.get_device_info()
        self.logger.info("=" * 60)
        self.logger.info("设备信息:")
        for key, value in device_info.items():
            self.logger.info(f"  {key}: {value}")
        self.logger.info("=" * 60)

        # Components; populated later by the setup_* methods.
        self.model = None
        self.train_loader = None
        self.val_loader = None
        self.test_loader = None
        self.optimizer = None
        self.scheduler = None
        self.criterion = None
        self.physics_loss = None
        self.scaler = None

        # Training state.
        self.current_epoch = 0
        self.best_val_loss = float('inf')
        self.early_stopping_counter = 0

    def setup_model(self):
        """Build the model and move it to the detected device(s)."""
        self.logger.info("初始化模型...")
        self.model = create_enhanced_model(self.config)
        self.model = self.device_manager.setup_model(self.model)

        # Report parameter counts for sanity-checking the architecture.
        total_params = sum(p.numel() for p in self.model.parameters())
        trainable_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
        self.logger.info(f"模型总参数: {total_params:,}")
        self.logger.info(f"可训练参数: {trainable_params:,}")

    def setup_data(self):
        """Create the dataset, split it and build train/val/test DataLoaders."""
        self.logger.info("准备数据加载器...")
        try:
            full_dataset = WeatherDataset(
                station_csv=self.config['data']['station_data_path'],
                himawari_dir=self.config['data']['satellite_data_dir'],
                historical_days=max(1, self.config['data']['sequence_length']),  # at least 1 day of history
                future_hours=self.config['data']['prediction_horizon']
            )

            # Split sizes; the test split absorbs integer-rounding remainders.
            total_size = len(full_dataset)
            train_size = int(total_size * self.config['data']['train_ratio'])
            val_size = int(total_size * self.config['data']['val_ratio'])
            test_size = total_size - train_size - val_size

            train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
                full_dataset, [train_size, val_size, test_size]
            )

            # Only the training loader shuffles; all share the other settings.
            self.train_loader = self._make_loader(train_dataset, shuffle=True)
            self.val_loader = self._make_loader(val_dataset, shuffle=False)
            self.test_loader = self._make_loader(test_dataset, shuffle=False)

            self.logger.info(f"训练样本批次数: {len(self.train_loader)}")
            self.logger.info(f"验证样本批次数: {len(self.val_loader)}")
            self.logger.info(f"测试样本批次数: {len(self.test_loader)}")

        except Exception as e:
            self.logger.error(f"数据加载失败: {e}")
            raise

    def _make_loader(self, dataset, shuffle: bool) -> DataLoader:
        """Build a DataLoader with the shared batch/worker/pinning settings."""
        return DataLoader(
            dataset,
            batch_size=self.config['data']['batch_size'],
            shuffle=shuffle,
            num_workers=self.config['data']['num_workers'],
            collate_fn=collate_fn,
            pin_memory=self.device_manager.use_cuda
        )

    def setup_training(self):
        """Create optimizer, LR scheduler, loss functions and (optionally) the AMP scaler."""
        self.logger.info("设置训练组件...")

        # YAML may deliver '1e-4' as a string -- coerce to float defensively.
        lr = float(self.config['training']['lr'])
        weight_decay = float(self.config['training']['weight_decay'])

        self.optimizer = optim.AdamW(
            self.model.parameters(),
            lr=lr,
            weight_decay=weight_decay
        )

        # Cosine annealing by default; anything else falls back to StepLR.
        if self.config['optimization']['scheduler_type'] == 'cosine':
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer,
                T_max=self.config['training']['epochs']
            )
        else:
            self.scheduler = optim.lr_scheduler.StepLR(
                self.optimizer,
                step_size=10,
                gamma=0.5
            )

        self.criterion = nn.MSELoss()
        # Physics constraints are temporarily disabled: constant-zero loss term.
        self.physics_loss = lambda pred, batch: torch.tensor(0.0, device=pred.device)

        # Mixed precision only applies on CUDA.
        if self.config['training']['mixed_precision'] and self.device_manager.use_cuda:
            self.scaler = GradScaler()
            self.logger.info("启用混合精度训练")

    def _batch_to_device(self, batch):
        """Move a batch (dict of tensors, or a single tensor) to the training device."""
        if isinstance(batch, dict):
            for key in batch:
                if isinstance(batch[key], torch.Tensor):
                    batch[key] = batch[key].to(self.device_manager.device)
            return batch
        return batch.to(self.device_manager.device)

    def _compute_batch_loss(self, batch) -> torch.Tensor:
        """Forward pass plus combined data/physics loss for one batch.

        Assumes batch keys 'station_data' [B, seq, 3], 'himawari_data'
        [B, C, 224, 224] and 'target_data' [B, future, 3] -- per the shape
        comments in the original pipeline; confirm against collate_fn.
        """
        station_data = batch['station_data']
        himawari_data = batch['himawari_data']
        targets = batch['target_data']

        outputs = self.model(station_data, himawari_data)
        # The model may return either a raw tensor or a dict of heads.
        pred = outputs['predictions'] if isinstance(outputs, dict) else outputs

        data_loss = self.criterion(pred, targets)
        physics_loss_val = self.physics_loss(pred, batch)
        return data_loss + self.config['model']['physics_constraint_weight'] * physics_loss_val

    def train_epoch(self) -> float:
        """Train for one epoch and return the mean per-batch loss."""
        self.model.train()
        total_loss = 0.0
        num_batches = 0

        # NOTE(review): config['optimization']['accumulate_grad_batches'] is
        # currently not applied here -- every batch performs an optimizer step.
        for batch_idx, batch in enumerate(self.train_loader):
            batch = self._batch_to_device(batch)
            self.optimizer.zero_grad()

            if self.scaler is not None:
                # Mixed-precision path: autocast forward, scaled backward.
                with autocast():
                    loss = self._compute_batch_loss(batch)

                self.scaler.scale(loss).backward()

                if self.config['training']['gradient_clip'] > 0:
                    # Unscale first so clipping applies to the true gradients.
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config['training']['gradient_clip'])

                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                # Full-precision (CPU or AMP-disabled) path.
                loss = self._compute_batch_loss(batch)
                loss.backward()

                if self.config['training']['gradient_clip'] > 0:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config['training']['gradient_clip'])

                self.optimizer.step()

            total_loss += loss.item()
            num_batches += 1

            if batch_idx % 50 == 0:
                self.logger.info(f"Epoch {self.current_epoch}, Batch {batch_idx}/{len(self.train_loader)}, Loss: {loss.item():.6f}")

        # Guard against an empty loader (the original divided by zero).
        return total_loss / max(1, num_batches)

    def validate_epoch(self) -> float:
        """Evaluate on the validation set and return the mean per-batch loss."""
        self.model.eval()
        total_loss = 0.0
        num_batches = 0

        with torch.no_grad():
            for batch in self.val_loader:
                batch = self._batch_to_device(batch)

                # Mirror the training numerics: autocast when AMP is active.
                if self.scaler is not None:
                    with autocast():
                        loss = self._compute_batch_loss(batch)
                else:
                    loss = self._compute_batch_loss(batch)

                total_loss += loss.item()
                num_batches += 1

        # Guard against an empty loader (the original divided by zero).
        return total_loss / max(1, num_batches)

    def save_checkpoint(self, is_best: bool = False):
        """Persist latest/best/periodic checkpoints under checkpoints_auto/."""
        checkpoint_dir = "checkpoints_auto"
        os.makedirs(checkpoint_dir, exist_ok=True)

        # NOTE(review): with DataParallel active, state_dict keys carry a
        # "module." prefix; loading into an unwrapped model needs stripping.
        checkpoint = {
            'epoch': self.current_epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'best_val_loss': self.best_val_loss,
            'config': self.config
        }

        if self.scaler is not None:
            checkpoint['scaler_state_dict'] = self.scaler.state_dict()

        # Always overwrite the rolling "latest" checkpoint.
        checkpoint_path = os.path.join(checkpoint_dir, 'latest_checkpoint.pth')
        torch.save(checkpoint, checkpoint_path)

        # Keep a separate copy of the best model seen so far.
        if is_best:
            best_path = os.path.join(checkpoint_dir, 'best_checkpoint.pth')
            torch.save(checkpoint, best_path)
            self.logger.info(f"保存最佳检查点: {best_path}")

        # Periodic snapshots every save_interval epochs.
        if self.current_epoch % self.config['training']['save_interval'] == 0:
            epoch_path = os.path.join(checkpoint_dir, f'checkpoint_epoch_{self.current_epoch}.pth')
            torch.save(checkpoint, epoch_path)

    def train(self):
        """Main training loop with LR scheduling, checkpointing and early stopping."""
        self.logger.info("开始训练...")

        # Start from self.current_epoch so a resumed run continues where it
        # left off (the original always restarted from epoch 0).
        for epoch in range(self.current_epoch, self.config['training']['epochs']):
            self.current_epoch = epoch

            train_loss = self.train_epoch()
            val_loss = self.validate_epoch()
            self.scheduler.step()

            self.logger.info(f"Epoch {epoch}: Train Loss = {train_loss:.6f}, Val Loss = {val_loss:.6f}, LR = {self.scheduler.get_last_lr()[0]:.8f}")

            # Track the best validation loss for checkpointing and early stop.
            is_best = val_loss < self.best_val_loss
            if is_best:
                self.best_val_loss = val_loss
                self.early_stopping_counter = 0
            else:
                self.early_stopping_counter += 1

            self.save_checkpoint(is_best)

            if self.early_stopping_counter >= self.config['training']['early_stopping_patience']:
                self.logger.info(f"早停触发，在epoch {epoch}停止训练")
                break

        self.logger.info("训练完成!")
        self.logger.info(f"最佳验证损失: {self.best_val_loss:.6f}")

def main():
    """CLI entry point: build the trainer, optionally resume from a checkpoint, then train."""
    parser = argparse.ArgumentParser(description='自动GPU训练脚本')
    parser.add_argument('--config', type=str, default='config_auto.yaml', help='配置文件路径')
    parser.add_argument('--resume', type=str, help='恢复训练的检查点路径')
    args = parser.parse_args()

    trainer = AutoTrainer(args.config)

    try:
        # Order matters: model before optimizer/scheduler construction.
        trainer.setup_model()
        trainer.setup_data()
        trainer.setup_training()

        # Restore full training state when a checkpoint path was supplied.
        if args.resume:
            # NOTE(review): the checkpoint embeds the config dict, so loading
            # requires full unpickling; torch >= 2.6 defaults to
            # weights_only=True and may need an explicit override -- confirm
            # against the deployed torch version.
            checkpoint = torch.load(args.resume, map_location=trainer.device_manager.device)
            trainer.model.load_state_dict(checkpoint['model_state_dict'])
            trainer.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            trainer.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
            # Resume AFTER the last completed epoch; the original restored the
            # checkpointed epoch number itself and would have re-run it.
            trainer.current_epoch = checkpoint['epoch'] + 1
            trainer.best_val_loss = checkpoint['best_val_loss']

            if trainer.scaler is not None and 'scaler_state_dict' in checkpoint:
                trainer.scaler.load_state_dict(checkpoint['scaler_state_dict'])

            logging.info(f"从epoch {trainer.current_epoch}恢复训练")

        trainer.train()

    except Exception as e:
        # Log, then re-raise so the process exits non-zero.
        logging.error(f"训练过程中发生错误: {e}")
        raise

if __name__ == "__main__":
    main()
