#!/usr/bin/env python3
"""
多GPU训练设置测试脚本
用于验证多GPU训练配置和逻辑是否正确
"""

import torch
import torch.nn as nn
import yaml
import os
from datetime import datetime

def test_gpu_detection():
    """Print a CUDA/GPU detection report.

    Lists every visible GPU with its name and total memory (GiB), or a
    CPU-mode notice when CUDA is unavailable.

    Returns:
        tuple: (cuda_available: bool, gpu_count: int).
    """
    banner = "=" * 50
    print(banner)
    print("GPU检测测试")
    print(banner)

    cuda_ok = torch.cuda.is_available()
    n_gpus = torch.cuda.device_count()

    print(f"CUDA可用: {cuda_ok}")
    print(f"GPU数量: {n_gpus}")

    if not cuda_ok:
        print("未检测到GPU，将使用CPU模式")
    else:
        for idx in range(n_gpus):
            props = torch.cuda.get_device_properties(idx)
            print(f"GPU {idx}: {torch.cuda.get_device_name(idx)}")
            print(f"  内存: {props.total_memory / 1024**3:.1f} GB")

    return cuda_ok, n_gpus

def test_config_loading():
    """Load ./config.yaml and print a summary of its training options.

    Reports the multi-GPU, auto-batch-size and mixed-precision sections
    (each optional, with the defaults shown when absent).

    Returns:
        dict | None: the parsed config, or None if loading/inspection
        failed (missing file, bad YAML, missing 'training' key, ...).
    """
    print("\n" + "=" * 50)
    print("配置文件测试")
    print("=" * 50)

    try:
        with open('config.yaml', 'r', encoding='utf-8') as fh:
            cfg = yaml.safe_load(fh)

        print("✓ 配置文件加载成功")

        training = cfg['training']

        # Multi-GPU section.
        mg_cfg = training.get('multi_gpu', {})
        print(f"多GPU训练启用: {mg_cfg.get('enabled', False)}")
        print(f"多GPU策略: {mg_cfg.get('strategy', 'dp')}")

        # Adaptive batch-size section.
        ab_cfg = training.get('auto_batch_size', {})
        print(f"自适应batch_size启用: {ab_cfg.get('enabled', False)}")
        print(f"每GPU batch_size: {ab_cfg.get('per_gpu_batch', 8)}")

        # Mixed-precision section.
        mp_cfg = training.get('mixed_precision', {})
        print(f"混合精度训练启用: {mp_cfg.get('enabled', False)}")

        return cfg

    except Exception as err:
        # Best-effort diagnostic script: report and continue, never raise.
        print(f"✗ 配置文件加载失败: {err}")
        return None

def test_model_creation(config):
    """测试模型创建"""
    print("\n" + "=" * 50)
    print("模型创建测试")
    print("=" * 50)
    
    try:
        from models import CombinedModel
        
        model = CombinedModel(config)
        total_params = sum(p.numel() for p in model.parameters())
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        
        print(f"✓ 模型创建成功")
        print(f"总参数数量: {total_params:,}")
        print(f"可训练参数: {trainable_params:,}")
        print(f"模型大小: {total_params * 4 / 1024**2:.1f} MB (FP32)")
        
        return model
        
    except Exception as e:
        print(f"✗ 模型创建失败: {e}")
        return None

def test_data_parallel(model):
    """测试DataParallel包装"""
    print("\n" + "=" * 50)
    print("DataParallel测试")
    print("=" * 50)
    
    if not torch.cuda.is_available():
        print("跳过DataParallel测试 (无GPU)")
        return model
    
    try:
        if torch.cuda.device_count() > 1:
            print(f"测试DataParallel包装 ({torch.cuda.device_count()}个GPU)")
            model_dp = nn.DataParallel(model)
            print("✓ DataParallel包装成功")
            
            # 测试前向传播
            device = torch.device("cuda:0")
            model_dp = model_dp.to(device)
            
            # 创建测试数据
            batch_size = 2
            station_data = torch.randn(batch_size, 96, 5).to(device)  # 5天历史数据，每天96个15分钟点
            himawari_data = torch.randn(batch_size, 3, 224, 224).to(device)  # 3波段卫星数据
            
            with torch.no_grad():
                output = model_dp(station_data, himawari_data)
            
            print(f"✓ 前向传播测试成功，输出形状: {output.shape}")
            return model_dp
        else:
            print("只有1个GPU，跳过DataParallel测试")
            return model
            
    except Exception as e:
        print(f"✗ DataParallel测试失败: {e}")
        return model

def test_mixed_precision():
    """Verify that AMP (automatic mixed precision) is usable on this machine.

    Imports the AMP helpers and — when a GPU is present — runs one full
    mixed-precision optimizer step (autocast forward, scaled backward,
    scaler step/update) on a tiny linear model.

    Returns:
        bool: True if the GPU mixed-precision step succeeded; False when
        there is no GPU, AMP is unavailable, or the step raised.
    """
    print("\n" + "=" * 50)
    print("混合精度训练测试")
    print("=" * 50)

    try:
        # Prefer the device-agnostic torch.amp API; torch.cuda.amp is
        # deprecated (FutureWarning since PyTorch 2.4). Fall back to the
        # legacy namespace for older torch versions.
        try:
            from torch.amp import autocast, GradScaler
            use_new_api = True
        except ImportError:
            from torch.cuda.amp import autocast, GradScaler
            use_new_api = False

        print("✓ AMP模块导入成功")

        if not torch.cuda.is_available():
            print("跳过混合精度测试 (无GPU)")
            return False

        device = torch.device("cuda:0")
        model = nn.Linear(10, 1).to(device)
        optimizer = torch.optim.Adam(model.parameters())
        # The new API takes an explicit device type; the legacy one does not.
        scaler = GradScaler("cuda") if use_new_api else GradScaler()

        # Tiny synthetic batch for the smoke test.
        x = torch.randn(8, 10).to(device)
        y = torch.randn(8, 1).to(device)

        # One full mixed-precision training step.
        optimizer.zero_grad()
        ctx = autocast("cuda") if use_new_api else autocast()
        with ctx:
            output = model(x)
            loss = nn.MSELoss()(output, y)

        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        print("✓ 混合精度训练步骤测试成功")
        return True

    except ImportError:
        print("✗ AMP模块不可用")
        return False
    except Exception as e:
        print(f"✗ 混合精度测试失败: {e}")
        return False

def test_adaptive_batch_size(config):
    """测试自适应batch_size计算"""
    print("\n" + "=" * 50)
    print("自适应batch_size测试")
    print("=" * 50)
    
    try:
        # 模拟不同GPU数量的情况
        gpu_counts = [1, 2, 4, 8]
        
        auto_batch_config = config['training'].get('auto_batch_size', {})
        per_gpu_batch = auto_batch_config.get('per_gpu_batch', 8)
        max_total_batch = auto_batch_config.get('max_total_batch', 128)
        
        print(f"每GPU batch_size: {per_gpu_batch}")
        print(f"最大总batch_size: {max_total_batch}")
        print()
        
        for gpu_count in gpu_counts:
            total_batch = per_gpu_batch * gpu_count
            total_batch = min(total_batch, max_total_batch)
            print(f"GPU数量: {gpu_count:2d} -> 总batch_size: {total_batch:3d}")
        
        return True
        
    except Exception as e:
        print(f"✗ 自适应batch_size测试失败: {e}")
        return False

def generate_performance_report(config):
    """Print a rough performance-expectation report for the current machine.

    Covers theoretical DataParallel speedup, per-GPU memory estimates and
    total training-time estimates. Purely informational; returns None.
    Does nothing beyond the GPU count line when no GPU is present.

    Args:
        config: parsed configuration; reads training.epochs and
        training.auto_batch_size.per_gpu_batch (default 8).
    """
    print("\n" + "=" * 50)
    print("性能预期报告")
    print("=" * 50)

    gpu_count = torch.cuda.device_count() if torch.cuda.is_available() else 0

    print(f"当前GPU数量: {gpu_count}")
    print()

    if gpu_count <= 0:
        return

    def estimated_speedup(n_gpus):
        # DataParallel scales sub-linearly in practice; assume each extra
        # GPU contributes ~70% of one GPU's throughput.
        return 1.0 if n_gpus == 1 else 1.0 + (n_gpus - 1) * 0.7

    print("理论加速比 (DataParallel):")
    for i in range(1, min(gpu_count + 1, 9)):
        print(f"  {i} GPU: {estimated_speedup(i):.1f}x 加速")
    print()

    # Per-GPU memory estimate.
    auto_batch_config = config['training'].get('auto_batch_size', {})
    per_gpu_batch = auto_batch_config.get('per_gpu_batch', 8)

    print(f"内存使用估算 (每GPU):")
    print(f"  Batch size: {per_gpu_batch}")
    # BUGFIX: 111M params * 4 bytes = 444 MB; the previous "/ 1024" wrongly
    # printed ~0.4 MB. (The GB total below, 111*8/1024, was already right.)
    print(f"  模型参数: ~111M ({111 * 4:.0f} MB)")
    print(f"  梯度内存: ~111M ({111 * 4:.0f} MB)")
    print(f"  激活内存: 估算 {per_gpu_batch * 50:.0f} MB")
    print(f"  总计: ~{111 * 8 / 1024 + per_gpu_batch * 50 / 1024:.1f} GB")
    print()

    # Training-time estimate with assumed dataset size and per-batch cost.
    epochs = config['training']['epochs']
    dataset_size = 1000        # assumed dataset size (samples)
    batch_time_single = 0.5    # assumed seconds per batch on a single GPU

    print(f"训练时间估算 ({epochs} epochs):")
    for i in range(1, min(gpu_count + 1, 5)):
        batches_per_epoch = dataset_size // (per_gpu_batch * i)
        hours_per_epoch = batches_per_epoch * batch_time_single / estimated_speedup(i) / 3600
        print(f"  {i} GPU: {hours_per_epoch * epochs:.1f} 小时")

def main():
    """Run the full multi-GPU setup test suite and print a summary.

    Aborts early if the config cannot be loaded or the model cannot be
    created; all other sub-tests are best-effort and never raise.
    """
    print("多GPU训练设置测试")
    print(f"测试时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

    # Hardware detection first; the summary below reuses the result.
    has_gpu, gpu_count = test_gpu_detection()

    config = test_config_loading()
    if config is None:
        return

    model = test_model_creation(config)
    if model is None:
        return

    model = test_data_parallel(model)
    test_mixed_precision()
    test_adaptive_batch_size(config)
    generate_performance_report(config)

    print("\n" + "=" * 50)
    print("测试完成!")
    print("=" * 50)

    if not has_gpu:
        print("⚠ 未检测到GPU，将使用CPU训练")
    elif gpu_count > 1:
        print("✓ 系统支持GPU加速训练")
        print(f"✓ 检测到{gpu_count}个GPU，可使用多GPU训练")
    else:
        print("✓ 系统支持GPU加速训练")
        print("⚠ 检测到1个GPU，只能使用单GPU训练")

    print("\n使用方法:")
    print("1. 单GPU训练: python train.py")
    print("2. 多GPU训练: python train.py (自动检测)")
    print("3. 指定配置: python train.py --config config.yaml")
    print("4. 恢复训练: python train.py --resume checkpoints/best_model.pth")
    print("5. 调整参数: python train.py --epochs 50 --batch-size 32")

# Script entry point: run the full test suite when executed directly.
if __name__ == "__main__":
    main()
