#!/usr/bin/env python3
"""
增强版模型测试脚本
验证模型架构、前向传播和性能对比
"""

import os
import sys
import yaml
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import time
import psutil
import gc
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')

# 导入模型
from models_enhanced import EnhancedCombinedModel, create_enhanced_model
from models import CombinedModel
from data_utils_final import WeatherDataset, collate_fn

class ModelTester:
    """Test harness for the enhanced weather-prediction model.

    Runs a battery of smoke tests against the enhanced model stack:
    architecture instantiation for several ViT sizes, individual
    sub-module forward passes, progressive-training utilities, a
    parameter/speed/memory comparison against the original model,
    real-data compatibility, and finally a plain-text report.

    Attributes set in ``__init__`` / ``create_test_data``:
        config        -- parsed YAML configuration (dict)
        device        -- torch device ('cuda' if available, else 'cpu')
        station_data  -- random tensor (batch, seq_len, input_dim)
        himawari_data -- random tensor (batch, channels, H, W)
        input_dim     -- per-timestep feature count of station_data
        test_results  -- per-ViT-config results, filled by
                         test_model_architecture()
    """

    def __init__(self, config_path='config_enhanced.yaml'):
        """Load the YAML config, select a device and build synthetic inputs.

        Args:
            config_path: path to the enhanced-model YAML configuration.
        """
        with open(config_path, 'r', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f"Using device: {self.device}")

        # Build the random tensors shared by all tests.
        self.create_test_data()

        # Results keyed by ViT config name; see test_model_architecture().
        self.test_results = {}

    def create_test_data(self):
        """Create random station / Himawari tensors used by every test."""
        batch_size = 2
        seq_len = 480  # 5 days * 24 hours * 4 fifteen-minute steps
        input_dim = 3
        num_channels = 4
        height, width = 224, 224

        # Persist the feature dimension so shape checks in other methods
        # can reference it (it was previously only a local here).
        self.input_dim = input_dim

        # Random stand-ins for real station and satellite inputs.
        self.station_data = torch.randn(batch_size, seq_len, input_dim)
        self.himawari_data = torch.randn(batch_size, num_channels, height, width)

        print(f"Test data created:")
        print(f"  Station data shape: {self.station_data.shape}")
        print(f"  Himawari data shape: {self.himawari_data.shape}")

    def test_model_architecture(self):
        """Instantiate the model with several ViT sizes and time a forward pass.

        For each ViT variant it records parameter counts, forward-pass
        latency, GPU memory use and whether the output shape matches
        ``(batch, future_hours * 4, input_dim)`` into ``self.test_results``.
        """
        print("\n" + "="*60)
        print("🏗️  测试模型架构")
        print("="*60)

        vit_configs = ['vit_base', 'vit_large', 'vit_huge']

        for vit_config in vit_configs:
            print(f"\n📋 测试 {vit_config} 配置:")

            try:
                # Clone the config so each variant starts from the same base.
                test_config = self.config.copy()
                test_config['model']['vit_size'] = vit_config

                model = create_enhanced_model(test_config)
                model.to(self.device)

                # Parameter counts (total vs. trainable).
                total_params = sum(p.numel() for p in model.parameters())
                trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)

                # Timed inference-mode forward pass.
                model.eval()
                with torch.no_grad():
                    start_time = time.time()

                    station_data = self.station_data.to(self.device)
                    himawari_data = self.himawari_data.to(self.device)

                    output = model(station_data, himawari_data)

                    forward_time = time.time() - start_time

                # Expected output: one prediction per 15-minute step over the
                # configured future horizon, with the station feature dim.
                # (Bug fix: previously referenced an undefined local
                # `input_dim`; it now uses self.input_dim.)
                expected_shape = (self.station_data.shape[0],
                                  self.config['data']['future_hours'] * 4,
                                  self.input_dim)

                shape_correct = output.shape == expected_shape

                # Current GPU memory footprint (GB); 0 when running on CPU.
                if torch.cuda.is_available():
                    memory_used = torch.cuda.memory_allocated() / 1024**3  # GB
                    torch.cuda.empty_cache()
                else:
                    memory_used = 0

                self.test_results[vit_config] = {
                    'total_params': total_params,
                    'trainable_params': trainable_params,
                    'forward_time': forward_time,
                    'memory_used': memory_used,
                    'output_shape': output.shape,
                    'shape_correct': shape_correct,
                    'success': True
                }

                print(f"  ✅ 成功")
                print(f"     总参数: {total_params:,}")
                print(f"     可训练参数: {trainable_params:,}")
                print(f"     前向传播时间: {forward_time:.4f}s")
                print(f"     内存使用: {memory_used:.2f}GB")
                print(f"     输出形状: {output.shape}")
                print(f"     形状正确: {shape_correct}")

            except Exception as e:
                print(f"  ❌ 失败: {str(e)}")
                self.test_results[vit_config] = {'success': False, 'error': str(e)}

            # Free model memory before the next (possibly larger) variant.
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

    def test_model_components(self):
        """Run a forward pass through each enhanced sub-module in isolation."""
        print("\n" + "="*60)
        print("🔧 测试模型组件")
        print("="*60)

        from models_enhanced import (
            TemporalSequenceEncoder,
            SpatioTemporalFusion,
            AdaptivePhysicsConstraints,
            EnhancedVitAssimilation,
            EnhancedUNetDownscale
        )

        # Temporal sequence encoder.
        print("\n📊 测试 TemporalSequenceEncoder:")
        try:
            temporal_encoder = TemporalSequenceEncoder(
                input_dim=3,
                hidden_dim=512,
                num_layers=3
            ).to(self.device)

            with torch.no_grad():
                temporal_output = temporal_encoder(self.station_data.to(self.device))

            print(f"  ✅ 输入形状: {self.station_data.shape}")
            print(f"  ✅ 输出形状: {temporal_output.shape}")
            print(f"  ✅ 参数数量: {sum(p.numel() for p in temporal_encoder.parameters()):,}")

        except Exception as e:
            print(f"  ❌ 失败: {str(e)}")

        # ViT assimilation module.
        print("\n🖼️  测试 EnhancedVitAssimilation:")
        try:
            vit_assimilation = EnhancedVitAssimilation(
                num_channels=4,
                vit_config='vit_large'
            ).to(self.device)

            with torch.no_grad():
                vit_output = vit_assimilation(self.himawari_data.to(self.device))

            print(f"  ✅ 输入形状: {self.himawari_data.shape}")
            print(f"  ✅ 输出形状: {vit_output.shape}")
            print(f"  ✅ 参数数量: {sum(p.numel() for p in vit_assimilation.parameters()):,}")

        except Exception as e:
            print(f"  ❌ 失败: {str(e)}")

        # Spatio-temporal fusion module.
        print("\n🔄 测试 SpatioTemporalFusion:")
        try:
            spatiotemporal_fusion = SpatioTemporalFusion(
                vit_dim=1024,
                temporal_dim=512
            ).to(self.device)

            # Synthetic token features: 196 = 14x14 patches.
            vit_features = torch.randn(2, 196, 1024).to(self.device)  # 14x14 patches
            temporal_features = torch.randn(2, 196, 512).to(self.device)

            with torch.no_grad():
                fusion_output = spatiotemporal_fusion(vit_features, temporal_features)

            print(f"  ✅ ViT特征形状: {vit_features.shape}")
            print(f"  ✅ 时间特征形状: {temporal_features.shape}")
            print(f"  ✅ 融合输出形状: {fusion_output.shape}")
            print(f"  ✅ 参数数量: {sum(p.numel() for p in spatiotemporal_fusion.parameters()):,}")

        except Exception as e:
            print(f"  ❌ 失败: {str(e)}")

        # Adaptive physics-constraint layer.
        print("\n⚖️  测试 AdaptivePhysicsConstraints:")
        try:
            physics_constraints = AdaptivePhysicsConstraints(
                input_dim=3,
                output_dim=3
            ).to(self.device)

            # Synthetic spatial field (batch, vars, H, W).
            spatial_data = torch.randn(2, 3, 56, 56).to(self.device)

            with torch.no_grad():
                constrained_output = physics_constraints(spatial_data)

            print(f"  ✅ 输入形状: {spatial_data.shape}")
            print(f"  ✅ 约束输出形状: {constrained_output.shape}")
            print(f"  ✅ 参数数量: {sum(p.numel() for p in physics_constraints.parameters()):,}")

        except Exception as e:
            print(f"  ❌ 失败: {str(e)}")

    def test_progressive_training(self):
        """Exercise the ProgressiveTraining helper (param groups, freeze/unfreeze)."""
        print("\n" + "="*60)
        print("🎯 测试渐进式训练")
        print("="*60)

        from train_enhanced import ProgressiveTraining

        try:
            model = create_enhanced_model(self.config)

            progressive_trainer = ProgressiveTraining(model, self.config)

            print(f"  ✅ 渐进式训练启用: {progressive_trainer.enabled}")

            if progressive_trainer.enabled:
                print(f"  ✅ ViT冻结epoch数: {progressive_trainer.vit_freeze_epochs}")
                print(f"  ✅ 时间编码器冻结epoch数: {progressive_trainer.temporal_freeze_epochs}")
                print(f"  ✅ 解冻学习率: {progressive_trainer.unfreeze_lr}")

                # Optimizer parameter groups.
                param_groups = progressive_trainer.get_parameter_groups('adam')
                print(f"  ✅ 参数组数量: {len(param_groups)}")

                for i, group in enumerate(param_groups):
                    print(f"     组{i+1}: {len(group['params'])} 参数, lr={group['lr']}")

            # Freeze the ViT, verify, then unfreeze and verify again.
            print("\n🧪 测试参数冻结:")
            progressive_trainer.freeze_vit()
            vit_frozen = all(not p.requires_grad for p in model.vit_assimilation.parameters())
            print(f"  ✅ ViT冻结状态: {vit_frozen}")

            progressive_trainer.unfreeze_vit()
            vit_unfrozen = all(p.requires_grad for p in model.vit_assimilation.parameters())
            print(f"  ✅ ViT解冻状态: {vit_unfrozen}")

        except Exception as e:
            print(f"  ❌ 失败: {str(e)}")

    def compare_with_original(self):
        """Compare the enhanced model with the original CombinedModel.

        Reports parameter counts, inference latency and (on CUDA) peak
        memory. The memory comparison is skipped when the original
        model's forward pass failed.
        """
        print("\n" + "="*60)
        print("📊 与原始模型对比")
        print("="*60)

        try:
            # Enhanced model.
            enhanced_model = create_enhanced_model(self.config)
            enhanced_model.to(self.device)

            # Original model (requires a slightly different config).
            original_config = self.config.copy()
            original_config['model']['type'] = 'combined'
            original_config['model']['vit_model'] = 'vit_base_patch16_224'
            original_config['model']['unet_in_channels'] = 768
            original_config['model']['unet_out_channels'] = 3

            original_model = CombinedModel(original_config)
            original_model.to(self.device)

            # Parameter-count comparison.
            enhanced_params = sum(p.numel() for p in enhanced_model.parameters())
            original_params = sum(p.numel() for p in original_model.parameters())

            print(f"  📈 参数数量对比:")
            print(f"     原始模型: {original_params:,}")
            print(f"     增强模型: {enhanced_params:,}")
            print(f"     增长比例: {(enhanced_params/original_params - 1)*100:.1f}%")

            # Latency comparison.
            print(f"\n  ⚡ 性能对比:")

            # Original model; it may expect a different input format, so
            # its forward pass is tried best-effort.
            original_model.eval()
            with torch.no_grad():
                start_time = time.time()
                try:
                    original_output = original_model(
                        self.station_data.to(self.device),
                        self.himawari_data.to(self.device)
                    )
                    original_time = time.time() - start_time
                    original_success = True
                except Exception as e:
                    print(f"     原始模型测试失败: {str(e)}")
                    original_time = float('inf')
                    original_success = False

            # Enhanced model.
            enhanced_model.eval()
            with torch.no_grad():
                start_time = time.time()
                enhanced_output = enhanced_model(
                    self.station_data.to(self.device),
                    self.himawari_data.to(self.device)
                )
                enhanced_time = time.time() - start_time

            print(f"     原始模型推理时间: {original_time:.4f}s" if original_success else "     原始模型: 测试失败")
            print(f"     增强模型推理时间: {enhanced_time:.4f}s")

            if original_success:
                speed_ratio = enhanced_time / original_time
                print(f"     速度比率: {speed_ratio:.2f}x")

            # Peak-memory comparison. Bug fix: previously this re-ran the
            # original model even when its forward pass had already failed,
            # which re-raised and aborted the whole comparison.
            if torch.cuda.is_available() and original_success:
                torch.cuda.reset_peak_memory_stats()

                # Original model peak memory.
                with torch.no_grad():
                    _ = original_model(
                        self.station_data.to(self.device),
                        self.himawari_data.to(self.device)
                    )
                original_memory = torch.cuda.max_memory_allocated() / 1024**3

                torch.cuda.reset_peak_memory_stats()

                # Enhanced model peak memory.
                with torch.no_grad():
                    _ = enhanced_model(
                        self.station_data.to(self.device),
                        self.himawari_data.to(self.device)
                    )
                enhanced_memory = torch.cuda.max_memory_allocated() / 1024**3

                print(f"\n  💾 内存使用对比:")
                print(f"     原始模型: {original_memory:.2f}GB")
                print(f"     增强模型: {enhanced_memory:.2f}GB")
                print(f"     内存增长: {(enhanced_memory/original_memory - 1)*100:.1f}%")

        except Exception as e:
            print(f"  ❌ 对比测试失败: {str(e)}")

    def test_data_compatibility(self):
        """Verify the real dataset loads and feeds the enhanced model."""
        print("\n" + "="*60)
        print("🔗 测试数据兼容性")
        print("="*60)

        try:
            # Build the training dataset from the config.
            dataset = WeatherDataset(
                config=self.config,
                mode='train'
            )

            print(f"  ✅ 数据集创建成功")
            print(f"  ✅ 数据集大小: {len(dataset)}")

            if len(dataset) > 0:
                # Inspect a single sample.
                sample = dataset[0]
                print(f"  ✅ 样本结构:")
                for key, value in sample.items():
                    if isinstance(value, torch.Tensor):
                        print(f"     {key}: {value.shape} ({value.dtype})")
                    else:
                        print(f"     {key}: {type(value)}")

                # Batched loading through the project's collate_fn.
                from torch.utils.data import DataLoader

                dataloader = DataLoader(
                    dataset,
                    batch_size=2,
                    shuffle=True,
                    collate_fn=collate_fn,
                    num_workers=0
                )

                batch = next(iter(dataloader))
                print(f"  ✅ 批量加载成功:")
                for key, value in batch.items():
                    if isinstance(value, torch.Tensor):
                        print(f"     {key}: {value.shape} ({value.dtype})")

                # Forward the real batch through the enhanced model.
                model = create_enhanced_model(self.config)
                model.to(self.device)
                model.eval()

                with torch.no_grad():
                    station_data = batch['station_data'].to(self.device)
                    himawari_data = batch['himawari_data'].to(self.device)

                    output = model(station_data, himawari_data)

                    print(f"  ✅ 真实数据测试成功:")
                    print(f"     输入形状: station={station_data.shape}, himawari={himawari_data.shape}")
                    print(f"     输出形状: {output.shape}")

        except Exception as e:
            print(f"  ❌ 数据兼容性测试失败: {str(e)}")

    def generate_test_report(self):
        """Write a timestamped plain-text report under test_reports/."""
        print("\n" + "="*60)
        print("📋 生成测试报告")
        print("="*60)

        report_dir = "test_reports"
        os.makedirs(report_dir, exist_ok=True)

        # Timestamped file name avoids clobbering earlier reports.
        report_path = os.path.join(report_dir, f"enhanced_model_test_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt")

        with open(report_path, 'w', encoding='utf-8') as f:
            f.write("增强版气象预测模型测试报告\n")
            f.write("="*60 + "\n")
            f.write(f"测试时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"设备: {self.device}\n\n")

            # Per-ViT-config results collected by test_model_architecture().
            f.write("ViT配置测试结果:\n")
            f.write("-" * 30 + "\n")
            for config, result in self.test_results.items():
                if result.get('success', False):
                    f.write(f"{config}:\n")
                    f.write(f"  总参数: {result['total_params']:,}\n")
                    f.write(f"  可训练参数: {result['trainable_params']:,}\n")
                    f.write(f"  前向传播时间: {result['forward_time']:.4f}s\n")
                    f.write(f"  内存使用: {result['memory_used']:.2f}GB\n")
                    f.write(f"  输出形状: {result['output_shape']}\n")
                    f.write(f"  形状正确: {result['shape_correct']}\n")
                    f.write("\n")
                else:
                    f.write(f"{config}: 测试失败 - {result.get('error', 'Unknown error')}\n\n")

            # Host / GPU hardware summary.
            f.write("系统信息:\n")
            f.write("-" * 30 + "\n")
            f.write(f"CPU: {psutil.cpu_count()} 核心\n")
            f.write(f"内存: {psutil.virtual_memory().total / (1024**3):.1f}GB\n")
            if torch.cuda.is_available():
                f.write(f"GPU: {torch.cuda.get_device_name()}\n")
                f.write(f"GPU内存: {torch.cuda.get_device_properties(0).total_memory / (1024**3):.1f}GB\n")

            f.write("\n测试完成！\n")

        print(f"  ✅ 测试报告已保存: {report_path}")

    def run_all_tests(self):
        """Run every test in order, then write the report."""
        print("🚀 开始运行增强版模型测试...")

        self.test_model_architecture()
        self.test_model_components()
        self.test_progressive_training()
        self.compare_with_original()
        self.test_data_compatibility()

        self.generate_test_report()

        print("\n" + "="*60)
        print("🎉 所有测试完成！")
        print("="*60)

def main():
    """Entry point: build a ModelTester and run the full test suite."""
    ModelTester().run_all_tests()

# Run the full test suite only when executed as a script, not on import.
if __name__ == "__main__":
    main()
