#!/usr/bin/env python3
"""
增强版气象预测模型 - 简化版本
集成ViT升级、时间序列建模和优化物理约束
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class SimpleViT(nn.Module):
    """Minimal Vision Transformer that trains from scratch.

    Built entirely from stock PyTorch layers so no pretrained weights need
    to be downloaded. Produces per-patch features; a [CLS] token is used
    internally but stripped from the output.
    """
    def __init__(self, img_size=224, patch_size=16, in_channels=3, embed_dim=768, num_heads=12, num_layers=12):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        # NOTE(review): assumes img_size is an exact multiple of patch_size.
        self.num_patches = (img_size // patch_size) ** 2
        self.embed_dim = embed_dim

        # Non-overlapping patch projection (kernel == stride == patch size).
        self.patch_embed = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

        # Learnable [CLS] token plus absolute position embeddings.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim))

        # Encoder stack: standard transformer blocks with GELU feed-forward.
        self.blocks = nn.ModuleList(
            nn.TransformerEncoderLayer(
                d_model=embed_dim,
                nhead=num_heads,
                dim_feedforward=embed_dim * 4,
                dropout=0.1,
                activation='gelu',
                batch_first=True,
            )
            for _ in range(num_layers)
        )

        self.norm = nn.LayerNorm(embed_dim)
        self._init_weights()

    def _init_weights(self):
        """ViT-style init: truncated normal for embeddings and linear layers."""
        nn.init.trunc_normal_(self.pos_embed, std=0.02)
        nn.init.trunc_normal_(self.cls_token, std=0.02)

        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.trunc_normal_(module.weight, std=0.02)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.LayerNorm):
                nn.init.constant_(module.bias, 0)
                nn.init.constant_(module.weight, 1.0)

    def forward(self, x):
        """Encode an image batch into patch-level features.

        Args:
            x: [batch_size, in_channels, height, width]
        Returns:
            features: [batch_size, num_patches, embed_dim]
        """
        n = x.shape[0]

        # [n, C, H, W] -> [n, embed_dim, h', w'] -> [n, num_patches, embed_dim]
        tokens = self.patch_embed(x).flatten(2).transpose(1, 2)

        # Prepend the [CLS] token, then add position embeddings.
        tokens = torch.cat([self.cls_token.expand(n, -1, -1), tokens], dim=1)
        tokens = tokens + self.pos_embed

        for layer in self.blocks:
            tokens = layer(tokens)
        tokens = self.norm(tokens)

        # Drop the [CLS] token; downstream consumers use patch features only.
        return tokens[:, 1:, :]

class TemporalSequenceEncoder(nn.Module):
    """Temporal encoder summarizing a weather time series (e.g. 480 steps).

    A bidirectional LSTM reads the sequence; the final hidden states of
    both directions are concatenated and projected down to ``hidden_dim``.
    """
    def __init__(self, input_dim=3, hidden_dim=256, num_layers=2, dropout=0.1):
        super().__init__()
        self.hidden_dim = hidden_dim

        # Bidirectional LSTM encoder. Inter-layer dropout is only valid when
        # num_layers > 1 (PyTorch warns otherwise), hence the conditional.
        self.lstm_encoder = nn.LSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0,
            bidirectional=True
        )

        # Project the concatenated forward+backward states to hidden_dim.
        self.output_proj = nn.Sequential(
            nn.Linear(hidden_dim*2, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        self._init_weights()

    def _init_weights(self):
        """Xavier init for linear layers; unit/zero init for LayerNorm."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """
        Args:
            x: [batch_size, seq_len, input_dim] - weather time series
        Returns:
            temporal_features: [batch_size, hidden_dim]
        """
        # BUG FIX: the previous code used lstm_out[:, -1, :], whose backward
        # half is the backward direction's state at the *last* time step,
        # i.e. it had only consumed a single element. h_n instead holds each
        # direction's true final state: h_n[-2] is the top layer's forward
        # state (after the full sequence) and h_n[-1] is the top layer's
        # backward state (after traversing the full sequence in reverse).
        _, (h_n, _) = self.lstm_encoder(x)  # h_n: [num_layers*2, batch, hidden_dim]
        summary = torch.cat([h_n[-2], h_n[-1]], dim=1)  # [batch_size, hidden_dim*2]

        return self.output_proj(summary)  # [batch_size, hidden_dim]

class SpatioTemporalFusion(nn.Module):
    """Fuse ViT spatial features with a global temporal feature vector.

    The temporal vector is projected to the ViT width, broadcast across all
    patches, attended to by the spatial tokens via cross attention, and the
    result is merged back with the raw spatial features through an MLP.
    """
    def __init__(self, vit_dim=768, temporal_dim=256, num_heads=8, dropout=0.1):
        super().__init__()
        self.vit_dim = vit_dim
        self.temporal_dim = temporal_dim

        # Align the temporal feature width with the ViT embedding width.
        self.temporal_proj = nn.Linear(temporal_dim, vit_dim)

        # Spatial tokens query the (broadcast) temporal representation.
        self.cross_attention = nn.MultiheadAttention(
            embed_dim=vit_dim,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True
        )

        # Two-layer MLP mixing attended and raw spatial features.
        self.fusion_layer = nn.Sequential(
            nn.Linear(vit_dim * 2, vit_dim),
            nn.LayerNorm(vit_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(vit_dim, vit_dim),
            nn.LayerNorm(vit_dim)
        )

        self._init_weights()

    def _init_weights(self):
        """Xavier init for linear layers; unit/zero init for LayerNorm."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.LayerNorm):
                nn.init.constant_(module.bias, 0)
                nn.init.constant_(module.weight, 1.0)

    def forward(self, vit_features, temporal_features):
        """
        Args:
            vit_features: [batch_size, num_patches, vit_dim] spatial tokens
            temporal_features: [batch_size, temporal_dim] temporal summary
        Returns:
            fused_features: [batch_size, num_patches, vit_dim]
        """
        num_patches = vit_features.size(1)

        # Project, then replicate the temporal vector once per patch.
        aligned = self.temporal_proj(temporal_features).unsqueeze(1)
        broadcast = aligned.expand(-1, num_patches, -1)

        # Cross attention: spatial tokens as queries, temporal as key/value.
        attended, _ = self.cross_attention(vit_features, broadcast, broadcast)

        # Concatenate along the channel axis and mix through the fusion MLP.
        merged = torch.cat([attended, vit_features], dim=-1)
        return self.fusion_layer(merged)

class SimpleUNet(nn.Module):
    """Compact UNet used as the downscaling head.

    Four-level encoder/decoder with additive (not concatenative) skip
    connections; each decoder output is resized to its matching encoder map
    before the skip is added.
    """
    def __init__(self, in_channels, out_channels, img_size=224):
        super().__init__()
        # NOTE(review): img_size is currently unused; kept for interface
        # compatibility with existing callers.

        # Encoder pyramid: channel count doubles at every level.
        self.encoder1 = self._double_conv(in_channels, 64)
        self.encoder2 = self._double_conv(64, 128)
        self.encoder3 = self._double_conv(128, 256)
        self.encoder4 = self._double_conv(256, 512)

        # Bottleneck at the coarsest resolution.
        self.bottleneck = self._double_conv(512, 1024)

        # Decoder pyramid mirroring the encoder.
        self.decoder4 = self._up_conv(1024, 512)
        self.decoder3 = self._up_conv(512, 256)
        self.decoder2 = self._up_conv(256, 128)
        self.decoder1 = self._up_conv(128, 64)

        # Head: reduce to 32 channels, then to the requested output channels.
        self.final_conv = nn.Sequential(
            nn.Conv2d(64, 32, 3, padding=1),
            nn.GELU(),
            nn.Conv2d(32, out_channels, 3, padding=1)
        )

        self._init_weights()

    @staticmethod
    def _double_conv(in_channels, out_channels):
        """Two 3x3 conv + BN + GELU stages followed by spatial dropout."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
            nn.Dropout2d(0.1)
        )

    @staticmethod
    def _up_conv(in_channels, out_channels):
        """2x transposed-conv upsample followed by a conv + BN + GELU stage."""
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, 2, stride=2),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
            nn.Dropout2d(0.1)
        )

    def _init_weights(self):
        """Kaiming init for conv layers; unit/zero init for BatchNorm."""
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.bias, 0)
                nn.init.constant_(module.weight, 1.0)

    @staticmethod
    def _up_and_add(decoded, skip):
        """Resize a decoder map to the skip connection's size, then add."""
        resized = F.interpolate(decoded, size=skip.shape[2:], mode='bilinear', align_corners=False)
        return resized + skip

    def forward(self, x):
        """
        Args:
            x: [batch_size, in_channels, height, width]
        Returns:
            output: [batch_size, out_channels, height, width]
        """
        # Encoding path: halve the resolution before each deeper level.
        enc1 = self.encoder1(x)
        enc2 = self.encoder2(F.max_pool2d(enc1, 2))
        enc3 = self.encoder3(F.max_pool2d(enc2, 2))
        enc4 = self.encoder4(F.max_pool2d(enc3, 2))

        mid = self.bottleneck(F.max_pool2d(enc4, 2))

        # Decoding path: upsample, align to the encoder map, add the skip.
        up = self._up_and_add(self.decoder4(mid), enc4)
        up = self._up_and_add(self.decoder3(up), enc3)
        up = self._up_and_add(self.decoder2(up), enc2)
        up = self._up_and_add(self.decoder1(up), enc1)

        return self.final_conv(up)

class SimplePhysicsConstraints(nn.Module):
    """Lightweight, differentiable physics-inspired output regularizers.

    Adds two learned residual corrections on top of the input field: a
    depthwise smoothing term and a sigmoid-gated scaling term ("energy
    conservation"). Both are blended in with a fixed 0.1 weight so the
    identity path dominates.
    """
    def __init__(self, input_dim):
        super().__init__()
        self.input_dim = input_dim

        # Depthwise 3x3 conv: each channel is smoothed independently.
        self.smoothness_conv = nn.Conv2d(input_dim, input_dim, 3, padding=1, groups=input_dim)

        # 1x1 conv + sigmoid yields a per-pixel, per-channel gate in (0, 1).
        self.energy_conv = nn.Sequential(
            nn.Conv2d(input_dim, input_dim, 1),
            nn.Sigmoid()
        )

        self._init_weights()

    def _init_weights(self):
        """Kaiming init for every conv layer."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """
        Args:
            x: [batch_size, input_dim, height, width]
        Returns:
            constrained_x: [batch_size, input_dim, height, width]
        """
        smooth_term = self.smoothness_conv(x)
        gate = self.energy_conv(x)

        # Residual blend: x + 0.1 * smoothing + 0.1 * gated copy of x.
        return x + 0.1 * smooth_term + 0.1 * gate * x

class EnhancedCombinedModel(nn.Module):
    """Combined forecasting model (simplified version).

    Pipeline: station time series -> LSTM summary; satellite imagery ->
    ViT patch features; the two are fused with cross attention, reshaped
    onto a spatial grid, downscaled by a UNet, physics-constrained, pooled,
    and finally broadcast over the future time steps through an output MLP.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config

        # Core dimensions pulled from the config.
        input_dim = config['model']['num_output_vars']
        num_channels = config['model']['num_channels']
        img_size = config['model']['img_height']

        # Time-series encoder for the station observations.
        self.temporal_encoder = TemporalSequenceEncoder(
            input_dim=input_dim,
            hidden_dim=config['model'].get('temporal_hidden_dim', 256),
            num_layers=config['model'].get('temporal_num_layers', 2),
            dropout=config['model'].get('temporal_dropout', 0.1)
        )

        # Lightweight ViT for satellite-image assimilation.
        self.vit_assimilation = SimpleViT(
            img_size=img_size,
            patch_size=16,
            in_channels=num_channels,
            embed_dim=768,
            num_heads=12,
            num_layers=6  # reduced depth so the model runs on CPU
        )

        # Cross-attention fusion of spatial and temporal features.
        self.spatiotemporal_fusion = SpatioTemporalFusion(
            vit_dim=768,
            temporal_dim=config['model'].get('temporal_hidden_dim', 256),
            num_heads=config['model'].get('fusion_heads', 8),
            dropout=config['model'].get('fusion_dropout', 0.1)
        )

        # Project fused 768-dim tokens down to the UNet's 512-channel input.
        self.feature_reshape = nn.Sequential(
            nn.Linear(768, 512),
            nn.LayerNorm(512),
            nn.GELU(),
            nn.Dropout(0.1)
        )

        # UNet downscaling head.
        self.unet_downscale = SimpleUNet(
            in_channels=512,
            out_channels=input_dim,
            img_size=img_size
        )

        # Soft physics constraints on the downscaled field.
        self.physics_constraints = SimplePhysicsConstraints(input_dim)

        # Per-time-step output head, applied after broadcasting over the
        # forecast horizon. (Removed an unused local `future_hours` here;
        # the horizon is read from the config inside forward().)
        self.output_layer = nn.Sequential(
            nn.Linear(input_dim, input_dim),
            nn.LayerNorm(input_dim),
            nn.GELU(),
            nn.Dropout(0.1),
            nn.Linear(input_dim, input_dim)
        )

        self._init_weights()

    def _init_weights(self):
        """Xavier init for linear layers; unit/zero init for LayerNorm.

        NOTE(review): this iterates over *all* submodules and therefore
        overwrites the trunc-normal init that SimpleViT applied to its own
        Linear layers. Kept as-is to preserve existing behaviour — confirm
        this is intended.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

    def forward(self, station_data, himawari_data):
        """
        Args:
            station_data: [batch_size, seq_len, input_dim] station series
            himawari_data: [batch_size, num_channels, height, width] imagery
        Returns:
            output: [batch_size, future_steps, input_dim], where
            future_steps = config['data']['future_hours'] * 4 (15-min cadence)
        """
        batch_size = station_data.shape[0]
        future_steps = self.config['data']['future_hours'] * 4  # 15-min intervals

        # Encode the two modalities and fuse them.
        temporal_features = self.temporal_encoder(station_data)
        vit_features = self.vit_assimilation(himawari_data)
        fused_features = self.spatiotemporal_fusion(vit_features, temporal_features)
        enhanced_features = self.feature_reshape(fused_features)

        # Fold the token sequence back onto an (h, w) grid for the UNet.
        # Assumes the number of patches is a perfect square.
        # (Removed an unused local `patch_size` that was defined here.)
        h = w = int(math.sqrt(enhanced_features.size(1)))
        spatial_features = enhanced_features.transpose(1, 2).view(
            batch_size, -1, h, w
        )

        # Spatial downscaling followed by soft physics constraints.
        downscaled = self.unet_downscale(spatial_features)
        constrained_output = self.physics_constraints(downscaled)

        # Collapse the spatial grid to one vector per sample.
        pooled = F.adaptive_avg_pool2d(constrained_output, 1).view(batch_size, -1)

        # Broadcast the pooled state over the forecast horizon, then refine
        # each step with the output head.
        output_sequence = pooled.unsqueeze(1).expand(-1, future_steps, -1)
        return self.output_layer(output_sequence)

def create_enhanced_model(config):
    """Instantiate EnhancedCombinedModel and report its parameter budget.

    Args:
        config: nested dict with 'model' and 'data' sections.
    Returns:
        The constructed (untrained) model.
    """
    model = EnhancedCombinedModel(config)

    # Tally total vs. trainable parameter counts in a single pass.
    total_params = 0
    trainable_params = 0
    for param in model.parameters():
        count = param.numel()
        total_params += count
        if param.requires_grad:
            trainable_params += count

    print("Enhanced Model Created (Simple Version):")
    print(f"  Total parameters: {total_params:,}")
    print(f"  Trainable parameters: {trainable_params:,}")
    print(f"  Temporal hidden dim: {config['model'].get('temporal_hidden_dim', 256)}")

    return model

if __name__ == "__main__":
    # 测试模型
    import yaml
    
    # 加载配置
    with open('config_enhanced_cpu.yaml', 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)
    
    # 创建模型
    model = create_enhanced_model(config)
    
    # 测试前向传播
    batch_size = 2
    seq_len = 480  # 5天 * 24小时 * 4个15分钟点
    input_dim = 3
    num_channels = 4
    height, width = 224, 224
    
    station_data = torch.randn(batch_size, seq_len, input_dim)
    himawari_data = torch.randn(batch_size, num_channels, height, width)
    
    with torch.no_grad():
        output = model(station_data, himawari_data)
    
    print(f"\nForward pass test:")
    print(f"  Input shapes: station_data={station_data.shape}, himawari_data={himawari_data.shape}")
    print(f"  Output shape: {output.shape}")
    print(f"  Expected output shape: ({batch_size}, {config['data']['future_hours']*4}, {input_dim})")
    
    # 验证输出形状
    expected_shape = (batch_size, config['data']['future_hours']*4, input_dim)
    assert output.shape == expected_shape, f"Output shape mismatch: {output.shape} vs {expected_shape}"
    print("✓ Model test passed!")
