#!/usr/bin/env python3
"""
增强版气象预测模型
集成ViT升级、时间序列建模和优化物理约束
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from timm.models.vision_transformer import vit_base_patch16_224, vit_large_patch16_224, vit_huge_patch14_224
from timm.models.layers import trunc_normal_

# Available ViT backbone configurations; values mirror the corresponding
# timm `vit_*_patch*_224` models (embedding width, heads, depth, patch size).
VIT_CONFIGS = {
    'vit_base': {
        'model_name': 'vit_base_patch16_224',
        'embed_dim': 768,
        'num_heads': 12,
        'num_layers': 12,
        'patch_size': 16
    },
    'vit_large': {
        'model_name': 'vit_large_patch16_224',
        'embed_dim': 1024,
        'num_heads': 16,
        'num_layers': 24,
        'patch_size': 16
    },
    'vit_huge': {
        # NOTE: uses a 14-pixel patch, unlike the 16-pixel base/large variants.
        'model_name': 'vit_huge_patch14_224',
        'embed_dim': 1280,
        'num_heads': 16,
        'num_layers': 32,
        'patch_size': 14
    }
}

class TemporalSequenceEncoder(nn.Module):
    """Temporal encoder for long meteorological series (up to ``max_len`` steps).

    Pipeline: bidirectional LSTM -> additive sinusoidal position encoding ->
    2-layer Transformer encoder -> linear projection back to ``hidden_dim``.
    """

    def __init__(self, input_dim=3, hidden_dim=512, num_layers=3, dropout=0.1,
                 max_len=480):
        """
        Args:
            input_dim: number of variables per time step.
            hidden_dim: LSTM hidden size (the bi-LSTM output is 2x this).
            num_layers: number of stacked LSTM layers.
            dropout: dropout used in the LSTM, Transformer and output head.
            max_len: maximum supported sequence length for the position table
                (default 480, i.e. the original hard-coded value).
        """
        super().__init__()
        self.hidden_dim = hidden_dim

        # Bidirectional LSTM captures local temporal context in both directions.
        self.lstm_encoder = nn.LSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0,  # nn.LSTM warns otherwise
            bidirectional=True
        )

        # Transformer encoder captures long-range dependencies on top of the LSTM.
        self.temporal_transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model=hidden_dim * 2,  # bi-LSTM doubles the feature size
                nhead=8,
                dim_feedforward=hidden_dim * 4,
                dropout=dropout,
                activation='gelu',
                batch_first=True
            ),
            num_layers=2
        )

        # Registered as a non-persistent buffer so it follows the module's
        # device/dtype via .to()/.cuda(), replacing the previous ad-hoc
        # per-forward device check; persistent=False keeps it out of the
        # state_dict, matching the old plain-attribute behavior.
        self.register_buffer(
            'temporal_pe',
            self._create_temporal_pe(max_len, hidden_dim * 2),
            persistent=False
        )

        # Project the 2*hidden_dim transformer output back to hidden_dim.
        self.output_proj = nn.Sequential(
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        self._init_weights()

    def _create_temporal_pe(self, max_len, d_model):
        """Build the standard sinusoidal position table, shape [1, max_len, d_model]."""
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
                             (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        return pe.unsqueeze(0)

    def _init_weights(self):
        """Truncated-normal init for Linear layers, identity-style init for LayerNorm."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                # torch's built-in trunc_normal_ replaces the deprecated timm helper.
                nn.init.trunc_normal_(m.weight, std=0.02)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """
        Args:
            x: [batch_size, seq_len, input_dim] meteorological time series;
               seq_len must not exceed ``max_len``.
        Returns:
            [batch_size, seq_len, hidden_dim] temporal features.
        """
        seq_len = x.size(1)

        # LSTM encoding -> [batch_size, seq_len, hidden_dim*2]
        lstm_out, _ = self.lstm_encoder(x)

        # Add the (buffered, device-correct) position encoding.
        lstm_out = lstm_out + self.temporal_pe[:, :seq_len, :]

        # Transformer refinement.
        temporal_features = self.temporal_transformer(lstm_out)

        # Project back to hidden_dim.
        return self.output_proj(temporal_features)

class SpatioTemporalFusion(nn.Module):
    """Spatio-temporal fusion of ViT spatial features and temporal features.

    Cross-attention lets spatial patches query the temporal sequence, a
    self-attention pass refines the result, and a gated residual mixes the
    fused representation back with the projected ViT features.
    """
    def __init__(self, vit_dim=1024, temporal_dim=512, num_heads=16, dropout=0.1):
        """
        Args:
            vit_dim: dimension of ViT patch features (also the fusion width).
            temporal_dim: dimension of the incoming temporal features.
            num_heads: attention heads (must divide vit_dim).
            dropout: dropout rate in attention and fusion layers.
        """
        super().__init__()
        self.vit_dim = vit_dim
        self.temporal_dim = temporal_dim

        # Align both modalities to vit_dim.
        self.temporal_proj = nn.Linear(temporal_dim, vit_dim)
        self.vit_proj = nn.Linear(vit_dim, vit_dim)

        # Cross-attention: spatial queries over temporal keys/values.
        self.cross_attention = nn.MultiheadAttention(
            embed_dim=vit_dim,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True
        )

        # Self-attention refinement of the cross-attended features.
        self.self_attention = nn.MultiheadAttention(
            embed_dim=vit_dim,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True
        )

        # Two-layer fusion MLP over the concatenated features.
        self.fusion_layer = nn.Sequential(
            nn.Linear(vit_dim * 2, vit_dim),
            nn.LayerNorm(vit_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(vit_dim, vit_dim),
            nn.LayerNorm(vit_dim)
        )

        # Gate deciding how much fused vs. original ViT signal to keep.
        self.gate = nn.Sequential(
            nn.Linear(vit_dim * 2, vit_dim),
            nn.Sigmoid()
        )

        self._init_weights()

    def _init_weights(self):
        """Truncated-normal init for Linear layers, identity-style init for LayerNorm."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                # torch's built-in trunc_normal_ replaces the deprecated timm helper.
                nn.init.trunc_normal_(m.weight, std=0.02)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

    def forward(self, vit_features, temporal_features):
        """
        Args:
            vit_features: [batch_size, num_patches, vit_dim] ViT spatial features.
            temporal_features: [batch_size, seq_len, temporal_dim] temporal features.
        Returns:
            [batch_size, num_patches, vit_dim] fused features.
        """
        # Align dimensions.
        temporal_aligned = self.temporal_proj(temporal_features)
        vit_aligned = self.vit_proj(vit_features)

        # Cross-attention: spatial features query the temporal features.
        cross_attended, _ = self.cross_attention(
            vit_aligned, temporal_aligned, temporal_aligned
        )

        # Self-attention enhancement.
        self_attended, _ = self.self_attention(
            cross_attended, cross_attended, cross_attended
        )

        # Feature fusion over the concatenation.
        combined = torch.cat([self_attended, vit_aligned], dim=-1)
        fused = self.fusion_layer(combined)

        # Gated residual mix between fused and aligned ViT features.
        gate_weights = self.gate(combined)
        return gate_weights * fused + (1 - gate_weights) * vit_aligned

class ConservationConstraint(nn.Module):
    """Energy-conservation physical constraint.

    Rescales the input field toward a learned target "energy", gates the
    adjustment with a per-channel strength, and finishes with a residual
    LayerNorm over the channel dimension.
    """
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

        # Predicts the target energy field from the input.
        self.energy_net = nn.Sequential(
            nn.Conv2d(dim, dim, 1),
            nn.GELU(),
            nn.Conv2d(dim, dim, 1)
        )

        # Per-channel gate in (0, 1) computed from globally pooled features.
        self.constraint_strength = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(dim, dim, 1),
            nn.Sigmoid()
        )

        # Channel-wise LayerNorm, applied in channels-last layout in forward().
        self.norm = nn.LayerNorm(dim)

        self._init_weights()

    def _init_weights(self):
        """Kaiming init for convolutions, identity-style init for LayerNorm."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.LayerNorm):
                nn.init.constant_(module.bias, 0)
                nn.init.constant_(module.weight, 1.0)

    def forward(self, x):
        """
        Args:
            x: [batch_size, dim, height, width]
        Returns:
            Constrained tensor of the same shape.
        """
        # Squared magnitude summed over channels acts as the current "energy".
        observed_energy = torch.sum(x ** 2, dim=1, keepdim=True)

        # Learned target energy and adjustment strength.
        target_energy = self.energy_net(x)
        strength = self.constraint_strength(x)

        # Pull the field toward the target; tanh keeps the ratio bounded.
        ratio = torch.tanh(target_energy / (observed_energy + 1e-8))
        adjusted = x * (1 + strength * (ratio - 1))

        # Residual connection + LayerNorm over the channel axis
        # (done in channels-last layout, then permuted back).
        channels_last = (adjusted + x).permute(0, 2, 3, 1)
        normalized = self.norm(channels_last)
        return normalized.permute(0, 3, 1, 2)

class AdaptivePhysicsConstraints(nn.Module):
    """Physics-constraint layer with adaptive, learnable weighting.

    Applies three additive constraint terms (wind advection, terrain
    correction, energy conservation), each scaled by the product of a global
    learnable weight and a per-sample weight predicted from the input.
    """
    def __init__(self, input_dim, output_dim, config=None):
        """
        Args:
            input_dim: number of feature channels of the constrained field.
            output_dim: kept for interface compatibility (output keeps input_dim channels).
            config: optional configuration dict (currently unused).
        """
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim

        # Global learnable constraint strengths (squashed with sigmoid in forward).
        self.advection_weight = nn.Parameter(torch.tensor(1.0))
        self.terrain_weight = nn.Parameter(torch.tensor(1.0))
        self.conservation_weight = nn.Parameter(torch.tensor(1.0))

        # Enhanced wind-advection constraint (depthwise conv first).
        self.enhanced_advection = nn.Sequential(
            nn.Conv2d(input_dim, input_dim * 2, 3, padding=1, groups=input_dim),
            nn.GELU(),
            nn.Conv2d(input_dim * 2, input_dim * 2, 3, padding=1),
            nn.GELU(),
            nn.Conv2d(input_dim * 2, input_dim, 3, padding=1),
            nn.Sigmoid()
        )

        # Enhanced terrain-correction constraint.
        self.enhanced_terrain = nn.Sequential(
            nn.Conv2d(input_dim, input_dim * 2, 3, padding=1, groups=input_dim),
            nn.GELU(),
            nn.Conv2d(input_dim * 2, input_dim * 2, 3, padding=1),
            nn.GELU(),
            nn.Conv2d(input_dim * 2, input_dim, 3, padding=1),
            nn.Tanh()
        )

        # Energy-conservation constraint.
        self.conservation_layer = ConservationConstraint(input_dim)

        # Per-sample constraint weights. The hidden width is clamped to >= 1:
        # the old `input_dim // 4` produced a zero-channel conv for
        # input_dim < 4 (the model uses input_dim=3), which crashed during
        # kaiming init (fan_out == 0).
        hidden = max(input_dim // 4, 1)
        self.adaptive_weights = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(input_dim, hidden, 1),
            nn.GELU(),
            nn.Conv2d(hidden, 3, 1),  # one weight per constraint
            nn.Softmax(dim=1)
        )

        self._init_weights()

    def _init_weights(self):
        """Kaiming init for convolutions, identity-style init for LayerNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

    def forward(self, x, wind_u=None, wind_v=None):
        """
        Args:
            x: [batch_size, input_dim, height, width]
            wind_u: [batch_size, height, width] u wind component (optional).
            wind_v: [batch_size, height, width] v wind component (optional).
        Returns:
            constrained_x: [batch_size, input_dim, height, width]
        """
        # Per-sample adaptive weights -> [batch_size, 3, 1, 1].
        adaptive_w = self.adaptive_weights(x)
        adv_w, terrain_w, cons_w = adaptive_w[:, 0:1], adaptive_w[:, 1:2], adaptive_w[:, 2:3]

        # Global learnable weights, squashed to (0, 1).
        base_adv_w = torch.sigmoid(self.advection_weight)
        base_terrain_w = torch.sigmoid(self.terrain_weight)
        base_cons_w = torch.sigmoid(self.conservation_weight)

        # Combined weights (per-sample x global).
        final_adv_w = adv_w * base_adv_w
        final_terrain_w = terrain_w * base_terrain_w
        final_cons_w = cons_w * base_cons_w

        # Weighted constraint terms.
        advection_constraint = self.enhanced_advection(x) * final_adv_w
        terrain_constraint = self.enhanced_terrain(x) * final_terrain_w
        conservation_constraint = self.conservation_layer(x) * final_cons_w

        # Optionally modulate the advection term with the actual wind speed.
        if wind_u is not None and wind_v is not None:
            wind_magnitude = torch.sqrt(wind_u**2 + wind_v**2).unsqueeze(1)
            advection_constraint = advection_constraint * (1 + wind_magnitude * 0.1)

        # All constraints are additive corrections on top of the input.
        return x + advection_constraint + terrain_constraint + conservation_constraint

class EnhancedVitAssimilation(nn.Module):
    """Enhanced ViT assimilation module for multi-channel satellite imagery.

    A CNN stem projects the input channels into the ViT embedding space, the
    feature map is tokenized into patches, run through the pretrained ViT
    transformer blocks, and refined with a spatial-attention map.

    Fixes over the previous version:
      * patchification: the old unfold-based flattening produced vectors of
        size patch_size**2 * embed_dim, which could not be concatenated with
        the embed_dim class token / position embeddings (runtime crash);
        patches are now average-pooled to one embed_dim vector each.
      * weight init no longer re-initializes the pretrained ViT backbone.
    """
    def __init__(self, num_channels, vit_config='vit_large', img_size=224, patch_size=16):
        """
        Args:
            num_channels: number of input image channels.
            vit_config: key into VIT_CONFIGS ('vit_base'/'vit_large'/'vit_huge').
            img_size: input spatial resolution (square).
            patch_size: patch edge length.
                NOTE(review): should match the chosen ViT config (14 for
                'vit_huge') — confirm call sites.
        """
        super().__init__()
        self.vit_config = VIT_CONFIGS[vit_config]
        self.num_channels = num_channels
        self.img_size = img_size
        self.patch_size = patch_size

        # CNN stem: project num_channels -> embed_dim. The trailing LayerNorm
        # normalizes over (C, H, W) jointly with per-pixel affine parameters,
        # so this stem is tied to img_size.
        self.input_projection = nn.Sequential(
            nn.Conv2d(num_channels, self.vit_config['embed_dim'] // 4, 3, padding=1),
            nn.GELU(),
            nn.Conv2d(self.vit_config['embed_dim'] // 4, self.vit_config['embed_dim'] // 2, 3, padding=1),
            nn.GELU(),
            nn.Conv2d(self.vit_config['embed_dim'] // 2, self.vit_config['embed_dim'], 3, padding=1),
            nn.LayerNorm([self.vit_config['embed_dim'], img_size, img_size])
        )

        # Load the pretrained ViT backbone.
        if vit_config == 'vit_base':
            self.vit = vit_base_patch16_224(pretrained=True)
        elif vit_config == 'vit_large':
            self.vit = vit_large_patch16_224(pretrained=True)
        elif vit_config == 'vit_huge':
            self.vit = vit_huge_patch14_224(pretrained=True)

        # Our CNN stem replaces the ViT's own patch embedding.
        self.vit.patch_embed = nn.Identity()

        # Per-token feature refinement after the transformer blocks.
        self.feature_enhance = nn.Sequential(
            nn.Linear(self.vit_config['embed_dim'], self.vit_config['embed_dim']),
            nn.LayerNorm(self.vit_config['embed_dim']),
            nn.GELU(),
            nn.Dropout(0.1)
        )

        # Single-channel spatial attention over the patch grid.
        self.spatial_attention = nn.Sequential(
            nn.Conv2d(self.vit_config['embed_dim'], 1, 1),
            nn.Sigmoid()
        )

        self._init_weights()

    def _init_weights(self):
        """Initialize only the newly added layers.

        The old loop iterated over ``self.modules()`` and re-initialized every
        Linear/LayerNorm inside the pretrained ViT, silently discarding the
        pretrained weights loaded above.
        """
        for container in (self.input_projection, self.feature_enhance, self.spatial_attention):
            for m in container.modules():
                if isinstance(m, nn.Linear):
                    # torch's built-in trunc_normal_ replaces the deprecated timm helper.
                    nn.init.trunc_normal_(m.weight, std=0.02)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.LayerNorm):
                    nn.init.constant_(m.bias, 0)
                    nn.init.constant_(m.weight, 1.0)
                elif isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """
        Args:
            x: [batch_size, num_channels, height, width]
        Returns:
            [batch_size, num_patches, embed_dim] attended patch features.
        """
        batch_size = x.shape[0]

        # Project to the embedding space: [batch_size, embed_dim, H, W].
        x_proj = self.input_projection(x)

        h, w = x_proj.shape[2], x_proj.shape[3]
        grid_h, grid_w = h // self.patch_size, w // self.patch_size
        num_patches = grid_h * grid_w

        # Tokenize: average-pool each patch to a single embed_dim vector,
        # then flatten the grid -> [batch_size, num_patches, embed_dim].
        pooled = F.avg_pool2d(x_proj, kernel_size=self.patch_size, stride=self.patch_size)
        x_patches = pooled.flatten(2).transpose(1, 2)

        # Prepend the class token required by the ViT blocks.
        cls_token = self.vit.cls_token.expand(batch_size, -1, -1)
        x_with_cls = torch.cat([cls_token, x_patches], dim=1)

        # Add the (sliced) pretrained position embedding.
        pos_embed = self.vit.pos_embed[:, :num_patches + 1, :]
        x_pos = x_with_cls + pos_embed

        # Run the transformer blocks and drop the class token.
        vit_output = self.vit.blocks(x_pos)
        vit_features = vit_output[:, 1:, :]

        # Per-token refinement.
        enhanced_features = self.feature_enhance(vit_features)

        # Back to the spatial grid for the attention map
        # (reshape, not view: transpose makes the tensor non-contiguous).
        enhanced_spatial = enhanced_features.transpose(1, 2).reshape(
            batch_size, self.vit_config['embed_dim'], grid_h, grid_w
        )
        attention_weights = self.spatial_attention(enhanced_spatial)
        attended_features = enhanced_spatial * attention_weights

        # Back to patch-token format.
        return attended_features.flatten(2).transpose(1, 2)

class EnhancedUNetDownscale(nn.Module):
    """Enhanced UNet downscaling module with attention gates and 1x1 residual skips.

    Fix over the previous version: the residual-skip 1x1 convolutions were
    indexed off by one stage (e.g. the 1024-channel conv was applied to the
    2048-channel encoder4 output), which crashed at runtime.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of predicted output channels.
        img_size: nominal input resolution (stored for interface compatibility).
        base_channels: width of the first encoder stage; deeper stages use
            2x/4x/8x/16x this width. Must be divisible by 8 (GroupNorm groups).
            Default 256 reproduces the original layout exactly.
    """
    def __init__(self, in_channels, out_channels, img_size=224, base_channels=256):
        super().__init__()
        self.img_size = img_size
        b = base_channels

        # Encoder (each later stage runs after 2x max-pooling in forward()).
        self.encoder1 = self._conv_block(in_channels, b)
        self.encoder2 = self._conv_block(b, b * 2)
        self.encoder3 = self._conv_block(b * 2, b * 4)
        self.encoder4 = self._conv_block(b * 4, b * 8)

        # Bottleneck.
        self.bottleneck = self._conv_block(b * 8, b * 16)

        # Decoder (each stage upsamples 2x).
        self.decoder4 = self._upconv_block(b * 16, b * 8)
        self.decoder3 = self._upconv_block(b * 8, b * 4)
        self.decoder2 = self._upconv_block(b * 4, b * 2)
        self.decoder1 = self._upconv_block(b * 2, b)

        # Output head.
        self.final_conv = nn.Sequential(
            nn.Conv2d(b, b // 2, 3, padding=1),
            nn.GELU(),
            nn.Conv2d(b // 2, b // 4, 3, padding=1),
            nn.GELU(),
            nn.Conv2d(b // 4, out_channels, 3, padding=1)
        )

        # Attention gates, used in order decoder4 -> decoder1.
        self.attention_gates = nn.ModuleList([
            self._attention_gate(b * 8, b * 8),
            self._attention_gate(b * 4, b * 4),
            self._attention_gate(b * 2, b * 2),
            self._attention_gate(b, b)
        ])

        # 1x1 convs for the encoder skips, ordered encoder1 -> encoder4.
        self.residual_connections = nn.ModuleList([
            nn.Conv2d(b, b, 1),
            nn.Conv2d(b * 2, b * 2, 1),
            nn.Conv2d(b * 4, b * 4, 1),
            nn.Conv2d(b * 8, b * 8, 1)
        ])

        self._init_weights()

    def _conv_block(self, in_channels, out_channels):
        """Two 3x3 convs with GroupNorm/GELU and a trailing Dropout2d."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.GroupNorm(8, out_channels),
            nn.GELU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.GroupNorm(8, out_channels),
            nn.GELU(),
            nn.Dropout2d(0.1)
        )

    def _upconv_block(self, in_channels, out_channels):
        """2x transposed-conv upsampling followed by a 3x3 refinement conv."""
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, 2, stride=2),
            nn.GroupNorm(8, out_channels),
            nn.GELU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.GroupNorm(8, out_channels),
            nn.GELU(),
            nn.Dropout2d(0.1)
        )

    def _attention_gate(self, gate_channels, skip_channels):
        """Attention gate producing a single-channel mask from gate + skip features."""
        return nn.Sequential(
            nn.Conv2d(gate_channels + skip_channels, gate_channels, 1),
            nn.GELU(),
            nn.Conv2d(gate_channels, 1, 1),
            nn.Sigmoid()
        )

    def _init_weights(self):
        """Kaiming init for (transposed) convolutions, identity-style norm init."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.GroupNorm, nn.LayerNorm)):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """
        Args:
            x: [batch_size, in_channels, height, width]; height/width must be
               divisible by 16 (four 2x poolings).
        Returns:
            output: [batch_size, out_channels, height, width]
        """
        # Encoder path.
        enc1 = self.encoder1(x)
        enc2 = self.encoder2(F.max_pool2d(enc1, 2))
        enc3 = self.encoder3(F.max_pool2d(enc2, 2))
        enc4 = self.encoder4(F.max_pool2d(enc3, 2))

        # Bottleneck.
        bottleneck = self.bottleneck(F.max_pool2d(enc4, 2))

        # Decoder path: residual indices now match the encoder channel widths
        # (enc4 -> [3], enc3 -> [2], enc2 -> [1]); residual_connections[0] is
        # kept (unused) for state_dict compatibility.
        dec4 = self.decoder4(bottleneck)
        att4 = self.attention_gates[0](torch.cat([dec4, enc4], dim=1))
        dec4 = dec4 * att4 + self.residual_connections[3](enc4)

        dec3 = self.decoder3(dec4)
        att3 = self.attention_gates[1](torch.cat([dec3, enc3], dim=1))
        dec3 = dec3 * att3 + self.residual_connections[2](enc3)

        dec2 = self.decoder2(dec3)
        att2 = self.attention_gates[2](torch.cat([dec2, enc2], dim=1))
        dec2 = dec2 * att2 + self.residual_connections[1](enc2)

        dec1 = self.decoder1(dec2)
        att1 = self.attention_gates[3](torch.cat([dec1, enc1], dim=1))
        dec1 = dec1 * att1

        # Output head.
        return self.final_conv(dec1)

class EnhancedCombinedModel(nn.Module):
    """Enhanced combined model integrating all optimized components.

    station time series -> TemporalSequenceEncoder
    satellite imagery   -> EnhancedVitAssimilation
    both                -> SpatioTemporalFusion -> UNet downscaling ->
                           physics constraints -> forecast sequence.

    Fixes over the previous version:
      * the forecast head called ``expand`` with 3 sizes on a 4-D tensor
        (RuntimeError) and would not have produced the documented
        [B, future_steps, output_dim] shape; the constrained field is now
        spatially mean-pooled before projection.
      * ``view`` after ``transpose`` (non-contiguous) replaced by ``reshape``.
      * weight init no longer re-initializes submodules (including the
        pretrained ViT) — only this module's own heads.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config

        # Configuration parameters.
        vit_config = config['model'].get('vit_size', 'vit_large')
        input_dim = config['model']['num_output_vars']
        num_channels = config['model']['num_channels']
        img_size = config['model']['img_height']

        # Temporal sequence encoder.
        self.temporal_encoder = TemporalSequenceEncoder(
            input_dim=input_dim,
            hidden_dim=config['model'].get('temporal_hidden_dim', 512),
            num_layers=config['model'].get('temporal_num_layers', 3),
            dropout=config['model'].get('temporal_dropout', 0.1)
        )

        # Enhanced ViT assimilation module.
        self.vit_assimilation = EnhancedVitAssimilation(
            num_channels=num_channels,
            vit_config=vit_config,
            img_size=img_size
        )

        # Spatio-temporal fusion module.
        vit_dim = VIT_CONFIGS[vit_config]['embed_dim']
        temporal_dim = config['model'].get('temporal_hidden_dim', 512)
        self.spatiotemporal_fusion = SpatioTemporalFusion(
            vit_dim=vit_dim,
            temporal_dim=temporal_dim,
            num_heads=config['model'].get('fusion_heads', 16),
            dropout=config['model'].get('fusion_dropout', 0.1)
        )

        # Feature reshaping head before the UNet.
        self.feature_reshape = nn.Sequential(
            nn.Linear(vit_dim, vit_dim),
            nn.LayerNorm(vit_dim),
            nn.GELU(),
            nn.Dropout(0.1)
        )

        # Enhanced UNet downscaling.
        self.unet_downscale = EnhancedUNetDownscale(
            in_channels=vit_dim,
            out_channels=input_dim,
            img_size=img_size
        )

        # Adaptive physics constraints.
        self.physics_constraints = AdaptivePhysicsConstraints(
            input_dim=input_dim,
            output_dim=input_dim,
            config=config
        )

        # Projection from the pooled constrained field to the forecast vector.
        self.temporal_projection = nn.Sequential(
            nn.Linear(input_dim, input_dim * 4),
            nn.GELU(),
            nn.Dropout(0.1),
            nn.Linear(input_dim * 4, input_dim)
        )

        # Final per-step output refinement.
        self.output_layer = nn.Sequential(
            nn.Linear(input_dim, input_dim),
            nn.LayerNorm(input_dim),
            nn.GELU(),
            nn.Dropout(0.1),
            nn.Linear(input_dim, input_dim)
        )

        self._init_weights()

    def _init_weights(self):
        """Initialize only this module's own heads.

        Submodules initialize themselves; iterating ``self.modules()`` here
        would re-initialize them (and wipe the pretrained ViT weights).
        """
        for container in (self.feature_reshape, self.temporal_projection, self.output_layer):
            for m in container.modules():
                if isinstance(m, nn.Linear):
                    nn.init.trunc_normal_(m.weight, std=0.02)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.LayerNorm):
                    nn.init.constant_(m.bias, 0)
                    nn.init.constant_(m.weight, 1.0)

    def forward(self, station_data, himawari_data):
        """
        Args:
            station_data: [batch_size, seq_len, input_dim] station time series.
            himawari_data: [batch_size, num_channels, height, width] satellite imagery.
        Returns:
            [batch_size, future_steps, input_dim] predictions, where
            future_steps = config['data']['future_hours'] * 4 (15-min interval).
        """
        batch_size = station_data.shape[0]
        future_steps = self.config['data']['future_hours'] * 4  # 15-minute interval

        # Encode the station series; keep the last step as a summary vector.
        temporal_features = self.temporal_encoder(station_data)
        temporal_repr = temporal_features[:, -1, :]  # [batch_size, temporal_dim]

        # ViT feature extraction -> [batch_size, num_patches, vit_dim].
        vit_features = self.vit_assimilation(himawari_data)

        # Broadcast the temporal summary across all patches for fusion.
        temporal_expanded = temporal_repr.unsqueeze(1).expand(-1, vit_features.size(1), -1)
        fused_features = self.spatiotemporal_fusion(vit_features, temporal_expanded)
        enhanced_features = self.feature_reshape(fused_features)

        # Patches form a square grid -> [batch_size, vit_dim, h, w] for the UNet.
        h = w = int(math.sqrt(enhanced_features.size(1)))
        spatial_features = enhanced_features.transpose(1, 2).reshape(batch_size, -1, h, w)

        # UNet downscaling and physics constraints.
        downscaled = self.unet_downscale(spatial_features)
        constrained_output = self.physics_constraints(downscaled)

        # Pool the spatial grid to one vector per sample -> [batch_size, input_dim].
        pooled = constrained_output.reshape(batch_size, -1, h * w).mean(dim=2)
        temporal_output = self.temporal_projection(pooled)

        # Repeat along the forecast horizon and refine each step.
        output_sequence = temporal_output.unsqueeze(1).expand(-1, future_steps, -1)
        return self.output_layer(output_sequence)

def create_enhanced_model(config):
    """Build an ``EnhancedCombinedModel`` from ``config`` and log a summary.

    Args:
        config: nested configuration dict (expects ``config['model']`` keys).
    Returns:
        The constructed ``EnhancedCombinedModel`` instance.
    """
    model = EnhancedCombinedModel(config)

    # Parameter statistics collected in a single pass over the model.
    parameter_info = [(p.numel(), p.requires_grad) for p in model.parameters()]
    total_params = sum(count for count, _ in parameter_info)
    trainable_params = sum(count for count, requires_grad in parameter_info if requires_grad)

    print(f"Enhanced Model Created:")
    print(f"  Total parameters: {total_params:,}")
    print(f"  Trainable parameters: {trainable_params:,}")
    print(f"  ViT config: {config['model'].get('vit_size', 'vit_large')}")
    print(f"  Temporal hidden dim: {config['model'].get('temporal_hidden_dim', 512)}")

    return model

if __name__ == "__main__":
    # 测试模型
    import yaml
    
    # 加载配置
    with open('config.yaml', 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)
    
    # 创建模型
    model = create_enhanced_model(config)
    
    # 测试前向传播
    batch_size = 2
    seq_len = 480  # 5天 * 24小时 * 4个15分钟点
    input_dim = 3
    num_channels = 4
    height, width = 224, 224
    
    station_data = torch.randn(batch_size, seq_len, input_dim)
    himawari_data = torch.randn(batch_size, num_channels, height, width)
    
    with torch.no_grad():
        output = model(station_data, himawari_data)
    
    print(f"\nForward pass test:")
    print(f"  Input shapes: station_data={station_data.shape}, himawari_data={himawari_data.shape}")
    print(f"  Output shape: {output.shape}")
    print(f"  Expected output shape: ({batch_size}, {config['data']['future_hours']*4}, {input_dim})")
    
    # 验证输出形状
    expected_shape = (batch_size, config['data']['future_hours']*4, input_dim)
    assert output.shape == expected_shape, f"Output shape mismatch: {output.shape} vs {expected_shape}"
    print("✓ Model test passed!")
