import torch
import torch.nn as nn
import timm
import segmentation_models_pytorch as smp
import numpy as np

class VitAssimilation(nn.Module):
    """Vision Transformer feature extractor for multi-band satellite imagery.

    Builds a ViT-Base backbone whose patch-embedding accepts an arbitrary
    number of input channels (e.g. 4-band data), then strips the
    classification head so the forward pass returns backbone features.
    """

    def __init__(self, img_size=(224, 224), patch_size=(8, 8), num_channels=4, num_classes=10):
        super().__init__()
        # in_chans is set dynamically, so inputs are not restricted to RGB.
        self.vit = timm.create_model(
            "vit_base_patch16_224",
            img_size=img_size,
            patch_size=patch_size,
            in_chans=num_channels,
            num_classes=num_classes,
            pretrained=False,
        )

        # Keep the original head around, then swap in an identity so this
        # module emits raw backbone features rather than class logits.
        self.original_classifier = self.vit.head
        self.vit.head = nn.Identity()

    def forward(self, x):
        """x: [batch, channels, height, width] -> backbone feature tensor."""
        return self.vit(x)

class UnetDownscale(nn.Module):
    """UNet-based downscaling network built on segmentation_models_pytorch."""

    def __init__(self, encoder_name="resnet34", decoder_scale=4, classes=5):
        super().__init__()
        # Decoder uses scSE attention; activation=None leaves raw outputs.
        self.unet = smp.Unet(
            encoder_name=encoder_name,
            encoder_weights=None,
            decoder_attention_type="scse",
            classes=classes,
            activation=None,
        )
        # NOTE(review): stored but never read in forward() — presumably kept
        # so callers can query the intended downscaling factor; confirm.
        self.scale_factor = decoder_scale

    def forward(self, x):
        """x: [batch, channels, height, width] -> UNet output map."""
        return self.unet(x)

class AdvectionLayer(nn.Module):
    """Physical constraint layer for wind field advection."""

    def __init__(self):
        super().__init__()

    def forward(self, u, v, phi):
        """Compute the advection tendency of a scalar field.

        u:   east-west wind component   [batch, height, width]
        v:   north-south wind component [batch, height, width]
        phi: scalar field to advect     [batch, height, width]

        Returns -(u * dphi/dx + v * dphi/dy).
        """
        # dphi/dx along the width axis, dphi/dy along the height axis
        # (torch.gradient returns a one-element tuple for a single dim).
        (dphi_dx,) = torch.gradient(phi, dim=2)
        (dphi_dy,) = torch.gradient(phi, dim=1)
        return -(u * dphi_dx + v * dphi_dy)

class TerrainCorrectionLayer(nn.Module):
    """Physical constraint layer for terrain (elevation) effects on temperature.

    Applies the standard atmospheric lapse rate (~6.5 degC per 1000 m) to a
    terrain-height field. A fixed terrain height can be supplied at
    construction (stored as a learnable parameter), or a per-call elevation
    tensor can be passed to forward().
    """

    # Standard environmental lapse rate in degC per metre.
    LAPSE_RATE = -6.5 / 1000

    def __init__(self, terrain_height=None):
        super().__init__()
        if terrain_height is not None:
            # Learnable so the correction can be fine-tuned end-to-end.
            self.terrain_height = nn.Parameter(torch.tensor(terrain_height, dtype=torch.float32))
        else:
            self.terrain_height = None

    def forward(self, temperature, elevation=None):
        """
        temperature: temperature field [batch, height, width]
        elevation: terrain elevation [height, width]; used only when no
            terrain_height was configured at construction.

        Returns the lapse-rate-corrected temperature; identity when neither
        terrain source is available.
        """
        if self.terrain_height is not None:
            # Fixed (learnable) terrain height takes precedence.
            return temperature + self.LAPSE_RATE * self.terrain_height
        if elevation is not None:
            # Bug fix: `elevation` was accepted but silently ignored; it now
            # serves as the terrain source when no fixed height is configured.
            return temperature + self.LAPSE_RATE * elevation
        return temperature

class CombinedModel(nn.Module):
    """Fusion model: ViT satellite features + station time-series encoder.

    forward() concatenates ViT backbone features (768-dim) with encoded
    station features (128-dim) and maps them to a
    [batch, future_time_points, num_output_vars] forecast. The UNet
    downscaler and the physical-constraint layers are constructed here but
    are not invoked inside forward().
    """

    def __init__(self, config):
        # config: nested dict with 'data' and 'model' sections (see the
        # __main__ smoke test at the bottom of this file for expected keys).
        super().__init__()
        self.config = config
        model_config = config['model']
        
        # ViT for satellite data assimilation
        self.vit_assimilation = VitAssimilation(
            img_size=(model_config['img_height'], model_config['img_width']),
            patch_size=model_config['vit_patch_size'],
            num_channels=model_config['num_channels'],
            num_classes=128  # Head is replaced by Identity, so this is never used for logits
        )
        
        # UNet for downscaling
        self.unet_downscale = UnetDownscale(
            encoder_name=model_config['unet_encoder'],
            decoder_scale=model_config['unet_scale'],
            classes=model_config['num_output_vars']
        )
        
        # Physical constraint layers
        self.advection_layer = AdvectionLayer()
        self.terrain_correction = TerrainCorrectionLayer()
        
        # Station data processing - dynamic input size, reduced to 3 core variables
        # Input size: historical_days * 24 hours * 4 (15-min intervals) * 3 features
        historical_points = config['data']['historical_days'] * 24 * 4
        station_input_size = historical_points * 3
        
        self.station_encoder = nn.Sequential(
            nn.Linear(station_input_size, 256),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(256, 128),
            nn.ReLU()
        )
        
        # Fusion layer - ViT outputs 768 features, station encoder outputs 128
        # Output size: future_hours * 4 (15-min intervals) * num_output_vars
        future_points = config['data']['future_hours'] * 4
        output_size = future_points * model_config['num_output_vars']
        
        self.fusion_layer = nn.Sequential(
            nn.Linear(768 + 128, 256),  # ViT features (768) + station features (128)
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(256, output_size)
        )
        
    def forward(self, station_data, himawari_data):
        """
        station_data: [batch_size, historical_days*24*4, 3] station history
            (3 core variables: DNI, DHI, Temperature; 15-min resolution).
            Any trailing shape works as long as each sample flattens to
            station_input_size elements.
        himawari_data: [batch_size, channels, height, width] satellite
            imagery (4-channel input supported)

        Returns: [batch_size, future_time_points, num_output_vars]
        """
        batch_size = station_data.size(0)
        
        # Process satellite data with ViT.
        # NOTE: the ViT-Base backbone with its head removed yields 768-dim
        # features (not 128) — the fusion layer below is sized accordingly.
        satellite_features = self.vit_assimilation(himawari_data)  # [batch_size, 768]
        
        # Process station data
        station_flat = station_data.reshape(batch_size, -1)  # Flatten historical days using reshape
        station_features = self.station_encoder(station_flat)  # [batch_size, 128]
        
        # Fuse features
        fused_features = torch.cat([satellite_features, station_features], dim=1)
        raw_output = self.fusion_layer(fused_features)
        
        # Reshape to [batch_size, future_time_points, num_output_vars]
        future_points = self.config['data']['future_hours'] * 4  # 15min intervals
        output = raw_output.view(batch_size, future_points, 
                               self.config['model']['num_output_vars'])
        
        return output
    
    def apply_physical_constraints(self, predictions, current_conditions):
        """
        Apply physical constraints to predictions.

        predictions: [batch_size, future_time_points, num_output_vars]
        current_conditions: dict with current wind fields, etc.

        NOTE(review): the advection loop below is a placeholder (`pass`) —
        predictions are currently returned unchanged.
        """
        batch_size, future_time_points, num_vars = predictions.shape
        
        # Apply advection constraint to appropriate variables
        if 'u_wind' in current_conditions and 'v_wind' in current_conditions:
            u = current_conditions['u_wind']
            v = current_conditions['v_wind']
            
            # Apply advection to relevant variables (e.g., temperature, humidity)
            for time_point in range(future_time_points):
                # This is a simplified example - would need proper implementation
                # based on the specific physics of the system
                pass
        
        return predictions

def create_model(config):
    """Build and return the complete CombinedModel for the given config.

    config: nested dict with 'data' and 'model' sections (see CombinedModel).
    """
    model = CombinedModel(config)
    return model

if __name__ == "__main__":
    # Smoke-test the model with the 4-band satellite configuration.
    config = {
        'data': {'historical_days': 5, 'future_hours': 24},
        'model': {
            'vit_patch_size': [8, 8],
            'unet_encoder': 'resnet34',
            'unet_scale': 4,
            'num_channels': 4,  # four spectral bands
            'img_height': 224,
            'img_width': 224,
            'num_output_vars': 3
        }
    }

    model = CombinedModel(config)

    # Dummy inputs with the corrected dimensions:
    # station history is (days * 24 hours * 4 quarter-hours) points of 3 vars.
    n_batch = 2
    n_history = config['data']['historical_days'] * 24 * 4  # 5*24*4 = 480
    station_data = torch.randn(n_batch, n_history, 3)
    himawari_data = torch.randn(n_batch, 4, 224, 224)  # 4-channel satellite data

    output = model(station_data, himawari_data)
    print(f"Input station data shape: {station_data.shape}")
    print(f"Input satellite data shape: {himawari_data.shape}")
    print(f"Output shape: {output.shape}")  # Should be [2, 96, 3] (24*4=96 time points, 3 vars)
