import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torch.utils.data import Dataset, DataLoader
import os
import matplotlib.pyplot as plt
import torch.nn.functional as F
import pandas as pd

class AttentionBlock(nn.Module):
    """Spatial attention: gates every location of the input by a learned
    per-pixel weight in (0, 1)."""

    def __init__(self, in_channels):
        super(AttentionBlock, self).__init__()
        # A 1x1 convolution collapses all channels into a single attention map.
        self.conv = nn.Conv2d(in_channels, 1, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # The (N, 1, H, W) gate broadcasts over all channels of x.
        gate = self.sigmoid(self.conv(x))
        gated = x * gate
        return gated

class ChannelAttention(nn.Module):
    """Channel attention (CBAM style): average- and max-pooled descriptors are
    passed through a shared bottleneck MLP, summed, and squashed to per-channel
    gates in (0, 1)."""

    def __init__(self, in_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Shared bottleneck MLP implemented with 1x1 convolutions.
        self.mlp = nn.Sequential(
            nn.Conv2d(in_channels, in_channels // reduction_ratio, kernel_size=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels // reduction_ratio, in_channels, kernel_size=1, bias=False)
        )

        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        pooled_avg = self.avg_pool(x)
        pooled_max = self.max_pool(x)
        # Same MLP applied to both descriptors; the sum is gated per channel.
        scale = self.sigmoid(self.mlp(pooled_avg) + self.mlp(pooled_max))
        return x * scale

class SimpleUNet(nn.Module):
    """U-Net-style encoder/decoder regressor with spatial and channel attention,
    grouped-convolution "high-frequency" branches, and an input residual path.

    Args:
        in_channels: number of input feature planes (default 6 weather variables).
        out_channels: number of predicted planes (default 1).
        dropout_rate: Dropout2d probability used in the encoder/decoder stages.

    The forward pass returns a tensor with the same spatial size as the input.
    """
    def __init__(self, in_channels=6, out_channels=1, dropout_rate=0.2):
        super(SimpleUNet, self).__init__()

        # ---- Encoder ----
        self.enc1 = nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )

        # Local detail-enhancement convolution (1x1, residual-added in forward).
        self.detail_conv1 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=1, padding=0),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )

        # High-frequency enhancement: grouped conv to strengthen local correlation.
        self.high_freq1 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1, groups=4),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )

        self.enc2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True)
        )

        # Local detail-enhancement convolution.
        self.detail_conv2 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=1, padding=0),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True)
        )

        # High-frequency enhancement (grouped conv).
        self.high_freq2 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, padding=1, groups=8),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True)
        )

        self.enc3 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )

        # Local detail-enhancement convolution.
        self.detail_conv3 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=1, padding=0),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )

        # High-frequency enhancement (grouped conv).
        self.high_freq3 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1, groups=16),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )

        # Deepest encoder stage (bottleneck input).
        self.enc4 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True)
        )

        # ---- Decoder ----
        self.dec4 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )

        self.dec3 = nn.Sequential(
            nn.Conv2d(256 + 256, 128, kernel_size=3, padding=1),  # input doubled by skip connection
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True)
        )

        self.dec2 = nn.Sequential(
            nn.Conv2d(128 + 128, 64, kernel_size=3, padding=1),  # input doubled by skip connection
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )

        self.dec1 = nn.Sequential(
            nn.Conv2d(64 + 64, 32, kernel_size=3, padding=1),  # input doubled by skip connection
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )

        # Value-prediction head.
        self.value_branch = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 16, kernel_size=3, padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.Conv2d(16, out_channels, kernel_size=1)
        )

        # Down-/up-sampling operators shared across stages.
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

        # Spatial attention on each encoder stage.
        self.attention1 = AttentionBlock(64)
        self.attention2 = AttentionBlock(128)
        self.attention3 = AttentionBlock(256)

        # Channel attention applied after the last decoder stage.
        self.channel_attention = ChannelAttention(32)

        # Light spatial smoothing of the predicted values (residual-added).
        self.smooth = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )

        # Residual projection from the raw input to the output space.
        self.residual = nn.Conv2d(in_channels, out_channels, kernel_size=1)

        # Final detail-enhancement block on the decoder output.
        self.final_detail = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1, groups=4),  # grouped conv for local features
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )

        # Initialize weights to avoid poor local optima at the start of training.
        self._initialize_weights()

    def _initialize_weights(self):
        """He (Kaiming) initialization for convolutions; unit-scale BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # Remember the input's spatial size so the output can be matched to it
        # at the end (fixes the old check, which compared against a decoder
        # feature and resized to a hard-coded (355, 618)).
        in_size = x.shape[2:]

        # Input projection kept for the residual connection.
        identity = self.residual(x)

        # ---- Encoder ----
        enc1 = self.enc1(x)
        # Enhance local features.
        enc1 = enc1 + self.detail_conv1(enc1)
        # Enhance high-frequency features with a reduced weight so the network
        # does not over-focus on fine detail.
        high_freq1 = self.high_freq1(enc1)
        enc1 = enc1 + 0.2 * high_freq1
        enc1_att = self.attention1(enc1)
        x = self.pool(enc1_att)

        enc2 = self.enc2(x)
        enc2 = enc2 + self.detail_conv2(enc2)
        high_freq2 = self.high_freq2(enc2)
        enc2 = enc2 + 0.2 * high_freq2
        enc2_att = self.attention2(enc2)
        x = self.pool(enc2_att)

        enc3 = self.enc3(x)
        enc3 = enc3 + self.detail_conv3(enc3)
        high_freq3 = self.high_freq3(enc3)
        enc3 = enc3 + 0.2 * high_freq3
        enc3_att = self.attention3(enc3)
        x = self.pool(enc3_att)

        # Deepest encoder stage.
        enc4 = self.enc4(x)
        x = self.pool(enc4)

        # Bottleneck: stronger dropout to reduce overfitting.
        x = F.dropout(x, 0.4, self.training)

        # ---- Decoder ----
        x = self.up(x)
        # Guard against off-by-one sizes from odd input dimensions.
        if x.shape[2:] != enc4.shape[2:]:
            x = F.interpolate(x, size=enc4.shape[2:], mode='bilinear', align_corners=True)
        x = self.dec4(x)

        x = self.up(x)
        if x.shape[2:] != enc3_att.shape[2:]:
            x = F.interpolate(x, size=enc3_att.shape[2:], mode='bilinear', align_corners=True)
        # Attention-weighted skip connection.
        x = torch.cat([x, enc3_att], dim=1)
        x = self.dec3(x)

        x = self.up(x)
        if x.shape[2:] != enc2_att.shape[2:]:
            x = F.interpolate(x, size=enc2_att.shape[2:], mode='bilinear', align_corners=True)
        x = torch.cat([x, enc2_att], dim=1)
        x = self.dec2(x)

        x = self.up(x)
        if x.shape[2:] != enc1_att.shape[2:]:
            x = F.interpolate(x, size=enc1_att.shape[2:], mode='bilinear', align_corners=True)
        x = torch.cat([x, enc1_att], dim=1)
        x = self.dec1(x)

        # Channel attention on the decoded features.
        x = self.channel_attention(x)

        # Final detail enhancement with a reduced weight.
        x = x + 0.3 * self.final_detail(x)

        # Predicted values.
        values = self.value_branch(x)

        # Mild spatial smoothing, residual-added with a reduced weight.
        values = values + 0.5 * self.smooth(values)

        # Match the residual branch to the prediction's spatial size if needed.
        if values.shape[2:] != identity.shape[2:]:
            identity = F.interpolate(identity, size=values.shape[2:], mode='bilinear', align_corners=True)

        # Low-weight residual connection to keep local differences.
        out = values + 0.3 * identity

        # Resize the output back to the input's spatial size if they differ.
        if out.shape[2:] != in_size:
            out = F.interpolate(out, size=in_size, mode='bilinear', align_corners=True)

        return out

class WeatherDataset(Dataset):
    """Dataset of cropped, gridded weather fields.

    Loads per-variable ``{var}_cropped.npy`` arrays (time, H, W) plus
    ``{var}_mask.npy`` validity masks from ``data_dir``, truncates all
    variables to a common time length, and serves normalized
    (features, target, mask) triples where the target is 'srad' and the
    features are the other six variables.

    NOTE(review): all variables are assumed to share one validity mask —
    only ``self.masks['srad']`` is ever used; confirm the per-variable
    masks really coincide.
    NOTE(review): a variable whose data file is missing is silently
    skipped at load time, but later accesses to ``self.data[var]`` would
    then raise ``KeyError``.
    """
    def __init__(self, data_dir, augment=True, test_mode=False):
        self.data_dir = data_dir
        self.augment = augment  # whether to apply random augmentation in __getitem__
        self.test_mode = test_mode  # when True, augmentation is disabled even if augment=True

        # All variables managed by this dataset; 'srad' is the target.
        self.variables = ['shum', 'temp', 'wind', 'prec', 'pres', 'lrad', 'srad']
        self.data = {}
        self.masks = {}

        # Load each variable's data and mask, tracking the shortest time axis.
        min_length = float('inf')
        for var in self.variables:
            data_path = os.path.join(data_dir, f'{var}_cropped.npy')
            mask_path = os.path.join(data_dir, f'{var}_mask.npy')

            if os.path.exists(data_path):
                self.data[var] = np.load(data_path)
                self.masks[var] = np.load(mask_path)
                min_length = min(min_length, self.data[var].shape[0])
                print(f"加载 {var} 数据，形状为 {self.data[var].shape}")

        # Truncate every variable to the common (shortest) time length.
        for var in self.variables:
            if self.data[var].shape[0] > min_length:
                print(f"截断 {var} 数据从 {self.data[var].shape[0]} 到 {min_length}")
                self.data[var] = self.data[var][:min_length]

        # Single shared validity mask (taken from 'srad'; see class NOTE).
        self.mask = self.masks['srad']

        # Number of valid grid points (reporting only).
        self.valid_points = np.sum(self.mask)
        print(f"总有效点数：{self.valid_points}")

        # Per-variable statistics over valid, non-NaN points; consumed by
        # _normalize_robust (mean/std) and available for de-normalization.
        self.stats = {}
        for var in self.variables:
            # Restrict to valid grid points, then drop NaNs.
            valid_data = self.data[var][:, self.mask.astype(bool)]
            valid_data = valid_data[~np.isnan(valid_data)]

            # Quartiles / IQR are stored alongside mean/std for robustness.
            q1 = np.percentile(valid_data, 25)
            q3 = np.percentile(valid_data, 75)
            iqr = q3 - q1

            self.stats[var] = {
                'mean': np.mean(valid_data),
                'std': np.std(valid_data),
                'min': np.min(valid_data),
                'max': np.max(valid_data),
                'q1': q1,
                'q3': q3,
                'iqr': iqr
            }

            print(f"{var} 统计信息:")
            for key, value in self.stats[var].items():
                print(f"  {key}: {value:.4f}")

        # Spatial (H, W) shape of one time step, used by the augmentations.
        self.data_shape = self.data['srad'][0].shape

    def __len__(self):
        """Number of samples = number of 'srad' time steps."""
        return self.data['srad'].shape[0]  # use srad's time-step count

    def __getitem__(self, idx):
        """Return (features, target, mask) tensors for time step ``idx``.

        Features: 6 normalized input variables stacked on the channel axis.
        Target: normalized 'srad' field with a leading channel axis.
        Mask: validity mask as a (1, H, W) float tensor.
        """
        # Input feature variables (everything except the 'srad' target).
        input_vars = ['shum', 'temp', 'wind', 'prec', 'pres', 'lrad']
        features = []

        # Build each normalized input channel.
        for var in input_vars:
            feat = self.data[var][idx].copy()
            valid_mask = self.mask.astype(bool)

            # Robust (clipped z-score) normalization over valid points only.
            feat[valid_mask] = self._normalize_robust(feat[valid_mask], var)

            # Zero-fill invalid regions.
            feat[~valid_mask] = 0

            # Very light smoothing, skipped for wind/precip to keep their
            # local structure. NOTE(review): the filter also mixes in the
            # zero-filled invalid region at the mask boundary.
            if var not in ['wind', 'prec']:
                feat = self.apply_spatial_smoothing(feat, sigma=0.05)

            features.append(feat)

        x = np.stack(features, axis=0)

        # Target variable ('srad').
        target = self.data['srad'][idx].copy()
        valid_mask = self.mask.astype(bool)

        # Normalize the target with the same robust scheme.
        target[valid_mask] = self._normalize_robust(target[valid_mask], 'srad')

        # Zero-fill invalid regions.
        target[~valid_mask] = 0

        # The target is deliberately NOT smoothed, preserving local variation.

        y = target[np.newaxis, :, :]

        # Random augmentation (train only; disabled in test mode).
        if self.augment and not self.test_mode:
            x, y = self._apply_augmentation(x, y, valid_mask)

        # Convert to tensors.
        x = torch.FloatTensor(x)
        y = torch.FloatTensor(y)
        mask = torch.FloatTensor(self.mask).unsqueeze(0)

        return x, y, mask

    def _normalize_robust(self, data, var):
        """Z-score normalize ``data`` with ``var``'s stats, clipped to [-4, 4].

        The epsilon guards against zero std; the wide clip range keeps most
        of the natural variation while bounding outliers.
        """
        scaled_data = (data - self.stats[var]['mean']) / (self.stats[var]['std'] + 1e-8)

        return np.clip(scaled_data, -4.0, 4.0)

    def _apply_augmentation(self, x, y, valid_mask):
        """Randomly augment the input ``x`` in place; ``y`` is returned unchanged.

        Five independent, probabilistic augmentations: additive noise, random
        occlusion, per-channel scaling, variable smoothing, and local contrast
        enhancement. Only valid-mask regions are modified.
        """
        # 1. Random Gaussian noise (valid region only).
        if np.random.rand() < 0.5:
            # One shared noise level, fresh noise per channel.
            noise_level = np.random.uniform(0.01, 0.05)
            for c in range(x.shape[0]):
                noise = np.random.normal(0, noise_level, size=x[c].shape)
                # Add noise only where the mask is valid.
                x[c][valid_mask] += noise[valid_mask]

        # 2. Random occlusion — simulates sensor failure / missing data.
        if np.random.rand() < 0.3:
            # Random rectangular occlusion region.
            h, w = self.data_shape
            mask_h = np.random.randint(h // 8, h // 4)
            mask_w = np.random.randint(w // 8, w // 4)
            mask_top = np.random.randint(0, h - mask_h)
            mask_left = np.random.randint(0, w - mask_w)

            # Apply the occlusion within the valid region only.
            for c in range(x.shape[0]):
                temp_mask = valid_mask.copy()
                temp_mask[mask_top:mask_top+mask_h, mask_left:mask_left+mask_w] = False
                temp_mask = temp_mask & valid_mask  # restrict to valid points

                # Keep the pre-occlusion values for neighborhood averaging.
                original_values = x[c].copy()

                # Fill the occluded area with zeros or with local means.
                if np.random.rand() < 0.5:
                    x[c][~temp_mask & valid_mask] = 0
                else:
                    # Fill each occluded valid pixel with the mean of its
                    # (2*border+1)-sized valid neighborhood.
                    border = 5
                    for i in range(mask_top, mask_top+mask_h):
                        for j in range(mask_left, mask_left+mask_w):
                            if valid_mask[i, j]:
                                # Clamp the neighborhood window to the grid.
                                min_i = max(0, i-border)
                                max_i = min(h, i+border+1)
                                min_j = max(0, j-border)
                                max_j = min(w, j+border+1)

                                surround = original_values[min_i:max_i, min_j:max_j][valid_mask[min_i:max_i, min_j:max_j]]
                                if len(surround) > 0:
                                    x[c][i, j] = np.mean(surround)
                                else:
                                    x[c][i, j] = 0

        # 3. Feature scaling — simulates variation in temperature/humidity etc.
        if np.random.rand() < 0.4:
            for c in range(x.shape[0]):
                scale_factor = np.random.uniform(0.8, 1.2)
                x[c][valid_mask] *= scale_factor

        # 4. Variable smoothing — simulates different weather-system boundaries.
        if np.random.rand() < 0.3:
            smooth_sigma = np.random.uniform(0.01, 0.1)
            for c in range(x.shape[0]):
                if np.random.rand() < 0.5:  # only some channels
                    x[c] = self.apply_spatial_smoothing(x[c], smooth_sigma)

        # 5. Local contrast enhancement in a random sub-region.
        if np.random.rand() < 0.3:
            h, w = self.data_shape
            region_h = np.random.randint(h // 4, h // 2)
            region_w = np.random.randint(w // 4, w // 2)
            region_top = np.random.randint(0, h - region_h)
            region_left = np.random.randint(0, w - region_w)

            # Stretch values around the region mean by a random factor.
            for c in range(x.shape[0]):
                if np.random.rand() < 0.5:  # only some channels
                    region = x[c][region_top:region_top+region_h, region_left:region_left+region_w]
                    valid_region = valid_mask[region_top:region_top+region_h, region_left:region_left+region_w]
                    if np.any(valid_region):
                        # Contrast: new = mean + (old - mean) * factor.
                        mean_val = np.mean(region[valid_region])
                        contrast_factor = np.random.uniform(1.1, 1.5)
                        region[valid_region] = mean_val + (region[valid_region] - mean_val) * contrast_factor
                        x[c][region_top:region_top+region_h, region_left:region_left+region_w] = region

        return x, y

    def apply_spatial_smoothing(self, data, sigma=0.05):
        """Gaussian-smooth a 2-D field; ``sigma`` controls the strength."""
        from scipy.ndimage import gaussian_filter
        return gaussian_filter(data, sigma=sigma)

class EnhancedMSELoss(nn.Module):
    """Composite masked regression loss.

    forward() combines: focal MSE + gamma * L1 + beta * relative error
    + detail_lambda * multi-scale high-frequency (Laplacian) loss
    + smoothness_lambda * square-root total variation.
    ``alpha`` and :meth:`compute_gradient_penalty` are kept for interface
    compatibility but are not used by forward().
    """

    def __init__(self, alpha=0.005, beta=0.3, gamma=0.2, detail_lambda=0.1, smoothness_lambda=0.02):
        super(EnhancedMSELoss, self).__init__()
        self.alpha = alpha                          # spatial-smoothness weight (unused in forward)
        self.beta = beta                            # relative-error weight
        self.gamma = gamma                          # L1 weight (tempers extreme values)
        self.detail_lambda = detail_lambda          # high-frequency detail weight
        self.smoothness_lambda = smoothness_lambda  # total-variation weight

    def compute_gradient_penalty(self, x):
        """Mean absolute spatial gradient along both image axes."""
        dx = torch.abs(x[:, :, :, 1:] - x[:, :, :, :-1])
        dy = torch.abs(x[:, :, 1:, :] - x[:, :, :-1, :])
        return dx.mean() + dy.mean()

    def compute_relative_error(self, pred, target):
        """|pred - target| / max(|target|, 1e-6); the floor avoids division by zero."""
        floor = torch.max(torch.abs(target), torch.ones_like(target) * 1e-6)
        return torch.abs(pred - target) / floor

    def compute_detail_loss(self, pred, target, mask):
        """Masked, focal-weighted difference of Laplacian-filtered pred/target,
        averaged over several kernel sizes (multi-scale high-pass)."""
        scales = [3, 5, 7]
        accumulated = 0.0

        for ks in scales:
            lap = self._create_laplacian_kernel(ks).to(pred.device)

            # Reflect-pad so the filtered maps keep the input's spatial size.
            pad = ks // 2
            pred_pad = F.pad(pred, (pad, pad, pad, pad), mode='reflect')
            target_pad = F.pad(target, (pad, pad, pad, pad), mode='reflect')

            # Filter each channel independently, then re-stack.
            channels = pred.shape[1]
            pred_high_freq = torch.cat(
                [F.conv2d(pred_pad[:, c:c + 1], lap) for c in range(channels)], dim=1)
            target_high_freq = torch.cat(
                [F.conv2d(target_pad[:, c:c + 1], lap) for c in range(channels)], dim=1)

            # Focal weighting: larger high-frequency errors get more weight,
            # clamped to keep the scale bounded.
            diff = torch.abs(pred_high_freq - target_high_freq)
            focal = torch.clamp(torch.exp(diff) - 1.0, 0.5, 3.0)

            accumulated += (diff * focal * mask).sum() / (mask.sum() + 1e-8)

        # Average over the scales.
        return accumulated / len(scales)

    def _create_laplacian_kernel(self, kernel_size):
        """(1, 1, k, k) kernel: +1 everywhere, center -(k*k - 1) — sums to zero."""
        kernel = torch.ones((1, 1, kernel_size, kernel_size))
        center = kernel_size // 2
        kernel[0, 0, center, center] = -1.0 * (kernel_size ** 2 - 1)
        return kernel

    def compute_variation_loss(self, pred, mask):
        """Square-root total variation over the valid region: relatively stronger
        penalty on small changes while preserving large edges."""
        dh = torch.abs(pred[:, :, :, :-1] - pred[:, :, :, 1:]) * mask[:, :, :, :-1]
        dv = torch.abs(pred[:, :, :-1, :] - pred[:, :, 1:, :]) * mask[:, :, :-1, :]

        dh = torch.sqrt(dh + 1e-6)
        dv = torch.sqrt(dv + 1e-6)

        mean_h = dh.sum() / (mask[:, :, :, :-1].sum() + 1e-8)
        mean_v = dv.sum() / (mask[:, :, :-1, :].sum() + 1e-8)

        return mean_h + mean_v

    def focal_mse_loss(self, pred, target, mask, gamma=2.0):
        """Masked MSE re-weighted toward hard pixels:
        weight = clamp(exp(gamma * err^2) - 1, 0.5, 5)."""
        err_sq = (pred - target) ** 2
        focal = torch.clamp(torch.exp(gamma * err_sq) - 1.0, 0.5, 5.0)
        return (err_sq * focal * mask).sum() / (mask.sum() + 1e-8)

    def forward(self, pred, target, mask):
        """Weighted sum of all component losses over the valid mask."""
        denom = mask.sum() + 1e-8

        # 1. Focal MSE.
        focal = self.focal_mse_loss(pred, target, mask)

        # 2. L1 (less sensitive to extreme values).
        l1_term = (torch.abs(pred - target) * mask).sum() / denom

        # 3. Relative error.
        rel_term = (self.compute_relative_error(pred, target) * mask).sum() / denom

        # 4. High-frequency detail preservation.
        detail = self.compute_detail_loss(pred, target, mask)

        # 5. Total variation (smooth, but edge-preserving).
        tv = self.compute_variation_loss(pred, mask)

        return (focal
                + self.gamma * l1_term
                + self.beta * rel_term
                + self.detail_lambda * detail
                + self.smoothness_lambda * tv)

def denormalize_predictions(pred_values, pred_zero, srad_stats):
    """Map normalized model outputs back to physical srad values.

    Args:
        pred_values: tensor of normalized (log-space) value predictions.
        pred_zero: tensor of zero-probability predictions; locations where
            it exceeds the threshold are forced to 0.
        srad_stats: dict with 'log_mean' and 'log_std' used to invert the
            log-space normalization.
            NOTE(review): WeatherDataset.stats stores mean/std/min/max/q1/q3/iqr,
            not log_mean/log_std — confirm where these keys come from.

    Returns:
        numpy array of de-normalized values; the input tensors are NOT modified.
    """
    # .detach() tolerates tensors that still require grad; .copy() on the
    # mutated array prevents the in-place edits below from writing through
    # to the caller's tensor (Tensor.numpy() shares memory on CPU tensors).
    pred_values = pred_values.detach().cpu().numpy().copy()
    pred_zero = pred_zero.detach().cpu().numpy()

    # Low threshold: any non-trivial zero probability forces a zero output.
    zero_threshold = 0.05
    zero_mask = pred_zero > zero_threshold

    # 1. Force the confident-zero regions to 0.
    pred_values[zero_mask] = 0

    # 2. Invert the log-space normalization on the remaining region.
    non_zero_mask = ~zero_mask
    if np.any(non_zero_mask):
        log_values = pred_values[non_zero_mask] * srad_stats['log_std'] + srad_stats['log_mean']
        pred_values[non_zero_mask] = np.expm1(log_values)

        # Snap near-zero results to exactly 0 (same threshold as the dataset).
        small_values_mask = pred_values < 0.05
        pred_values[small_values_mask] = 0

    return pred_values

def train_and_evaluate(model, train_loader, test_loader, criterion, optimizer, device, dataset, num_epochs=50, save_dir='results-improved'):
    """训练模型并评估结果"""
    os.makedirs(save_dir, exist_ok=True)
    model.train()
    losses = []
    
    # 获取srad的统计信息
    srad_stats = dataset.stats['srad']
    
    # 使用One-Cycle学习率调度器，帮助更快收敛并避免局部最优
    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer, 
        max_lr=0.002,
        steps_per_epoch=len(train_loader),
        epochs=num_epochs,
        pct_start=0.3,  # 在前30%的训练中达到最大学习率
        div_factor=25,  # 初始学习率 = max_lr/25
        final_div_factor=1000  # 最终学习率 = max_lr/1000
    )
    
    # 早停机制
    best_val_loss = float('inf')
    patience = 15
    patience_counter = 0
    best_model_path = os.path.join(save_dir, 'best_model.pth')
    last_checkpoint_path = os.path.join(save_dir, 'last_checkpoint.pth')
    
    # 分离出验证集
    val_size = int(0.2 * len(train_loader.dataset))
    train_size = len(train_loader.dataset) - val_size
    train_dataset, val_dataset = torch.utils.data.random_split(
        train_loader.dataset, [train_size, val_size]
    )
    
    # 重新创建数据加载器，测试时不使用增强
    train_loader_new = DataLoader(train_dataset, batch_size=train_loader.batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=train_loader.batch_size, shuffle=False)
    
    # 权重衰减调整
    weight_decay_initial = 1e-5
    weight_decay_schedule = lambda epoch: weight_decay_initial * (1 + 0.05 * epoch)
    
    # 混合精度训练加速
    scaler = torch.cuda.amp.GradScaler() if torch.cuda.is_available() else None
    
    # 学习率预热
    warmup_epochs = 3
    warmup_scheduler = None
    if warmup_epochs > 0:
        warmup_scheduler = optim.lr_scheduler.LinearLR(
            optimizer, start_factor=0.1, end_factor=1.0, total_iters=warmup_epochs * len(train_loader_new)
        )
    
    # 训练循环
    for epoch in range(num_epochs):
        # 训练阶段
        model.train()
        running_loss = 0.0
        batch_count = 0
        
        # 更新权重衰减
        current_weight_decay = weight_decay_schedule(epoch)
        for param_group in optimizer.param_groups:
            param_group['weight_decay'] = current_weight_decay
        
        for inputs, targets, masks in train_loader_new:
            inputs, targets = inputs.to(device), targets.to(device)
            masks = masks.to(device)
            
            optimizer.zero_grad()
            
            # 混合精度训练
            if scaler is not None:
                with torch.cuda.amp.autocast():
                    pred_values = model(inputs)
                    
                    # 确保输出与目标形状一致
                    if pred_values.shape != targets.shape:
                        pred_values = F.interpolate(pred_values, size=(targets.shape[2], targets.shape[3]), 
                                                 mode='bilinear', align_corners=True)
                    
                    loss = criterion(pred_values, targets, masks)
                
                if torch.isnan(loss).item():
                    print(f"警告: 第{epoch+1}个epoch的第{batch_count+1}个batch出现NaN损失")
                    continue
                
                # 缩放梯度并反向传播
                scaler.scale(loss).backward()
                # 梯度裁剪，防止梯度爆炸
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                scaler.step(optimizer)
                scaler.update()
            else:
                # 标准训练流程
                pred_values = model(inputs)
                
                # 确保输出与目标形状一致
                if pred_values.shape != targets.shape:
                    pred_values = F.interpolate(pred_values, size=(targets.shape[2], targets.shape[3]), 
                                             mode='bilinear', align_corners=True)
                
                try:
                    loss = criterion(pred_values, targets, masks)
                    
                    if torch.isnan(loss).item():
                        print(f"警告: 第{epoch+1}个epoch的第{batch_count+1}个batch出现NaN损失")
                        continue
                    
                    loss.backward()
                    # 梯度裁剪，防止梯度爆炸
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                    optimizer.step()
                    
                except RuntimeError as e:
                    print(f"运行时错误: {e}")
                    continue
            
            running_loss += loss.item()
            batch_count += 1
            
            # 更新学习率
            if epoch < warmup_epochs and warmup_scheduler is not None:
                warmup_scheduler.step()
            else:
                scheduler.step()
                
        # 每个epoch结束时保存检查点，便于恢复训练
        if epoch % 5 == 0 or epoch == num_epochs - 1:
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict() if scheduler else None,
                'loss': running_loss / batch_count if batch_count > 0 else float('inf'),
            }, last_checkpoint_path)
        
        if batch_count > 0:
            train_loss = running_loss / batch_count
            
            # 验证阶段
            model.eval()
            val_loss = 0.0
            val_batch_count = 0
            
            with torch.no_grad():
                for inputs, targets, masks in val_loader:
                    inputs, targets = inputs.to(device), targets.to(device)
                    masks = masks.to(device)
                    
                    pred_values = model(inputs)
                    
                    if pred_values.shape != targets.shape:
                        pred_values = F.interpolate(pred_values, size=(targets.shape[2], targets.shape[3]), 
                                                  mode='bilinear', align_corners=True)
                    
                    loss = criterion(pred_values, targets, masks)
                    val_loss += loss.item()
                    val_batch_count += 1
            
            if val_batch_count > 0:
                val_loss = val_loss / val_batch_count
                
                # 记录损失
                losses.append({'epoch': epoch+1, 'train_loss': train_loss, 'val_loss': val_loss})
                print(f'Epoch [{epoch+1}/{num_epochs}], Train Loss: {train_loss:.6f}, Val Loss: {val_loss:.6f}')
                
                # 可视化验证集中的一个样本结果
                if epoch % 5 == 0 or epoch == num_epochs - 1:
                    visualize_epoch_results(model, val_loader, device, srad_stats, epoch, save_dir)
                
                # 早停检查
                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    patience_counter = 0
                    # 保存最佳模型
                    torch.save({
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict() if scheduler else None,
                        'srad_stats': srad_stats,
                        'val_loss': val_loss,
                        'epoch': epoch
                    }, best_model_path)
                    print(f"保存最佳模型，epoch {epoch+1} 验证损失 {val_loss:.6f}")
                else:
                    patience_counter += 1
                    if patience_counter >= patience:
                        print(f"早停在epoch {epoch+1}")
                        break
    
    # 加载最佳模型进行评估
    checkpoint = torch.load(best_model_path)
    model.load_state_dict(checkpoint['model_state_dict'])
    print(f"加载最佳模型，来自epoch {checkpoint['epoch']+1} 验证损失 {checkpoint['val_loss']:.6f}")
    
    # 创建测试数据集 - 设置test_mode=True以关闭增强
    test_dataset = WeatherDataset(dataset.data_dir, augment=False, test_mode=True)
    test_loader_noaug = DataLoader(test_dataset, batch_size=test_loader.batch_size, shuffle=False)
    
    # 创建预测结果保存目录
    pred_save_dir = os.path.join(save_dir, 'predictions')
    os.makedirs(pred_save_dir, exist_ok=True)
    
    # 评估和可视化
    model.eval()
    all_metrics = []
    
    # 初始化汇总数据
    all_preds = []
    all_targets = []
    all_masks = []
    
    # 评估特征重要性
    feature_importance = evaluate_feature_importance(model, test_loader_noaug, criterion, device)
    
    # 保存特征重要性
    with open(os.path.join(save_dir, 'feature_importance.txt'), 'w') as f:
        for feature, importance in feature_importance.items():
            f.write(f"{feature}: {importance:.6f}\n")
    
    with torch.no_grad():
        for i, (inputs, targets, masks) in enumerate(test_loader_noaug):
            if i >= 20:  # 只处理前20个样本
                break
            
            inputs, targets = inputs.to(device), targets.to(device)
            masks = masks.to(device)
            
            # 预测
            pred_values = model(inputs)
            
            if pred_values.shape != targets.shape:
                pred_values = F.interpolate(pred_values, size=(targets.shape[2], targets.shape[3]), 
                                         mode='bilinear', align_corners=True)
            
            # 收集汇总数据
            all_preds.append(pred_values.cpu().numpy())
            all_targets.append(targets.cpu().numpy())
            all_masks.append(masks.cpu().numpy())
            
            # 反归一化预测结果
            pred_denorm = pred_values.cpu().numpy() * srad_stats['std'] + srad_stats['mean']
            
            # 保存预测结果为CSV（矩阵形式）
            mask_np = masks[0, 0].cpu().numpy().astype(bool)  # 确保掩码是布尔类型
            pred_matrix = pred_denorm[0, 0].copy()
            pred_matrix[~mask_np] = np.nan  # 将无效区域设为NaN
            
            # 保存预测结果
            df_pred = pd.DataFrame(pred_matrix)
            df_pred.to_csv(os.path.join(pred_save_dir, f'pred_time_slice_{i+1}.csv'), index=False, na_rep='NaN')
            
            # 保存真实值
            true_matrix = targets[0, 0].cpu().numpy() * srad_stats['std'] + srad_stats['mean']
            true_matrix[~mask_np] = np.nan
            df_true = pd.DataFrame(true_matrix)
            df_true.to_csv(os.path.join(pred_save_dir, f'true_time_slice_{i+1}.csv'), index=False, na_rep='NaN')
            
            # 评估指标计算
            valid_mask = masks.cpu().numpy()[0, 0] > 0
            if np.any(valid_mask):
                true_values = targets.cpu().numpy()[0, 0] * srad_stats['std'] + srad_stats['mean']
                pred_values_np = pred_denorm[0, 0]
                
                # 计算MSE和MAE
                mse = np.mean((pred_values_np[valid_mask] - true_values[valid_mask]) ** 2)
                mae = np.mean(np.abs(pred_values_np[valid_mask] - true_values[valid_mask]))
                # 计算相对误差
                rel_err = np.mean(np.abs(pred_values_np[valid_mask] - true_values[valid_mask]) / 
                                  (np.abs(true_values[valid_mask]) + 1e-6))
                # 计算R²
                corr_matrix = np.corrcoef(pred_values_np[valid_mask], true_values[valid_mask])
                r_squared = corr_matrix[0, 1]**2 if corr_matrix.size > 1 else 0
                
                # 计算预测值的多样性指标
                uniqueness = compute_uniqueness(pred_values_np[valid_mask])
                entropy = compute_entropy(pred_values_np[valid_mask])
                
                metrics = {
                    'sample': i,
                    'mse': mse,
                    'mae': mae,
                    'rel_err': rel_err,
                    'r_squared': r_squared,
                    'uniqueness': uniqueness,
                    'entropy': entropy
                }
                all_metrics.append(metrics)
                
                print(f"样本 {i} 评估指标 - MSE: {mse:.4f}, MAE: {mae:.4f}, 相对误差: {rel_err:.4f}, R²: {r_squared:.4f}")
                print(f"   多样性指标 - 唯一性: {uniqueness:.4f}, 熵: {entropy:.4f}")
            
            # 可视化结果
            visualize_prediction(inputs, true_matrix, pred_matrix, masks[0, 0], i, pred_save_dir)
    
    # 汇总评估指标
    if all_metrics:
        metrics_df = pd.DataFrame(all_metrics)
        metrics_df.to_csv(os.path.join(save_dir, 'evaluation_metrics.csv'), index=False)
        
        # 计算并保存汇总指标
        summary_metrics = {
            'mse_mean': metrics_df['mse'].mean(),
            'mse_std': metrics_df['mse'].std(),
            'mae_mean': metrics_df['mae'].mean(),
            'mae_std': metrics_df['mae'].std(),
            'rel_err_mean': metrics_df['rel_err'].mean(),
            'rel_err_std': metrics_df['rel_err'].std(),
            'r_squared_mean': metrics_df['r_squared'].mean(),
            'r_squared_std': metrics_df['r_squared'].std(),
            'uniqueness_mean': metrics_df['uniqueness'].mean(),
            'entropy_mean': metrics_df['entropy'].mean(),
        }
        
        # 保存汇总指标
        with open(os.path.join(save_dir, 'summary_metrics.txt'), 'w') as f:
            for key, value in summary_metrics.items():
                f.write(f"{key}: {value:.6f}\n")
        
        # 绘制评估指标条形图
        visualize_metrics(metrics_df, save_dir)
        
        # 绘制预测值与真实值的对比散点图
        visualize_scatter_comparison(all_preds, all_targets, all_masks, srad_stats, save_dir)
    
    # 绘制损失曲线
    visualize_loss_curves(losses, save_dir)
    
    # 保存模型和训练结果
    torch.save({
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'scheduler_state_dict': scheduler.state_dict() if scheduler else None,
        'srad_stats': srad_stats,
        'losses': losses,
        'metrics': all_metrics
    }, os.path.join(save_dir, 'photovoltaic_unet.pth'))

def compute_uniqueness(values):
    """Compute the uniqueness ratio of *values* (unique count / total count).

    Measures how strongly predictions collapse onto repeated values:
    1.0 means every value is distinct; values near 0 indicate heavy repetition.

    Args:
        values: 1-D array-like of numeric values.

    Returns:
        float: ratio in (0, 1]; 0.0 for empty input (the original raised
        ZeroDivisionError in that case).
    """
    values = np.asarray(values)
    if values.size == 0:
        return 0.0  # guard: avoid division by zero on empty input
    return len(np.unique(values)) / values.size

def compute_entropy(values):
    """Compute the Shannon entropy (base 2) of the value distribution.

    Continuous values are discretised into a 30-bin histogram; the entropy of
    the resulting probability distribution measures prediction diversity
    (0 = all mass in one bin, log2(30) ≈ 4.91 is the maximum).

    Args:
        values: 1-D array-like of numeric values.

    Returns:
        float: entropy in bits; 0.0 for empty input (the original produced a
        0/0 NaN with a RuntimeWarning in that case).
    """
    values = np.asarray(values)
    if values.size == 0:
        return 0.0  # guard: histogram sums to 0 for empty input -> 0/0 -> NaN
    # Discretise the continuous values into a histogram.
    hist, _ = np.histogram(values, bins=30)
    # Convert counts to a probability distribution.
    probs = hist / np.sum(hist)
    # Drop empty bins: 0 * log2(0) is treated as 0 by convention.
    probs = probs[probs > 0]
    return -np.sum(probs * np.log2(probs))

def _avg_masked_loss(model, data_loader, criterion, device, zero_channel=None, max_batches=10):
    """Average the criterion loss over up to *max_batches* batches.

    When *zero_channel* is given, that input channel is zeroed before the
    forward pass (feature-ablation perturbation).

    Raises:
        ValueError: if the loader yields no batches (the original code
        raised ZeroDivisionError here).
    """
    total_loss = 0.0
    count = 0
    for inputs, targets, masks in data_loader:
        if count >= max_batches:  # cap the batch count to speed things up
            break

        inputs, targets = inputs.to(device), targets.to(device)
        masks = masks.to(device)

        if zero_channel is not None:
            # Ablate a single feature channel without touching the original batch.
            inputs = inputs.clone()
            inputs[:, zero_channel, :, :] = 0.0

        pred = model(inputs)
        if pred.shape != targets.shape:
            # Model output can be smaller than the target grid; upsample to match.
            pred = F.interpolate(pred, size=targets.shape[2:], mode='bilinear', align_corners=True)

        total_loss += criterion(pred, targets, masks).item()
        count += 1

    if count == 0:
        raise ValueError("data_loader yielded no batches; cannot compute an average loss")
    return total_loss / count

def evaluate_feature_importance(model, data_loader, criterion, device):
    """Estimate input-feature importance by single-channel ablation.

    For each of the six input channels, the channel is zeroed and the loss
    increase over the unperturbed baseline is taken as that feature's
    importance. Scores are normalised to sum to 1 when their total is
    positive (negative importances can make the total non-positive, in
    which case raw differences are returned).

    Args:
        model: network mapping (B, 6, H, W) inputs to predictions.
        data_loader: iterable of (inputs, targets, masks) batches.
        criterion: loss callable taking (pred, targets, masks).
        device: torch device for the forward passes.

    Returns:
        dict: feature name -> importance score.
    """
    feature_names = ['shum', 'temp', 'wind', 'prec', 'pres', 'lrad']
    importance = {name: 0.0 for name in feature_names}

    model.eval()
    with torch.no_grad():
        # Baseline performance on unperturbed inputs.
        baseline_loss = _avg_masked_loss(model, data_loader, criterion, device)

        # Perturbation test: importance = loss increase after ablation.
        for i, feature in enumerate(feature_names):
            perturbed_loss = _avg_masked_loss(model, data_loader, criterion, device, zero_channel=i)
            importance[feature] = perturbed_loss - baseline_loss

    # Normalise importance scores so they sum to 1 (only when meaningful).
    total = sum(importance.values())
    if total > 0:
        for feature in importance:
            importance[feature] /= total

    return importance

def visualize_epoch_results(model, val_loader, device, srad_stats, epoch, save_dir):
    """Render one validation batch's prediction for the current epoch.

    Takes the first batch from *val_loader*, predicts, de-normalises with
    *srad_stats*, NaN-masks invalid cells and delegates the actual plotting
    to visualize_prediction, saving under <save_dir>/epoch_visualizations.
    """
    out_dir = os.path.join(save_dir, 'epoch_visualizations')
    os.makedirs(out_dir, exist_ok=True)

    model.eval()
    with torch.no_grad():
        # One representative batch is enough for a per-epoch snapshot.
        batch_inputs, batch_targets, batch_masks = next(iter(val_loader))
        batch_inputs = batch_inputs.to(device)
        batch_targets = batch_targets.to(device)
        batch_masks = batch_masks.to(device)

        preds = model(batch_inputs)
        if preds.shape != batch_targets.shape:
            preds = F.interpolate(preds, size=batch_targets.shape[2:],
                                  mode='bilinear', align_corners=True)

        # De-normalise back to physical units.
        std, mean = srad_stats['std'], srad_stats['mean']
        pred_np = preds.cpu().numpy() * std + mean
        target_np = batch_targets.cpu().numpy() * std + mean

        # First sample of the batch; cells outside the valid mask become NaN.
        valid = batch_masks[0, 0].cpu().numpy().astype(bool)
        pred_img = pred_np[0, 0].copy()
        pred_img[~valid] = np.nan
        true_img = target_np[0, 0].copy()
        true_img[~valid] = np.nan

        visualize_prediction(batch_inputs, true_img, pred_img,
                             batch_masks[0, 0], epoch, out_dir)

def visualize_prediction(inputs, true_matrix, pred_matrix, mask, index, save_dir):
    """Plot input average, ground truth, prediction and relative error.

    Saves a 2x2 panel figure to <save_dir>/prediction_<index>.png. The true
    and predicted panels share one color scale taken from the valid (non-NaN)
    true values.

    Args:
        inputs: model input tensor of shape (B, C, H, W); the channel mean of
            sample 0 is shown in the first panel.
        true_matrix: (H, W) ground truth with invalid cells set to NaN.
        pred_matrix: (H, W) prediction with invalid cells set to NaN.
        mask: unused here; kept for interface compatibility with callers.
        index: integer used in the output filename.
        save_dir: directory the PNG is written to.
    """
    fig, axes = plt.subplots(2, 2, figsize=(12, 10))

    # Shared color range from the valid true values; fall back to [0, 1] when
    # the whole matrix is masked so min/max cannot raise on an empty array.
    # (Removed two dead locals from the original: an unused boolean mask copy
    # and an unused valid-prediction array.)
    valid_true = true_matrix[~np.isnan(true_matrix)]
    if valid_true.size > 0:
        vmin, vmax = np.min(valid_true), np.max(valid_true)
    else:
        vmin, vmax = 0.0, 1.0
    norm = plt.Normalize(vmin=vmin, vmax=vmax)
    cmap = plt.cm.viridis

    def orient(arr):
        # The original rotated 180° (rot90 twice) then mirrored left-right;
        # that composition is exactly an up-down flip.
        return np.flipud(arr)

    input_avg = orient(inputs.cpu().numpy().mean(axis=1)[0])
    true_oriented = orient(true_matrix)
    pred_oriented = orient(pred_matrix)

    im0 = axes[0, 0].imshow(input_avg, cmap='viridis')
    axes[0, 0].set_title('Input Average')
    fig.colorbar(im0, ax=axes[0, 0])

    # True and predicted panels share cmap/norm for a fair visual comparison.
    im1 = axes[0, 1].imshow(true_oriented, cmap=cmap, norm=norm)
    axes[0, 1].set_title('True Values')
    cbar1 = fig.colorbar(im1, ax=axes[0, 1])
    cbar1.set_label('Value')

    im2 = axes[1, 0].imshow(pred_oriented, cmap=cmap, norm=norm)
    axes[1, 0].set_title('Predicted Values')
    cbar2 = fig.colorbar(im2, ax=axes[1, 0])
    cbar2.set_label('Value')

    # Relative error; NaN cells propagate and stay blank in the plot.
    relative_error = np.abs(pred_matrix - true_matrix) / (np.abs(true_matrix) + 1e-6)
    rel_err_oriented = orient(relative_error)

    # Distinct colormap so the error panel is not confused with the values.
    im3 = axes[1, 1].imshow(rel_err_oriented, cmap='plasma', vmin=0, vmax=1.0)
    axes[1, 1].set_title('Relative Error')
    fig.colorbar(im3, ax=axes[1, 1])

    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, f'prediction_{index}.png'))
    plt.close()

def visualize_metrics(metrics_df, save_dir):
    """Bar-chart each per-sample evaluation metric into one summary figure.

    Writes <save_dir>/metrics_summary.png with a 2x3 grid of bar plots
    (MSE, MAE, relative error, R², uniqueness, entropy) indexed by sample.
    The six previously copy-pasted subplot stanzas are driven by one table.
    """
    # (dataframe column, title, y-label, bar color) for each panel, in order.
    panels = [
        ('mse', 'MSE by Sample', 'MSE', 'royalblue'),
        ('mae', 'MAE by Sample', 'MAE', 'darkorange'),
        ('rel_err', 'Relative Error by Sample', 'Relative Error', 'forestgreen'),
        ('r_squared', 'R² by Sample', 'R²', 'purple'),
        ('uniqueness', 'Uniqueness by Sample', 'Uniqueness', 'crimson'),
        ('entropy', 'Entropy by Sample', 'Entropy', 'teal'),
    ]

    plt.figure(figsize=(14, 10))
    for pos, (column, title, ylabel, color) in enumerate(panels, start=1):
        plt.subplot(2, 3, pos)
        plt.bar(metrics_df['sample'], metrics_df[column], color=color)
        plt.title(title)
        plt.xlabel('Sample Index')
        plt.ylabel(ylabel)
        plt.grid(axis='y', linestyle='--', alpha=0.7)

    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'metrics_summary.png'))
    plt.close()

def visualize_loss_curves(losses, save_dir):
    """Plot training vs. validation loss per epoch.

    *losses* is a list of dicts with 'epoch', 'train_loss' and 'val_loss'
    keys; the figure is saved as <save_dir>/loss_curve.png.
    """
    epoch_nums = []
    train_vals = []
    val_vals = []
    for record in losses:
        epoch_nums.append(record['epoch'])
        train_vals.append(record['train_loss'])
        val_vals.append(record['val_loss'])

    plt.figure(figsize=(10, 6))
    plt.plot(epoch_nums, train_vals, 'b-', label='Training Loss')
    plt.plot(epoch_nums, val_vals, 'r-', label='Validation Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.title('Training and Validation Loss')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'loss_curve.png'))
    plt.close()

def visualize_scatter_comparison(all_preds, all_targets, all_masks, srad_stats, save_dir):
    """Scatter-plot predicted vs. true values over all collected samples.

    De-normalises with *srad_stats*, keeps only mask-valid cells, subsamples
    to at most 5000 points for readability, and saves
    <save_dir>/scatter_comparison.png with a dashed identity line.
    Returns early (no figure) when no valid points exist.
    """
    # Each element is a (B, 1, H, W) batch array; sample 0 of each batch is used.
    preds = np.concatenate([p[0, 0] for p in all_preds], axis=0).flatten()
    targets = np.concatenate([t[0, 0] for t in all_targets], axis=0).flatten()
    masks = np.concatenate([m[0, 0] for m in all_masks], axis=0).flatten()

    # De-normalise back to physical units.
    preds = preds * srad_stats['std'] + srad_stats['mean']
    targets = targets * srad_stats['std'] + srad_stats['mean']

    # Keep only valid-region data.
    valid_indices = masks > 0
    valid_preds = preds[valid_indices]
    valid_targets = targets[valid_indices]

    if valid_preds.size == 0:
        # Guard: np.min/np.max below raise ValueError on empty arrays when
        # every cell is masked out.
        return

    # Randomly subsample so the scatter does not become a solid blob.
    if len(valid_preds) > 5000:
        chosen = np.random.choice(len(valid_preds), 5000, replace=False)
        valid_preds = valid_preds[chosen]
        valid_targets = valid_targets[chosen]

    plt.figure(figsize=(10, 10))
    plt.scatter(valid_targets, valid_preds, alpha=0.5, s=5)

    # Identity diagonal = perfect prediction.
    min_val = min(np.min(valid_targets), np.min(valid_preds))
    max_val = max(np.max(valid_targets), np.max(valid_preds))
    plt.plot([min_val, max_val], [min_val, max_val], 'r--')

    plt.xlabel('True Values')
    plt.ylabel('Predicted Values')
    plt.title('Predicted vs True Values')
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'scatter_comparison.png'))
    plt.close()

def main():
    """Entry point: seed RNGs, build the dataset/model/optimizer, and train.

    NOTE: statement order is RNG-sensitive — the seeds must be set before the
    augmented dataset, random_split and weight init consume random numbers.
    """
    # Pick the compute device (GPU when available, otherwise CPU).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'使用设备: {device}')
    
    # Seed all RNGs so runs are reproducible.
    seed = 42
    torch.manual_seed(seed)
    np.random.seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    
    # Full dataset with augmentation enabled for training.
    # (WeatherDataset is project-defined; assumed to yield (inputs, targets,
    # masks) like the loaders below consume — confirm against its definition.)
    dataset = WeatherDataset('../autodl-tmp', augment=True, test_mode=False)
    
    # Build the U-Net: 6 weather channels in, 1 radiation channel out.
    model = SimpleUNet(in_channels=6, out_channels=1, dropout_rate=0.25)  # increased dropout ratio
    
    # Kaiming init for conv layers; unit-weight / zero-bias init for BatchNorm.
    def init_weights(m):
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
    
    model.apply(init_weights)
    model = model.to(device)
    
    # Composite masked loss (project-defined EnhancedMSELoss).
    criterion = EnhancedMSELoss(alpha=0.005, beta=0.3, gamma=0.2, detail_lambda=0.1, smoothness_lambda=0.02)
    
    # AdamW decouples weight decay from the gradient step; amsgrad for stability.
    optimizer = optim.AdamW(model.parameters(), lr=0.001, weight_decay=1e-5, amsgrad=True)
    
    # 80/20 random train/test split.
    train_size = int(0.8 * len(dataset))
    test_size = len(dataset) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])
    
    # Re-create the dataset without augmentation for evaluation.
    test_dataset_noaug = WeatherDataset('../autodl-tmp', augment=False, test_mode=True)
    # Reuse the split's test indices so both dataset views cover the same samples.
    indices = test_dataset.indices
    test_dataset_noaug = torch.utils.data.Subset(test_dataset_noaug, indices)
    
    # Small batch size: lower memory footprint, more frequent parameter updates.
    train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=4 if device.type == 'cuda' else 0)
    test_loader = DataLoader(test_dataset_noaug, batch_size=4, shuffle=False, num_workers=2 if device.type == 'cuda' else 0)
    
    # Train, evaluate, and write all artifacts under result_dir.
    result_dir = 'results-optimized-diversity'
    train_and_evaluate(model, train_loader, test_loader, criterion, optimizer, device, dataset, num_epochs=60, save_dir=result_dir)

if __name__ == '__main__':
    main()