#!/usr/bin/env python
"""
卫星图像超分辨率模型训练和压缩示例 - 修复版
修复了压缩参数过于激进和加载不一致的问题
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import numpy as np
import copy
import time
import json
import gzip
import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
import math


# 定义简化的SRCNN模型
class SimpleSRCNN(nn.Module):
    """Simplified SRCNN: deterministic bicubic upsampling followed by a
    three-layer convolutional refinement network."""

    def __init__(self, scale_factor=4):
        super().__init__()
        self.scale_factor = scale_factor
        # Fixed (non-learned) bicubic upscaling in front of the conv stack.
        self.upscale = nn.Upsample(scale_factor=scale_factor, mode='bicubic', align_corners=False)
        # Classic SRCNN layout: 9x9 feature extraction, 5x5 mapping, 5x5 reconstruction.
        layers = [
            nn.Conv2d(3, 64, kernel_size=9, padding=4),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 32, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 3, kernel_size=5, padding=2),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Upscale the input bicubically, then refine it with the conv stack."""
        return self.net(self.upscale(x))


# 定义简化的EDSR模型
class SimpleEDSR(nn.Module):
    """Simplified EDSR: conv head, a stack of residual blocks with a long
    skip connection, PixelShuffle upsampling, and an RGB output conv.

    Supports scale factors 2, 3 and 4 (4x is performed as two 2x stages).
    """

    class ResBlock(nn.Module):
        """Residual block: conv -> ReLU -> conv, plus the identity."""

        def __init__(self, channels):
            super().__init__()
            self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
            self.relu = nn.ReLU(inplace=True)
            self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

        def forward(self, x):
            residual = x
            out = self.relu(self.conv1(x))
            out = self.conv2(out)
            out = out + residual
            return out

    def __init__(self, scale=4, num_blocks=8):
        """Build the network.

        Args:
            scale: super-resolution factor; must be 2, 3 or 4.
            num_blocks: number of residual blocks in the body.

        Raises:
            ValueError: if ``scale`` is not one of 2, 3, 4.
        """
        super().__init__()
        self.scale = scale

        # Head feature extraction
        self.head = nn.Conv2d(3, 64, kernel_size=3, padding=1)

        # Residual body
        self.body = nn.Sequential(*[self.ResBlock(64) for _ in range(num_blocks)])
        self.tail_conv1 = nn.Conv2d(64, 64, kernel_size=3, padding=1)

        # Upsampling via sub-pixel convolution
        if scale == 2 or scale == 3:
            self.upsampler = nn.Sequential(
                nn.Conv2d(64, 64 * (scale ** 2), kernel_size=3, padding=1),
                nn.PixelShuffle(scale)
            )
        elif scale == 4:
            self.upsampler = nn.Sequential(
                nn.Conv2d(64, 64 * 4, kernel_size=3, padding=1),
                nn.PixelShuffle(2),
                nn.Conv2d(64, 64 * 4, kernel_size=3, padding=1),
                nn.PixelShuffle(2)
            )
        else:
            # Fail fast: previously an unsupported scale left self.upsampler
            # undefined and only surfaced as an AttributeError in forward().
            raise ValueError(f"unsupported scale factor: {scale} (expected 2, 3 or 4)")

        # Output projection back to RGB
        self.output = nn.Conv2d(64, 3, kernel_size=3, padding=1)

    def forward(self, x):
        """Run head -> residual body (+ long skip) -> upsampler -> output."""
        x = self.head(x)
        res = x

        x = self.body(x)
        x = self.tail_conv1(x)
        x = x + res

        x = self.upsampler(x)
        x = self.output(x)

        return x


# 定义改进的ESPCN模型
class SimpleESPCN(nn.Module):
    """Simplified ESPCN: all convolutions run at the low input resolution,
    then a sub-pixel (PixelShuffle) layer produces the HR output."""

    def __init__(self, scale_factor=4, num_channels=3, feature_channels=64):
        super().__init__()
        self.scale_factor = scale_factor

        hidden_channels = feature_channels // 2
        shuffle_channels = num_channels * (scale_factor ** 2)
        # Final conv emits scale_factor**2 channels per output channel,
        # which PixelShuffle rearranges into spatial resolution.
        self.feature_extraction = nn.Sequential(
            nn.Conv2d(num_channels, feature_channels, kernel_size=5, padding=2),
            nn.Tanh(),
            nn.Conv2d(feature_channels, hidden_channels, kernel_size=3, padding=1),
            nn.Tanh(),
            nn.Conv2d(hidden_channels, shuffle_channels, kernel_size=3, padding=1),
        )
        # Sub-pixel convolution layer
        self.pixel_shuffle = nn.PixelShuffle(scale_factor)

    def forward(self, x):
        """Map an LR image to an HR image scale_factor times larger."""
        return self.pixel_shuffle(self.feature_extraction(x))


# 改进的卫星图像超分辨率数据集
class SatelliteSRDataset(Dataset):
    """In-memory synthetic satellite-image super-resolution dataset.

    Each item is an (lr_img, hr_img) pair of float tensors clamped to [0, 1]:
    hr_img has shape (3, hr_size, hr_size) and lr_img is its bicubically
    downsampled (3, hr_size//scale, hr_size//scale) version with mild
    additive Gaussian noise. All pairs are generated eagerly in __init__
    and held in memory.
    """

    def __init__(self, size=1000, scale=4, hr_size=128):
        self.size = size              # number of generated samples
        self.scale = scale            # super-resolution factor
        self.hr_size = hr_size        # HR image side length in pixels
        self.lr_size = hr_size // scale

        print(f"生成 {size} 个卫星图像超分辨率样本...")
        self.data = []

        for i in tqdm(range(size)):
            hr_img, lr_img = self._generate_satellite_pair()
            # Stored LR-first so __getitem__ yields (lr, hr) pairs.
            self.data.append((lr_img, hr_img))

    def _generate_satellite_pair(self):
        """Generate one HR satellite image and its degraded LR counterpart."""
        # Synthesize an HR image with satellite-like structure.
        hr_img = self._generate_satellite_image(self.hr_size)

        # Bicubic downsampling produces the LR image.
        lr_img = F.interpolate(
            hr_img.unsqueeze(0),
            size=(self.lr_size, self.lr_size),
            mode='bicubic',
            align_corners=False
        ).squeeze(0)

        # Light additive noise to mimic sensor noise.
        noise = torch.randn_like(lr_img) * 0.02
        lr_img = torch.clamp(lr_img + noise, 0, 1)

        return hr_img, lr_img

    def _generate_satellite_image(self, size):
        """Render a (3, size, size) image for a randomly chosen scene type."""
        img = torch.zeros(3, size, size)

        # Pick one of five scene types at random.
        scene_type = np.random.choice(['urban', 'rural', 'forest', 'coastal', 'desert'])

        if scene_type == 'urban':
            img = self._generate_urban_scene(size)
        elif scene_type == 'rural':
            img = self._generate_rural_scene(size)
        elif scene_type == 'forest':
            img = self._generate_forest_scene(size)
        elif scene_type == 'coastal':
            img = self._generate_coastal_scene(size)
        else:  # desert
            img = self._generate_desert_scene(size)

        # Overlay high-frequency detail and texture.
        img = self._add_high_frequency_details(img, size)

        return torch.clamp(img, 0, 1)

    def _generate_urban_scene(self, size):
        """Urban scene: concrete-grey background, rectangular buildings, roads."""
        img = torch.zeros(3, size, size)

        # Base background - concrete grey.
        base_color = torch.tensor([0.6, 0.6, 0.6]).view(3, 1, 1)
        img += base_color

        # Paint rectangular buildings.
        # NOTE(review): np.random.randint(8, size // 4) requires size > 32;
        # fine for the default hr_size=128 — confirm if smaller sizes are used.
        num_buildings = np.random.randint(5, 15)
        for _ in range(num_buildings):
            # Building footprint and position.
            w, h = np.random.randint(8, size // 4), np.random.randint(8, size // 4)
            x, y = np.random.randint(0, size - w), np.random.randint(0, size - h)

            # Building color - grey tones.
            building_color = torch.tensor([
                np.random.uniform(0.4, 0.8),
                np.random.uniform(0.4, 0.8),
                np.random.uniform(0.4, 0.8)
            ]).view(3, 1, 1)

            img[:, y:y + h, x:x + w] = building_color

        # Road grid overlay.
        self._add_road_grid(img, size)

        return img

    def _generate_rural_scene(self, size):
        """Rural scene: earth-tone background, rectangular fields, a few buildings."""
        img = torch.zeros(3, size, size)

        # Base background - earth tone.
        base_color = torch.tensor([0.4, 0.5, 0.3]).view(3, 1, 1)
        img += base_color

        # Regular rectangular fields.
        num_fields = np.random.randint(3, 8)
        for _ in range(num_fields):
            w, h = np.random.randint(size // 6, size // 3), np.random.randint(size // 6, size // 3)
            x, y = np.random.randint(0, size - w), np.random.randint(0, size - h)

            # Field color - green crops or brown tilled soil.
            if np.random.random() > 0.5:
                field_color = torch.tensor([0.2, 0.7, 0.3]).view(3, 1, 1)  # green crops
            else:
                field_color = torch.tensor([0.6, 0.4, 0.2]).view(3, 1, 1)  # tilled soil

            img[:, y:y + h, x:x + w] = field_color

        # A small number of farm buildings.
        num_buildings = np.random.randint(1, 4)
        for _ in range(num_buildings):
            w, h = np.random.randint(4, 12), np.random.randint(4, 12)
            x, y = np.random.randint(0, size - w), np.random.randint(0, size - h)

            building_color = torch.tensor([0.7, 0.6, 0.5]).view(3, 1, 1)
            img[:, y:y + h, x:x + w] = building_color

        return img

    def _generate_forest_scene(self, size):
        """Forest scene: dark-green background with elliptical tree clusters."""
        img = torch.zeros(3, size, size)

        # Base forest background.
        base_color = torch.tensor([0.15, 0.4, 0.2]).view(3, 1, 1)
        img += base_color

        # Tree clusters.
        num_clusters = np.random.randint(8, 20)
        for _ in range(num_clusters):
            center_x = np.random.randint(10, size - 10)
            center_y = np.random.randint(10, size - 10)
            radius = np.random.randint(5, 15)

            # Full-image coordinate grids used to mask out each cluster.
            y_coords, x_coords = torch.meshgrid(
                torch.arange(size),
                torch.arange(size),
                indexing='ij'
            )

            # Elliptical tree cluster.
            a, b = radius, radius * np.random.uniform(0.6, 1.4)
            mask = ((x_coords - center_x) / a) ** 2 + ((y_coords - center_y) / b) ** 2 <= 1

            # Per-cluster color variation.
            tree_color = torch.tensor([
                np.random.uniform(0.1, 0.3),
                np.random.uniform(0.3, 0.6),
                np.random.uniform(0.1, 0.4)
            ]).view(3, 1, 1)

            for c in range(3):
                img[c][mask] = tree_color[c, 0, 0]

        return img

    def _generate_coastal_scene(self, size):
        """Coastal scene: horizontal bands of water, beach and vegetated land."""
        img = torch.zeros(3, size, size)

        # Water region (top band).
        water_height = np.random.randint(size // 3, 2 * size // 3)
        water_color = torch.tensor([0.1, 0.3, 0.7]).view(3, 1, 1)
        img[:, :water_height, :] = water_color

        # Beach / sand strip.
        beach_height = np.random.randint(5, 15)
        beach_color = torch.tensor([0.8, 0.7, 0.5]).view(3, 1, 1)
        img[:, water_height:water_height + beach_height, :] = beach_color

        # Vegetated land fills whatever remains below the beach.
        land_color = torch.tensor([0.2, 0.5, 0.3]).view(3, 1, 1)
        if water_height + beach_height < size:
            img[:, water_height + beach_height:, :] = land_color

        return img

    def _generate_desert_scene(self, size):
        """Desert scene: sand-colored background with circular dunes."""
        img = torch.zeros(3, size, size)

        # Base desert tone.
        base_color = torch.tensor([0.8, 0.6, 0.4]).view(3, 1, 1)
        img += base_color

        # Dunes (circular regions).
        num_dunes = np.random.randint(3, 8)
        for _ in range(num_dunes):
            center_x = np.random.randint(10, size - 10)
            center_y = np.random.randint(10, size - 10)
            radius = np.random.randint(8, 20)

            y_coords, x_coords = torch.meshgrid(
                torch.arange(size),
                torch.arange(size),
                indexing='ij'
            )

            mask = (x_coords - center_x) ** 2 + (y_coords - center_y) ** 2 <= radius ** 2

            # Slightly varied dune color.
            dune_color = torch.tensor([
                np.random.uniform(0.75, 0.9),
                np.random.uniform(0.55, 0.7),
                np.random.uniform(0.35, 0.5)
            ]).view(3, 1, 1)

            for c in range(3):
                img[c][mask] = dune_color[c, 0, 0]

        return img

    def _add_road_grid(self, img, size):
        """Optionally draw one horizontal and/or one vertical road (in place)."""
        road_color = torch.tensor([0.3, 0.3, 0.3]).view(3, 1, 1)
        road_width = 2

        # Horizontal road (50% chance).
        if np.random.random() > 0.5:
            road_y = np.random.randint(road_width, size - road_width)
            img[:, road_y - road_width:road_y + road_width, :] = road_color

        # Vertical road (50% chance).
        if np.random.random() > 0.5:
            road_x = np.random.randint(road_width, size - road_width)
            img[:, :, road_x - road_width:road_x + road_width] = road_color

    def _add_high_frequency_details(self, img, size):
        """Add high-frequency texture: random noise plus a high-pass of the image."""
        # Random high-frequency noise to simulate fine detail.
        detail_strength = 0.1
        high_freq = torch.randn(3, size, size) * detail_strength

        # 3x3 Laplacian-style high-pass kernel, one copy per channel.
        kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32)
        kernel = kernel.unsqueeze(0).unsqueeze(0).repeat(3, 1, 1, 1)

        # Depthwise convolution with reflect padding keeps the output size.
        img_padded = F.pad(img.unsqueeze(0), (1, 1, 1, 1), mode='reflect')
        high_freq_filtered = F.conv2d(img_padded, kernel, groups=3, padding=0).squeeze(0)

        # Blend the original image with its high-pass response and the noise;
        # the caller clamps the result back into [0, 1].
        img = img + high_freq_filtered * 0.1 + high_freq

        return img

    def __len__(self):
        """Return the number of pre-generated samples."""
        return self.size

    def __getitem__(self, idx):
        """Return the (lr_img, hr_img) pair at index ``idx``."""
        return self.data[idx]


def get_args():
    """Parse and return the command-line arguments for this script."""
    p = argparse.ArgumentParser(description="卫星图像超分辨率模型训练和压缩示例")

    # Model selection
    p.add_argument("--model", "-m", default="srcnn",
                   choices=["srcnn", "edsr", "espcn"],
                   help="要使用的模型架构")
    p.add_argument("--scale", "-s", type=int, default=4,
                   choices=[2, 3, 4],
                   help="超分辨率倍数")

    # Data and output options
    p.add_argument("--output-dir", "-o", default="./output",
                   help="模型和结果的输出目录")
    p.add_argument("--hr-size", type=int, default=128,
                   help="高分辨率图像大小 (默认: 128)")

    # Training options
    p.add_argument("--epochs", "-e", type=int, default=30,
                   help="训练轮数 (默认: 30)")
    p.add_argument("--batch-size", "-b", type=int, default=16,
                   help="批次大小 (默认: 16)")
    p.add_argument("--learning-rate", "-lr", type=float, default=0.0001,
                   help="学习率 (默认: 0.0001)")
    p.add_argument("--weight-decay", type=float, default=1e-4,
                   help="权重衰减 (默认: 1e-4)")
    p.add_argument("--dataset-size", type=int, default=1000,
                   help="数据集大小 (默认: 1000)")

    # Compression options (conservative defaults)
    p.add_argument("--bits", type=int, default=8,
                   help="量化位数 (默认: 8位，更保守)")
    p.add_argument("--sparsity", type=float, default=0.3,
                   help="剪枝稀疏度 (默认: 0.3，更保守)")

    # Run-mode selection
    p.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                   help="运行模式: train(仅训练), compress(仅压缩), both(训练+压缩)")
    p.add_argument("--pretrained-path", type=str, default=None,
                   help="预训练模型路径 (用于仅压缩模式)")

    return p.parse_args()


def load_sr_model(model_name, scale):
    """Instantiate the requested super-resolution architecture.

    Args:
        model_name: one of "srcnn", "edsr", "espcn".
        scale: super-resolution factor passed to the model.

    Raises:
        ValueError: if ``model_name`` is not a known architecture.
    """
    print(f"加载 {model_name} 模型用于 {scale}x 超分辨率...")

    # Lambdas defer construction so only the selected model is built.
    builders = {
        "srcnn": lambda: SimpleSRCNN(scale_factor=scale),
        "edsr": lambda: SimpleEDSR(scale=scale, num_blocks=8),
        "espcn": lambda: SimpleESPCN(scale_factor=scale),
    }

    if model_name not in builders:
        raise ValueError(f"不支持的模型: {model_name}")

    return builders[model_name]()


def create_dataloaders(scale, batch_size=16, dataset_size=1000, hr_size=128):
    """Build train/test DataLoaders over a synthetic satellite SR dataset.

    The dataset is split 80/20 into training and test subsets.

    Returns:
        (train_loader, test_loader)
    """
    print(f"准备 {scale}x 超分辨率数据集...")

    # Full synthetic dataset, generated eagerly.
    dataset = SatelliteSRDataset(size=dataset_size, scale=scale, hr_size=hr_size)

    # 80% train / 20% test split.
    n_train = int(0.8 * len(dataset))
    n_test = len(dataset) - n_train
    train_subset, test_subset = random_split(dataset, [n_train, n_test])

    # Shared loader settings; only shuffling differs between the two.
    loader_kwargs = dict(batch_size=batch_size, num_workers=2, pin_memory=True)
    train_loader = DataLoader(train_subset, shuffle=True, **loader_kwargs)
    test_loader = DataLoader(test_subset, shuffle=False, **loader_kwargs)

    print(f"训练集大小: {len(train_subset)}")
    print(f"测试集大小: {len(test_subset)}")

    return train_loader, test_loader


def calculate_psnr(img1, img2):
    """Return the PSNR (dB) between two images assumed to lie in [0, 1]."""
    mse = ((img1 - img2) ** 2).mean()
    # Identical images have zero error -> infinite PSNR.
    if mse == 0:
        return float('inf')
    return 20 * torch.log10(1.0 / torch.sqrt(mse))


def calculate_ssim(img1, img2, window_size=11, size_average=True):
    """Compute SSIM between two batched images of shape (N, C, H, W).

    Uses a Gaussian window (sigma=1.5) and the standard constants
    C1=0.01**2, C2=0.03**2 for data in [0, 1]. Returns a scalar when
    size_average is True, otherwise one value per batch element.
    """

    def _gauss_window(size, sigma=1.5):
        # 1-D Gaussian, normalized, expanded to a (1, 1, size, size) window.
        coords = torch.arange(size, dtype=torch.float32) - size // 2
        g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
        g = g / g.sum()
        return g.outer(g).unsqueeze(0).unsqueeze(0)

    channels = img1.size(1)
    pad = window_size // 2
    window = _gauss_window(window_size).to(img1.device)
    window = window.expand(channels, 1, window_size, window_size)

    # Stabilizing constants from the SSIM paper.
    C1, C2 = 0.01 ** 2, 0.03 ** 2

    def _filt(t):
        # Depthwise Gaussian filtering: one window per channel.
        return F.conv2d(t, window, padding=pad, groups=channels)

    # Local means.
    mu1, mu2 = _filt(img1), _filt(img2)
    mu1_sq, mu2_sq, mu1_mu2 = mu1 * mu1, mu2 * mu2, mu1 * mu2

    # Local variances and covariance.
    sigma1_sq = _filt(img1 * img1) - mu1_sq
    sigma2_sq = _filt(img2 * img2) - mu2_sq
    sigma12 = _filt(img1 * img2) - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
        (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))

    if size_average:
        return ssim_map.mean()
    return ssim_map.mean(1).mean(1).mean(1)


def perceptual_loss(hr_fake, hr_real):
    """Simplified perceptual loss: mean absolute error between the images."""
    # Equivalent to F.l1_loss with the default mean reduction.
    diff = hr_fake - hr_real
    return diff.abs().mean()


def train_model(model, train_loader, test_loader, device, args):
    """Train a super-resolution model, keeping the best-by-test-PSNR weights.

    Args:
        model: SR network to train (modified in place).
        train_loader / test_loader: DataLoaders yielding (lr, hr) batches.
        device: torch device the batches are moved to.
        args: parsed CLI namespace; uses ``epochs``, ``learning_rate``,
            ``weight_decay`` and ``output_dir``.

    Returns:
        (model, best_test_psnr, history) where ``model`` has been reloaded
        with the best checkpoint and ``history`` holds per-epoch metrics.
    """
    print("开始训练卫星图像超分辨率模型...")

    # Loss functions and optimizer.
    criterion_mse = nn.MSELoss()
    criterion_l1 = nn.L1Loss()

    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    # Halve the learning rate every 10 epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

    # Training history.
    train_losses = []
    train_psnrs = []
    test_psnrs = []

    best_test_psnr = 0.0
    best_model_state = None

    # Training loop.
    for epoch in range(args.epochs):
        # Training phase.
        model.train()
        running_loss = 0.0
        running_psnr = 0.0
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (lr_imgs, hr_imgs) in enumerate(pbar):
            lr_imgs, hr_imgs = lr_imgs.to(device), hr_imgs.to(device)

            # Forward pass.
            optimizer.zero_grad()
            sr_imgs = model(lr_imgs)

            # Weighted combination of MSE and L1 losses.
            mse_loss = criterion_mse(sr_imgs, hr_imgs)
            l1_loss = criterion_l1(sr_imgs, hr_imgs)
            total_loss = 0.8 * mse_loss + 0.2 * l1_loss

            # Backward pass.
            total_loss.backward()
            optimizer.step()

            # Per-image PSNR, averaged over the batch.
            with torch.no_grad():
                batch_psnr = 0
                for i in range(lr_imgs.size(0)):
                    psnr = calculate_psnr(sr_imgs[i], hr_imgs[i])
                    batch_psnr += psnr.item()
                batch_psnr /= lr_imgs.size(0)

            # Running statistics.
            running_loss += total_loss.item()
            running_psnr += batch_psnr
            num_batches += 1

            # Progress-bar readout.
            pbar.set_postfix({
                'Loss': f'{total_loss.item():.4f}',
                'PSNR': f'{batch_psnr:.2f}dB'
            })

        # Epoch averages.
        avg_train_loss = running_loss / num_batches
        train_psnr = running_psnr / num_batches

        # Evaluation phase.
        test_psnr = evaluate_sr_model(model, test_loader, device)

        # Record history.
        train_losses.append(avg_train_loss)
        train_psnrs.append(train_psnr)
        test_psnrs.append(test_psnr)

        # Keep the best checkpoint (by test PSNR).
        if test_psnr > best_test_psnr:
            best_test_psnr = test_psnr
            best_model_state = copy.deepcopy(model.state_dict())

        # Learning-rate schedule step (per epoch).
        scheduler.step()

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train PSNR: {train_psnr:.2f}dB, '
              f'Test PSNR: {test_psnr:.2f}dB')

    # Restore the best checkpoint before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试PSNR: {best_test_psnr:.2f}dB')

    # Plot the training curves to the output directory.
    plot_training_curves(train_losses, train_psnrs, test_psnrs, args.output_dir)

    return model, best_test_psnr, {
        'train_losses': train_losses,
        'train_psnrs': train_psnrs,
        'test_psnrs': test_psnrs,
        'best_test_psnr': best_test_psnr
    }


def evaluate_sr_model(model, dataloader, device):
    """Return the average per-image PSNR of ``model`` over ``dataloader``."""
    model.eval()
    psnr_sum = 0.0
    n_images = 0

    with torch.no_grad():
        for lr_batch, hr_batch in dataloader:
            lr_batch, hr_batch = lr_batch.to(device), hr_batch.to(device)
            sr_batch = model(lr_batch)

            # Accumulate PSNR image by image rather than per batch.
            for sr_img, hr_img in zip(sr_batch, hr_batch):
                psnr_sum += calculate_psnr(sr_img, hr_img).item()
                n_images += 1

    return psnr_sum / n_images if n_images > 0 else 0


def plot_training_curves(train_losses, train_psnrs, test_psnrs, output_dir):
    """Plot per-epoch loss and PSNR curves and save them as one PNG."""
    xs = range(1, len(train_losses) + 1)
    out_path = os.path.join(output_dir, 'training_curves.png')

    plt.figure(figsize=(12, 4))

    # Left panel: training loss.
    plt.subplot(1, 2, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss')
    plt.title('Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    # Right panel: train vs. test PSNR.
    plt.subplot(1, 2, 2)
    plt.plot(xs, train_psnrs, 'b-', label='Training PSNR')
    plt.plot(xs, test_psnrs, 'r-', label='Test PSNR')
    plt.title('Training and Test PSNR')
    plt.xlabel('Epoch')
    plt.ylabel('PSNR (dB)')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {out_path}")


def visualize_sr_results(model, dataloader, device, output_dir, num_samples=4):
    """Save a grid comparing LR input, HR ground truth and SR output.

    Uses the first image of each of the first ``num_samples`` batches.
    NOTE(review): axes[i, j] indexing assumes num_samples >= 2 — with
    num_samples == 1, plt.subplots returns a 1-D axes array; confirm callers
    never pass 1.
    """
    model.eval()

    fig, axes = plt.subplots(num_samples, 3, figsize=(12, 4 * num_samples))
    fig.suptitle('Satellite Image Super-Resolution Results', fontsize=16)

    with torch.no_grad():
        for i, (lr_imgs, hr_imgs) in enumerate(dataloader):
            if i >= num_samples:
                break

            lr_imgs, hr_imgs = lr_imgs.to(device), hr_imgs.to(device)
            sr_imgs = model(lr_imgs)

            # Take only the first sample of each batch, as (H, W, C) arrays.
            lr_show = lr_imgs[0].cpu().permute(1, 2, 0).numpy()
            hr_show = hr_imgs[0].cpu().permute(1, 2, 0).numpy()
            sr_show = sr_imgs[0].cpu().permute(1, 2, 0).numpy()

            # Clip into the displayable [0, 1] range.
            lr_show = np.clip(lr_show, 0, 1)
            hr_show = np.clip(hr_show, 0, 1)
            sr_show = np.clip(sr_show, 0, 1)

            # PSNR of the SR output against the ground truth.
            psnr = calculate_psnr(sr_imgs[0], hr_imgs[0]).item()

            axes[i, 0].imshow(lr_show)
            axes[i, 0].set_title('Low Resolution')
            axes[i, 0].axis('off')

            axes[i, 1].imshow(hr_show)
            axes[i, 1].set_title('High Resolution (GT)')
            axes[i, 1].axis('off')

            axes[i, 2].imshow(sr_show)
            axes[i, 2].set_title(f'Super Resolution (PSNR: {psnr:.2f}dB)')
            axes[i, 2].axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'sr_results.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"超分辨率结果已保存到: {os.path.join(output_dir, 'sr_results.png')}")


def visualize_dataset_samples(dataloader, output_dir, num_samples=8):
    """Save a 4x4 grid of (upsampled LR, HR) pairs from the dataloader.

    Each sample occupies two adjacent cells (LR then HR), two samples per
    row, so the fixed 4x4 grid holds at most 8 samples. The LR image is
    bicubically upsampled to the HR size purely for visual comparison.
    """
    fig, axes = plt.subplots(4, 4, figsize=(16, 16))
    fig.suptitle('Satellite Super-Resolution Dataset Samples', fontsize=16)

    sample_count = 0
    for batch_idx, (lr_imgs, hr_imgs) in enumerate(dataloader):
        if sample_count >= num_samples:
            break

        # Take the first sample of each batch, as (H, W, C) arrays.
        lr_show = lr_imgs[0].permute(1, 2, 0).numpy()
        hr_show = hr_imgs[0].permute(1, 2, 0).numpy()

        # Clip into the displayable [0, 1] range.
        lr_show = np.clip(lr_show, 0, 1)
        hr_show = np.clip(hr_show, 0, 1)

        # Upsample LR to the HR size so the pair can be compared side by side.
        # (Local import shadows the module-level PIL import; harmless.)
        from PIL import Image
        lr_pil = Image.fromarray((lr_show * 255).astype(np.uint8))
        lr_upsampled = lr_pil.resize((hr_show.shape[1], hr_show.shape[0]), Image.BICUBIC)
        lr_upsampled = np.array(lr_upsampled) / 255.0

        # Grid position: two samples per row, two cells per sample.
        row = sample_count // 2
        col_lr = (sample_count % 2) * 2
        col_hr = col_lr + 1

        # Upsampled LR image.
        axes[row, col_lr].imshow(lr_upsampled)
        axes[row, col_lr].set_title(f'Sample {sample_count + 1}: LR (upsampled)')
        axes[row, col_lr].axis('off')

        # HR ground truth.
        axes[row, col_hr].imshow(hr_show)
        axes[row, col_hr].set_title(f'Sample {sample_count + 1}: HR (ground truth)')
        axes[row, col_hr].axis('off')

        sample_count += 1

    # Hide any unused subplots.
    for i in range(sample_count * 2, 16):
        row = i // 4
        col = i % 4
        if row < 4 and col < 4:
            axes[row, col].axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'dataset_samples.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据集样本已保存到: {os.path.join(output_dir, 'dataset_samples.png')}")


def get_model_size(model):
    """Return the total in-memory size of the model's parameters in MB."""
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    return total_bytes / (1024 * 1024)


def compute_compressed_size(model, bits=8, sparsity=0.3):
    """Estimate the theoretical on-disk size (MB) of a compressed model.

    Weights are assumed quantized to ``bits`` bits. If pruning makes a
    sparse encoding (a 4-byte index plus one quantized value per nonzero)
    smaller than the dense encoding, the sparse size is used. Biases are
    kept as dense float32. A flat 5% overhead accounts for metadata.

    Args:
        model: nn.Module whose named parameters are measured.
        bits: quantization bit-width for weight tensors.
        sparsity: requested pruning sparsity; only gates whether the sparse
            encoding is considered at all.

    Returns:
        (total_size_mb, actual_sparsity): estimated size in MB, and the
        measured fraction of zero-valued WEIGHT elements.
    """
    total_bytes = 0.0
    weight_elements = 0
    nonzero_weights = 0

    for name, param in model.named_parameters():
        param_count = param.numel()

        if 'weight' in name:
            weight_elements += param_count
            nz_count = torch.count_nonzero(param).item()
            nonzero_weights += nz_count

            # Sparse: 4-byte index + quantized value per nonzero element.
            sparse_size = nz_count * (4 + bits / 8)
            # Dense: every element stored at the quantized bit-width.
            dense_size = param_count * (bits / 8)

            if sparsity > 0 and sparse_size < dense_size:
                total_bytes += sparse_size
            else:
                total_bytes += dense_size
        elif 'bias' in name:
            # Biases stay as dense float32 (4 bytes each).
            total_bytes += param_count * 4

    # Flat 5% metadata/container overhead.
    overhead_bytes = total_bytes * 0.05
    total_size_mb = (total_bytes + overhead_bytes) / (1024 * 1024)
    # BUG FIX: the original divided the nonzero WEIGHT count by the count of
    # ALL parameters (weights + biases), so even a fully dense model reported
    # a spurious nonzero sparsity. Measure sparsity over weight elements only.
    actual_sparsity = 1.0 - (nonzero_weights / weight_elements) if weight_elements > 0 else 0

    return total_size_mb, actual_sparsity


# 修复后的量化函数 - 使用更精确的量化方法
def improved_quantize_weights(weight, bits=8):
    """Symmetric uniform quantization of a weight tensor.

    Returns a tuple (dequantized_weight, scale, zero_point, quantized_weight).
    For bits >= 16 or an all-zero tensor, quantization is skipped and the
    tuple is (copy_of_weight, None, None, None).
    """
    # 16-bit and above: precision loss is negligible, skip quantization.
    if bits >= 16:
        return weight.clone(), None, None, None

    max_abs = weight.abs().max()
    if max_abs == 0:
        return weight.clone(), None, None, None

    # Symmetric range [-qmax, qmax] keeps the zero-point at exactly zero.
    qmax = 2 ** (bits - 1) - 1
    scale = max_abs / qmax
    q = torch.clamp(torch.round(weight / scale), -qmax, qmax)

    return q * scale, scale, torch.tensor(0.0), q


# 修复后的剪枝函数 - 使用更智能的剪枝策略
def improved_prune_weights(weight, sparsity=0.3):
    """Magnitude-based pruning: zero out the lowest-``sparsity`` fraction.

    Returns (pruned_weight, mask); mask is None when sparsity <= 0.
    """
    if sparsity <= 0:
        return weight.clone(), None

    # Importance is approximated by absolute magnitude.
    importance = weight.abs()
    flat = importance.flatten()

    # A quantile threshold keeps the top (1 - sparsity) fraction by magnitude.
    cutoff = torch.quantile(flat, sparsity)
    keep = importance >= cutoff

    if keep.sum() == 0:
        # Degenerate case (everything pruned): retain the top 1% instead.
        k = max(1, int(weight.numel() * 0.01))
        _, top_idx = torch.topk(flat, k)
        keep = torch.zeros_like(flat, dtype=torch.bool)
        keep[top_idx] = True
        keep = keep.reshape(weight.shape)

    return weight * keep.float(), keep


# 修复后的压缩函数
def improved_compress_model(model, bits=8, sparsity=0.3):
    """Prune + quantize a model and collect a JSON-serializable description.

    Per-layer policy: output layers get at most half the requested sparsity
    and at least 8-bit quantization; biases are stored uncompressed.

    Args:
        model: trained nn.Module (left untouched; a deepcopy is modified).
        bits: quantization bit-width for weight tensors.
        sparsity: fraction of weight elements to prune.

    Returns:
        (compressed_model, model_info, overall_sparsity) where ``model_info``
        holds everything needed to rebuild the weights from disk.
    """
    print(f"使用改进算法压缩模型（量化位数={bits}, 剪枝稀疏度={sparsity}）...")
    compressed_model = copy.deepcopy(model)

    compression_info = {}
    model_info = {
        'model_type': str(type(model).__name__),
        'compressed_layers': compression_info,
        'compression_params': {
            'bits': bits,
            'sparsity': sparsity
        }
    }

    total_elements = 0
    zero_elements = 0

    # Layer-wise processing: different layers get different compression.
    for name, param in compressed_model.named_parameters():
        if 'weight' in name:
            with torch.no_grad():
                total_elements += param.numel()

                # Output layers are compressed more conservatively.
                layer_sparsity = sparsity
                layer_bits = bits

                if 'output' in name or name.endswith('.2.weight'):
                    layer_sparsity = max(0.1, sparsity * 0.5)  # less pruning
                    layer_bits = max(8, bits)  # at least 8-bit precision

                # Prune first...
                pruned_weight, mask = improved_prune_weights(param.data, layer_sparsity)
                if mask is not None:
                    zero_elements += param.numel() - torch.count_nonzero(pruned_weight).item()

                # ...then quantize what survived.
                quantized_weight, scale, zero_point, q_weight = improved_quantize_weights(pruned_weight, layer_bits)
                param.copy_(quantized_weight)

                # Record sparse indices and values for later reconstruction.
                if mask is not None:
                    nonzero_indices = mask.flatten().nonzero(as_tuple=False).flatten().cpu().numpy().tolist()

                    if scale is not None:
                        # BUG FIX: this used astype(np.int8), which overflows
                        # and corrupts stored values whenever layer_bits > 8
                        # (quantized values reach 2**(bits-1)-1). int32 is safe
                        # for every bit-width below the 16-bit skip threshold.
                        nonzero_values = q_weight[mask].flatten().cpu().numpy().astype(np.int32).tolist()
                    else:
                        nonzero_values = quantized_weight[mask].flatten().cpu().numpy().tolist()

                    compression_info[name] = {
                        'shape': list(param.shape),
                        'indices': nonzero_indices,
                        'values': nonzero_values,
                        'scale': float(scale.item()) if scale is not None else 1.0,
                        'zero_point': float(zero_point.item()) if zero_point is not None else 0.0,
                        'bits': layer_bits,
                        'sparsity': layer_sparsity
                    }
        elif 'bias' in name:
            # Biases are small: store them uncompressed as plain float lists.
            compression_info[name] = {
                'shape': list(param.shape),
                'values': param.data.cpu().numpy().tolist(),
                'is_bias': True
            }

    overall_sparsity = zero_elements / total_elements if total_elements > 0 else 0
    print(f"整体稀疏度: {overall_sparsity:.4f}")

    return compressed_model, model_info, overall_sparsity


# 修复后的加载函数
def improved_load_compressed_model(path, model_name="srcnn", scale=4, device=None):
    """Rebuild a model from a gzipped-JSON compression file.

    The file layout is the ``model_info`` dict produced by
    improved_compress_model: per-layer sparse indices, quantized values,
    scale and zero-point, plus uncompressed bias vectors.

    NOTE(review): weight layers absent from 'compressed_layers' (e.g. when
    compression ran with sparsity <= 0, so no mask/entry was stored) are left
    at this fresh model's random initialization — confirm compress and load
    configurations always match.
    """
    with gzip.open(path, 'rb') as f:
        json_str = f.read().decode('utf-8')

    compressed_info = json.loads(json_str)
    model = load_sr_model(model_name, scale)

    # Restore parameters layer by layer.
    for name, param in model.named_parameters():
        if name in compressed_info['compressed_layers']:
            info = compressed_info['compressed_layers'][name]

            if 'is_bias' in info and info['is_bias']:
                # Biases were stored uncompressed as plain float lists.
                param.data = torch.tensor(info['values'], dtype=param.dtype)
                continue

            # Start from a zero tensor of the recorded shape.
            shape = info['shape']
            param.data = torch.zeros(shape, dtype=param.dtype)

            if 'indices' in info and 'values' in info:
                # Boolean mask over the flattened tensor.
                flat_size = np.prod(shape)
                flat_mask = torch.zeros(flat_size, dtype=torch.bool)
                flat_mask[info['indices']] = True

                # Quantization parameters (identity if absent).
                # NOTE: this rebinding shadows the function's `scale` argument;
                # harmless here since load_sr_model() above already consumed it.
                scale = info.get('scale', 1.0)
                zero_point = info.get('zero_point', 0.0)

                # Dequantize the stored integer values.
                values = torch.tensor(info['values'], dtype=torch.float32) * scale + zero_point

                # Scatter the nonzero values back into place.
                flat_param = torch.zeros(flat_size, dtype=param.dtype)
                flat_param[flat_mask] = values
                param.data = flat_param.reshape(shape)

    if device:
        model = model.to(device)

    return model


def save_original_model(model, path):
    """Save the model's state dict to ``path``; return the file size in bytes."""
    state = model.state_dict()
    torch.save(state, path)
    return os.path.getsize(path)


def save_compressed_model(compressed_info, path):
    """Serialize compression info as gzipped JSON; return the file size in bytes."""
    payload = json.dumps(compressed_info).encode('utf-8')
    with gzip.open(path, 'wb', compresslevel=9) as fh:
        fh.write(payload)
    return os.path.getsize(path)


def main():
    """Entry point: train and/or compress a satellite-image SR model.

    The workflow is selected by ``args.mode``:
      * 'train'    -- train the model, save weights and history.
      * 'compress' -- load pretrained weights and run compression only.
      * 'both'     -- train first, then compress the trained model.
    """
    args = get_args()

    # Ensure the output directory exists
    os.makedirs(args.output_dir, exist_ok=True)

    # Select compute device (GPU if available)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Report the compression configuration up front
    print(f"压缩配置: {args.bits}位量化, {args.sparsity:.1%}稀疏度")

    # Build the (randomly initialized) model
    model = load_sr_model(args.model, args.scale)
    model = model.to(device)

    trained_model_path = os.path.join(args.output_dir, f"{args.model}_x{args.scale}_trained.pth")
    training_history = None

    # Training mode
    if args.mode in ['train', 'both']:
        print("=" * 50)
        print("开始卫星图像超分辨率训练阶段")
        print("=" * 50)

        # Build the train/test data loaders
        train_loader, test_loader = create_dataloaders(
            args.scale, args.batch_size, args.dataset_size, args.hr_size
        )

        # Visualize a few dataset samples
        visualize_dataset_samples(train_loader, args.output_dir)

        # Train the model
        model, best_psnr, training_history = train_model(
            model, train_loader, test_loader, device, args
        )

        # Persist the trained weights
        torch.save(model.state_dict(), trained_model_path)
        print(f"训练好的模型已保存到: {trained_model_path}")

        # Visualize super-resolution outputs on the test set
        visualize_sr_results(model, test_loader, device, args.output_dir)

        # Dump the training history to JSON
        history_path = os.path.join(args.output_dir, "training_history.json")
        with open(history_path, 'w') as f:
            json.dump(training_history, f, indent=2)

    # Compress-only mode: load pretrained weights
    elif args.mode == 'compress':
        if args.pretrained_path and os.path.exists(args.pretrained_path):
            model.load_state_dict(torch.load(args.pretrained_path, map_location=device))
            print(f"已加载预训练模型: {args.pretrained_path}")
        elif os.path.exists(trained_model_path):
            model.load_state_dict(torch.load(trained_model_path, map_location=device))
            print(f"已加载训练好的模型: {trained_model_path}")
        else:
            print("警告: 没有找到预训练模型，将使用随机初始化的模型进行压缩")

    # Compression stage (runs for both 'compress' and 'both')
    if args.mode in ['compress', 'both']:
        print("=" * 50)
        print("开始压缩阶段")
        print("=" * 50)

        # Build a test loader for evaluation.
        # NOTE(review): in 'both' mode this rebuilds the dataset, so the
        # evaluation split here may differ from the one used during
        # training -- confirm whether that is intended.
        _, test_loader = create_dataloaders(
            args.scale, args.batch_size, args.dataset_size, args.hr_size
        )

        # Evaluate the uncompressed model
        print("评估原始模型...")
        original_psnr = evaluate_sr_model(model, test_loader, device)

        # In-memory size of the original model
        original_size = get_model_size(model)

        # Save the original model and record its on-disk size
        original_path = os.path.join(args.output_dir, f"{args.model}_x{args.scale}_original.pth")
        original_file_size_bytes = save_original_model(model, original_path)
        original_file_size = original_file_size_bytes / (1024 * 1024)

        print(f"原始模型PSNR: {original_psnr:.2f}dB")
        print(f"原始模型内存大小: {original_size:.2f} MB")
        print(f"原始模型文件大小: {original_file_size:.2f} MB")

        # Run the improved compression algorithm (prune + quantize)
        start_time = time.time()
        compressed_model, compressed_info, actual_sparsity = improved_compress_model(
            model, args.bits, args.sparsity
        )
        compression_time = time.time() - start_time
        print(f"压缩完成，耗时: {compression_time:.2f} 秒")

        # Evaluate the compressed model
        print("评估压缩后的模型...")
        compressed_model = compressed_model.to(device)
        compressed_psnr = evaluate_sr_model(compressed_model, test_loader, device)

        # In-memory size after compression
        compressed_size = get_model_size(compressed_model)

        # Save the compressed representation and record its on-disk size
        compressed_path = os.path.join(args.output_dir, f"{args.model}_x{args.scale}_compressed.pth")
        compressed_file_size_bytes = save_compressed_model(compressed_info, compressed_path)
        compressed_file_size = compressed_file_size_bytes / (1024 * 1024)

        print(f"压缩后模型PSNR: {compressed_psnr:.2f}dB")
        print(f"压缩后模型内存大小: {compressed_size:.2f} MB")
        print(f"压缩后模型文件大小: {compressed_file_size:.2f} MB")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

        # Compute compression ratio and quality loss
        file_compression_ratio = original_file_size / compressed_file_size if compressed_file_size > 0 else 0
        psnr_drop = original_psnr - compressed_psnr

        # Round-trip check: reload the compressed model from disk
        print("测试加载压缩模型...")
        loaded_model = improved_load_compressed_model(compressed_path, args.model, args.scale, device)
        loaded_psnr = evaluate_sr_model(loaded_model, test_loader, device)
        print(f"加载后模型PSNR: {loaded_psnr:.2f}dB")

        # Consistency between in-memory compressed model and reloaded one
        psnr_consistency = abs(compressed_psnr - loaded_psnr)
        print(f"压缩前后PSNR一致性: {psnr_consistency:.2f}dB (越小越好)")

        # Assemble the full results record
        results = {
            'model': args.model,
            'scale': args.scale,
            'mode': args.mode,
            'task': 'super_resolution',
            'training_params': {
                'epochs': args.epochs,
                'batch_size': args.batch_size,
                'learning_rate': args.learning_rate,
                'dataset_size': args.dataset_size,
                'hr_size': args.hr_size
            } if args.mode in ['train', 'both'] else None,
            'compression_params': {
                'bits': args.bits,
                'target_sparsity': args.sparsity,
                'actual_sparsity': float(actual_sparsity)
            },
            'model_sizes': {
                'original_memory_mb': float(original_size),
                'original_file_mb': float(original_file_size),
                'compressed_memory_mb': float(compressed_size),
                'compressed_file_mb': float(compressed_file_size),
                'file_compression_ratio': float(file_compression_ratio)
            },
            'performance_metrics': {
                'original_psnr': float(original_psnr),
                'compressed_psnr': float(compressed_psnr),
                'loaded_psnr': float(loaded_psnr),
                'psnr_drop': float(psnr_drop),
                'psnr_consistency': float(psnr_consistency)
            },
            'training_history': training_history
        }

        # Persist the results record
        results_path = os.path.join(args.output_dir, "complete_results.json")
        with open(results_path, 'w') as f:
            json.dump(results, f, indent=2)

        # Print the final summary
        print("\n" + "=" * 60)
        print("卫星图像超分辨率最终结果统计")
        print("=" * 60)
        if training_history:
            print(f"训练最佳PSNR: {training_history['best_test_psnr']:.2f}dB")
        print(f"原始模型PSNR: {original_psnr:.2f}dB")
        print(f"压缩后模型PSNR: {compressed_psnr:.2f}dB")
        print(f"加载后模型PSNR: {loaded_psnr:.2f}dB")
        print(f"PSNR下降: {psnr_drop:.2f}dB")
        print(f"PSNR一致性: {psnr_consistency:.2f}dB")
        print(f"文件压缩率: {file_compression_ratio:.2f}倍")
        print(f"原始文件大小: {original_file_size:.2f} MB")
        print(f"压缩文件大小: {compressed_file_size:.2f} MB")
        print(f"量化位数: {args.bits}")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

        # Grade the compression quality (thresholds on PSNR loss)
        if psnr_drop <= 2.0:
            print("✅ 压缩质量: 优秀 (PSNR损失 ≤ 2dB)")
        elif psnr_drop <= 5.0:
            print("⚠️  压缩质量: 良好 (PSNR损失 ≤ 5dB)")
        else:
            print("❌ 压缩质量: 需要改进 (PSNR损失 > 5dB)")

        if psnr_consistency <= 1.0:
            print("✅ 加载一致性: 优秀")
        else:
            print("⚠️  加载一致性: 需要检查")

        print(f"结果已保存到: {results_path}")

    print("\n卫星图像超分辨率模型训练和压缩完成!")


# Standard script entry guard: run the workflow only when executed directly.
if __name__ == "__main__":
    main()