#!/usr/bin/env python
"""
卫星云检测与云去除模型训练和压缩示例 - 修复版
针对分割任务优化压缩策略，添加微调和层级压缩
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import numpy as np
import json
import gzip
import matplotlib.pyplot as plt
from tqdm import tqdm


# Improved cloud-detection model (U-Net with skip connections)
class CloudDetector(nn.Module):
    """U-Net style cloud-segmentation network.

    Four encoder stages (each followed by a 2x2 max-pool), a bottleneck with
    dropout, and four decoder stages fed by skip connections; a final 1x1
    conv produces per-pixel class logits.

    Input: (N, 3, H, W) with H and W divisible by 16.
    Output: (N, num_classes, H, W) raw logits (no softmax applied).
    """

    def __init__(self, num_classes=2):
        super(CloudDetector, self).__init__()

        # --- encoder path ---
        self.enc1 = self._make_layer(3, 64)
        self.pool1 = nn.MaxPool2d(2)

        self.enc2 = self._make_layer(64, 128)
        self.pool2 = nn.MaxPool2d(2)

        self.enc3 = self._make_layer(128, 256)
        self.pool3 = nn.MaxPool2d(2)

        self.enc4 = self._make_layer(256, 512)
        self.pool4 = nn.MaxPool2d(2)

        # --- bottleneck ---
        self.bottleneck = self._make_layer(512, 1024)

        # --- decoder path (each stage: upsample then fuse with skip) ---
        self.upconv4 = nn.ConvTranspose2d(1024, 512, 2, stride=2)
        self.dec4 = self._make_layer(1024, 512)  # 512 up + 512 skip

        self.upconv3 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.dec3 = self._make_layer(512, 256)  # 256 up + 256 skip

        self.upconv2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.dec2 = self._make_layer(256, 128)  # 128 up + 128 skip

        self.upconv1 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.dec1 = self._make_layer(128, 64)  # 64 up + 64 skip

        # 1x1 conv head mapping features to class logits.
        self.output = nn.Conv2d(64, num_classes, 1)

        # Dropout, applied only after the bottleneck in forward().
        self.dropout = nn.Dropout2d(0.3)

    def _make_layer(self, in_channels, out_channels):
        # Two conv -> BN -> ReLU stages at a fixed spatial resolution.
        stages = []
        for cin in (in_channels, out_channels):
            stages += [
                nn.Conv2d(cin, out_channels, 3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ]
        return nn.Sequential(*stages)

    def forward(self, x):
        # Contracting path; keep the pre-pool activations for the skip links.
        s1 = self.enc1(x)
        s2 = self.enc2(self.pool1(s1))
        s3 = self.enc3(self.pool2(s2))
        s4 = self.enc4(self.pool3(s3))

        # Bottleneck with dropout for regularisation.
        mid = self.dropout(self.bottleneck(self.pool4(s4)))

        # Expanding path: upsample, concatenate skip, then refine.
        d4 = self.dec4(torch.cat([self.upconv4(mid), s4], dim=1))
        d3 = self.dec3(torch.cat([self.upconv3(d4), s3], dim=1))
        d2 = self.dec2(torch.cat([self.upconv2(d3), s2], dim=1))
        d1 = self.dec1(torch.cat([self.upconv1(d2), s1], dim=1))

        # Per-pixel class logits.
        return self.output(d1)


# Improved cloud-removal model (U-Net encoder-decoder with a residual connection)
class CloudRemover(nn.Module):
    """U-Net style encoder-decoder for cloud removal with a global residual link.

    Input/output are (N, 3, H, W) images in [-1, 1]; H and W should be
    divisible by 32 so the decoder reconstructs the exact input size. The
    network predicts a correction that is added to the input and clamped
    back to [-1, 1].

    Fixes vs. the previous version:
    - The skip tensors are resized to the decoder feature's resolution
      before concatenation; the old forward() concatenated tensors of
      different spatial sizes (bottleneck at /32 vs enc4 at /16), which
      raised a RuntimeError on the first torch.cat.
    - The output head uses output_padding=0 so the final ConvTranspose is
      an exact x2 upsample; with output_padding=1 the output was 2H+1 and
      the residual addition with the input could never match.
    Neither change alters any learnable parameter shapes.
    """

    def __init__(self):
        super(CloudRemover, self).__init__()

        # Encoder: each stage halves the spatial resolution.
        self.enc1 = self._make_encoder_layer(3, 64)
        self.enc2 = self._make_encoder_layer(64, 128)
        self.enc3 = self._make_encoder_layer(128, 256)
        self.enc4 = self._make_encoder_layer(256, 512)

        # Bottleneck: one more /2 downsampling plus refinement (total /32).
        self.bottleneck = nn.Sequential(
            nn.Conv2d(512, 1024, 4, stride=2, padding=1),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout2d(0.5),
            nn.Conv2d(1024, 1024, 3, padding=1),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU(0.2, inplace=True)
        )

        # Decoder: each stage doubles the resolution; the input is the deep
        # feature concatenated with the (resized) encoder skip.
        self.dec4 = self._make_decoder_layer(1024 + 512, 512)
        self.dec3 = self._make_decoder_layer(512 + 256, 256)
        self.dec2 = self._make_decoder_layer(256 + 128, 128)
        self.dec1 = self._make_decoder_layer(128 + 64, 64)

        # Output head: final x2 upsample back to the input resolution.
        # output_padding must be 0 for an exact doubling with k=2, s=2.
        self.output = nn.Sequential(
            nn.ConvTranspose2d(64, 32, 2, stride=2, padding=0, output_padding=0),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 3, 3, padding=1),
            nn.Tanh()
        )

    def _make_encoder_layer(self, in_channels, out_channels):
        # Strided conv -> BN -> LeakyReLU: downsamples by 2.
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 4, stride=2, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(0.2, inplace=True)
        )

    def _make_decoder_layer(self, in_channels, out_channels):
        # Transposed conv -> BN -> ReLU -> dropout: upsamples by 2.
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, 4, stride=2, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.3)
        )

    def forward(self, x):
        # Keep the input for the global residual connection.
        input_img = x

        # Encoder (features kept for the skip connections).
        enc1 = self.enc1(x)  # /2
        enc2 = self.enc2(enc1)  # /4
        enc3 = self.enc3(enc2)  # /8
        enc4 = self.enc4(enc3)  # /16

        # Bottleneck.
        bottleneck = self.bottleneck(enc4)  # /32

        def _fuse(deep, skip):
            # Resize the skip to the deeper feature's spatial size before
            # concatenating along the channel dimension.
            if skip.shape[2:] != deep.shape[2:]:
                skip = F.interpolate(skip, size=deep.shape[2:],
                                     mode='bilinear', align_corners=False)
            return torch.cat([deep, skip], dim=1)

        # Decoder with skip connections (each stage doubles the resolution).
        dec4 = self.dec4(_fuse(bottleneck, enc4))  # /16
        dec3 = self.dec3(_fuse(dec4, enc3))  # /8
        dec2 = self.dec2(_fuse(dec3, enc2))  # /4
        dec1 = self.dec1(_fuse(dec2, enc1))  # /2

        # Output head restores the full input resolution.
        output = self.output(dec1)  # /1

        # Residual connection: output = input + predicted correction.
        output = input_img + output
        output = torch.clamp(output, -1, 1)

        return output


# Improved synthetic cloud dataset
class CloudDataset(Dataset):
    """Synthetic in-memory dataset of procedurally generated satellite scenes.

    task='detection' -> (image, mask): image (3, H, W) in [0, 1], mask
    (1, H, W) long with 1 marking cloud pixels.
    Any other task ('removal') -> (cloudy, clear): both (3, H, W) scaled to
    [-1, 1] to match the removal model's Tanh output range.
    All samples are generated eagerly in __init__ and held in memory.
    """

    def __init__(self, size=1000, task='detection', img_size=256):
        # size: number of samples; img_size: square side length in pixels.
        self.size = size
        self.task = task
        self.img_size = img_size

        print(f"生成 {size} 个云{task}样本...")
        self.data = []

        # Pre-generate every sample up front.
        for i in tqdm(range(size)):
            if task == 'detection':
                img, mask = self._generate_cloud_detection_sample()
                self.data.append((img, mask))
            else:  # removal
                cloudy, clear = self._generate_cloud_removal_sample()
                self.data.append((cloudy, clear))

    def _generate_cloud_detection_sample(self):
        """Generate one detection sample: an image and its binary cloud mask."""
        # Base ground image.
        img = torch.zeros(3, self.img_size, self.img_size)

        # Candidate ground colors (vegetation, water, soil, urban).
        base_colors = [
            torch.tensor([0.2, 0.6, 0.2]),  # vegetation
            torch.tensor([0.1, 0.3, 0.8]),  # water
            torch.tensor([0.6, 0.4, 0.2]),  # soil
            torch.tensor([0.5, 0.5, 0.5])  # urban
        ]

        # Pick one base color for the whole scene.
        base_color = base_colors[np.random.randint(0, len(base_colors))]
        img += base_color.view(3, 1, 1)

        # Ground texture noise.
        noise = torch.randn(3, self.img_size, self.img_size) * 0.1
        img += noise

        # Cloud label mask (1 = cloud).
        mask = torch.zeros(1, self.img_size, self.img_size, dtype=torch.long)

        # 70% of samples contain clouds.
        if np.random.random() > 0.3:
            num_clouds = np.random.randint(1, 4)

            for _ in range(num_clouds):
                # Random cloud geometry.
                cloud_type = np.random.choice(['circle', 'irregular', 'streak'])

                if cloud_type == 'circle':
                    # Circular cloud.
                    center_x = np.random.randint(30, self.img_size - 30)
                    center_y = np.random.randint(30, self.img_size - 30)
                    radius = np.random.randint(15, 40)

                    y_coords, x_coords = torch.meshgrid(
                        torch.arange(self.img_size),
                        torch.arange(self.img_size),
                        indexing='ij'
                    )

                    cloud_mask = ((x_coords - center_x) ** 2 + (y_coords - center_y) ** 2) <= radius ** 2

                elif cloud_type == 'irregular':
                    # Irregular cloud blob.
                    h, w = np.random.randint(20, 60), np.random.randint(20, 60)
                    y, x = np.random.randint(0, self.img_size - h), np.random.randint(0, self.img_size - w)

                    # Random boolean shape inside the bounding box.
                    cloud_shape = torch.rand(h, w) > 0.4
                    cloud_mask = torch.zeros(self.img_size, self.img_size, dtype=torch.bool)
                    cloud_mask[y:y + h, x:x + w] = cloud_shape

                else:  # streak
                    # Streak-shaped cloud.
                    if np.random.random() > 0.5:  # horizontal streak
                        y = np.random.randint(20, self.img_size - 20)
                        thickness = np.random.randint(5, 15)
                        cloud_mask = torch.zeros(self.img_size, self.img_size, dtype=torch.bool)
                        cloud_mask[y:y + thickness, :] = True
                    else:  # vertical streak
                        x = np.random.randint(20, self.img_size - 20)
                        thickness = np.random.randint(5, 15)
                        cloud_mask = torch.zeros(self.img_size, self.img_size, dtype=torch.bool)
                        cloud_mask[:, x:x + thickness] = True

                # Alpha-blend the cloud into the image.
                cloud_color = torch.tensor([0.9, 0.9, 0.9]).view(3, 1, 1)  # white cloud
                cloud_alpha = 0.7 + np.random.random() * 0.3  # random opacity

                for c in range(3):
                    img[c][cloud_mask] = img[c][cloud_mask] * (1 - cloud_alpha) + cloud_color[c, 0, 0] * cloud_alpha

                # Mark the cloud pixels in the label mask.
                mask[0][cloud_mask] = 1

        # Keep pixel values in a valid display range.
        img = torch.clamp(img, 0, 1)

        return img, mask

    def _generate_cloud_removal_sample(self):
        """Generate one removal sample: a cloudy image and its clean target."""
        # Start from a cloud-free ground image.
        clear_img = torch.zeros(3, self.img_size, self.img_size)

        # Candidate ground colors.
        base_colors = [
            torch.tensor([0.2, 0.6, 0.2]),  # vegetation
            torch.tensor([0.1, 0.3, 0.8]),  # water
            torch.tensor([0.6, 0.4, 0.2]),  # soil
            torch.tensor([0.5, 0.5, 0.5])  # urban
        ]

        # Paint several rectangular regions with random ground colors.
        for region in range(np.random.randint(2, 5)):
            color = base_colors[np.random.randint(0, len(base_colors))]
            h, w = np.random.randint(30, 80), np.random.randint(30, 80)
            y, x = np.random.randint(0, self.img_size - h), np.random.randint(0, self.img_size - w)
            clear_img[:, y:y + h, x:x + w] = color.view(3, 1, 1)

        # Fine texture detail.
        detail = torch.randn(3, self.img_size, self.img_size) * 0.1
        clear_img += detail

        # Cloudy version starts as a copy of the clean image.
        cloudy_img = clear_img.clone()

        # Overlay several clouds.
        num_clouds = np.random.randint(2, 6)
        for _ in range(num_clouds):
            # Random cloud position and size.
            cloud_h = np.random.randint(20, 60)
            cloud_w = np.random.randint(20, 60)
            cloud_y = np.random.randint(0, self.img_size - cloud_h)
            cloud_x = np.random.randint(0, self.img_size - cloud_w)

            # Soft cloud shape: random noise smoothed by average pooling.
            cloud_shape = torch.rand(cloud_h, cloud_w)
            cloud_shape = F.avg_pool2d(cloud_shape.unsqueeze(0).unsqueeze(0),
                                       kernel_size=5, stride=1, padding=2)
            cloud_shape = cloud_shape.squeeze()
            cloud_mask = cloud_shape > 0.3

            # Cloud color and opacity.
            cloud_intensity = 0.7 + np.random.random() * 0.3
            cloud_color = torch.tensor([0.95, 0.95, 0.95])  # near-white

            # Alpha-blend the cloud over the affected region.
            for c in range(3):
                region = cloudy_img[c, cloud_y:cloud_y + cloud_h, cloud_x:cloud_x + cloud_w]
                region[cloud_mask] = region[cloud_mask] * (1 - cloud_intensity) + cloud_color[c] * cloud_intensity
                cloudy_img[c, cloud_y:cloud_y + cloud_h, cloud_x:cloud_x + cloud_w] = region

        # Rescale to [-1, 1] (matches the removal model's Tanh output range).
        clear_img = torch.clamp(clear_img, 0, 1) * 2 - 1
        cloudy_img = torch.clamp(cloudy_img, 0, 1) * 2 - 1

        return cloudy_img, clear_img

    def __len__(self):
        # Fixed dataset length chosen at construction time.
        return self.size

    def __getitem__(self, idx):
        # Samples are pre-generated; just index into the cache.
        return self.data[idx]


def get_args():
    """Build and parse the command-line arguments for training/compression."""
    p = argparse.ArgumentParser(description="卫星云检测与云去除模型训练和压缩示例")

    # Task selection.
    p.add_argument("--task", "-t", default="detection",
                   choices=["detection", "removal"],
                   help="云任务: detection(检测) 或 removal(去除)")

    # Data and output locations.
    p.add_argument("--data-dir", "-d", default="./data",
                   help="云数据集目录路径")
    p.add_argument("--output-dir", "-o", default="./output",
                   help="模型和结果的输出目录")

    # Training hyper-parameters.
    p.add_argument("--epochs", "-e", type=int, default=25,
                   help="训练轮数 (默认: 25)")
    p.add_argument("--batch-size", "-b", type=int, default=8,
                   help="批次大小 (默认: 8)")
    p.add_argument("--learning-rate", "-lr", type=float, default=0.001,
                   help="学习率 (默认: 0.001)")
    p.add_argument("--weight-decay", type=float, default=1e-4,
                   help="权重衰减 (默认: 1e-4)")
    p.add_argument("--dataset-size", type=int, default=1000,
                   help="数据集大小 (默认: 1000)")
    p.add_argument("--img-size", type=int, default=256,
                   help="输入图像大小 (默认: 256)")

    # Compression settings, tuned for segmentation tasks.
    p.add_argument("--compression-level", choices=['conservative', 'moderate', 'aggressive'],
                   default='conservative',
                   help="压缩级别: conservative(保守), moderate(中等), aggressive(激进)")
    p.add_argument("--bits", type=int, default=None,
                   help="量化位数 (覆盖compression-level设置)")
    p.add_argument("--sparsity", type=float, default=None,
                   help="剪枝稀疏度 (覆盖compression-level设置)")
    p.add_argument("--finetune-epochs", type=int, default=15,
                   help="压缩后微调轮数 (默认: 15)")

    # Run mode.
    p.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                   help="运行模式: train(仅训练), compress(仅压缩), both(训练+压缩)")
    p.add_argument("--pretrained-path", type=str, default=None,
                   help="预训练模型路径 (用于仅压缩模式)")

    return p.parse_args()


def get_compression_params(level, task):
    """Return {'bits', 'sparsity'} for a compression level, tuned per task.

    Detection (segmentation) tolerates somewhat stronger compression;
    anything else is treated as removal, which gets gentler settings.
    """
    table = {
        'detection': {
            'conservative': {'bits': 16, 'sparsity': 0.1},  # very conservative
            'moderate': {'bits': 8, 'sparsity': 0.3},  # medium compression
            'aggressive': {'bits': 6, 'sparsity': 0.5},  # relatively aggressive
        },
        'removal': {
            'conservative': {'bits': 16, 'sparsity': 0.05},  # extremely conservative
            'moderate': {'bits': 8, 'sparsity': 0.2},  # medium compression
            'aggressive': {'bits': 8, 'sparsity': 0.4},  # relatively aggressive
        },
    }
    key = 'detection' if task == 'detection' else 'removal'
    return table[key][level]


def load_cloud_model(task, img_size=256):
    """Instantiate the model for *task*.

    'detection' -> CloudDetector(num_classes=2); anything else -> CloudRemover.
    img_size is accepted for interface compatibility; both networks are fully
    convolutional and do not need it at construction time.
    """
    print(f"创建云{task}模型...")
    return CloudDetector(num_classes=2) if task == "detection" else CloudRemover()


def create_dataloaders(task, batch_size=8, dataset_size=1000, img_size=256):
    """Build train/test DataLoaders over a synthetic CloudDataset.

    Uses an 80/20 random split; the training loader shuffles, the test
    loader does not. Returns (train_loader, test_loader).
    """
    print(f"准备云{task}数据集...")

    # Generate the full synthetic dataset.
    dataset = CloudDataset(size=dataset_size, task=task, img_size=img_size)

    # 80% train / 20% test split.
    n_train = int(0.8 * len(dataset))
    train_set, test_set = random_split(dataset, [n_train, len(dataset) - n_train])

    def _loader(split, shuffle):
        # Shared loader configuration for both splits.
        return DataLoader(split, batch_size=batch_size, shuffle=shuffle,
                          num_workers=2, pin_memory=True)

    train_loader = _loader(train_set, True)
    test_loader = _loader(test_set, False)

    print(f"训练集大小: {len(train_set)}")
    print(f"测试集大小: {len(test_set)}")

    return train_loader, test_loader


def dice_loss(pred, target, smooth=1e-5):
    """Soft Dice loss on the cloud-class probability map.

    pred: (N, 2, H, W) logits; target: (N, 1, H, W) {0,1} labels.
    Returns 1 - Dice coefficient as a scalar tensor.
    """
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]  # probability of class 1
    gt = target.float().squeeze(1)

    overlap = (cloud_prob * gt).sum()
    denom = cloud_prob.sum() + gt.sum() + smooth
    return 1 - (2.0 * overlap + smooth) / denom


def perceptual_loss(pred, target):
    """Simplified perceptual loss for cloud removal.

    L1 reconstruction loss plus an edge-preserving term: the L1 distance
    between horizontal/vertical finite differences, weighted by 0.1.
    Both tensors are (N, C, H, W).
    """
    recon = F.l1_loss(pred, target)

    def _grads(img):
        # Finite differences along width and height.
        gx = img[:, :, :, 1:] - img[:, :, :, :-1]
        gy = img[:, :, 1:, :] - img[:, :, :-1, :]
        return gx, gy

    px, py = _grads(pred)
    tx, ty = _grads(target)
    edge = F.l1_loss(px, tx) + F.l1_loss(py, ty)

    return recon + 0.1 * edge


def calculate_iou(pred, target):
    """IoU of the thresholded (p > 0.5) cloud class against the binary target.

    pred: (N, 2, H, W) logits; target: (N, 1, H, W) {0,1} labels.
    Returns a Python float; defined as 1.0 when both masks are empty.
    """
    cloud = (torch.softmax(pred, dim=1)[:, 1, :, :] > 0.5).float()
    gt = target.float().squeeze(1)

    inter = (cloud * gt).sum()
    union = cloud.sum() + gt.sum() - inter

    if union == 0:
        # Both empty counts as a perfect match.
        return 1.0 if inter == 0 else 0.0

    return (inter / union).item()


def calculate_psnr(pred, target):
    """PSNR in dB for images in [-1, 1] (peak-to-peak range of 2.0).

    Returns float('inf') for an exact match.
    """
    mse = F.mse_loss(pred, target)
    if mse == 0:
        return float('inf')
    # 20*log10(peak / rmse), with peak = 2.0 for the [-1, 1] range.
    return (20 * torch.log10(2.0 / torch.sqrt(mse))).item()


def train_model(model, train_loader, test_loader, device, args):
    """Train the cloud detection/removal model and return the best checkpoint.

    Detection uses 0.7*cross-entropy + 0.3*Dice loss with IoU as the metric;
    removal uses the perceptual loss with PSNR. The best state (by test
    metric) is restored before returning. Returns
    (model, best_test_metric, history_dict).
    """
    print(f"开始训练云{args.task}模型...")

    # Adam with step decay (halve the LR every 10 epochs).
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

    # Per-epoch history for plotting.
    train_losses = []
    train_metrics = []
    test_metrics = []

    # NOTE(review): 0.0 vs -inf start values behave the same here, since both
    # comparisons below use a strict '>'.
    best_test_metric = 0.0 if args.task == 'detection' else float('-inf')
    best_model_state = None

    # Main training loop.
    for epoch in range(args.epochs):
        # --- training phase ---
        model.train()
        running_loss = 0.0
        running_metric = 0.0
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, batch_data in enumerate(pbar):
            if args.task == 'detection':
                images, masks = batch_data
                images, masks = images.to(device), masks.to(device)

                # Forward pass.
                optimizer.zero_grad()
                outputs = model(images)

                # Combined loss: cross-entropy + Dice.
                ce_loss = F.cross_entropy(outputs, masks.squeeze(1))
                d_loss = dice_loss(outputs, masks)
                loss = 0.7 * ce_loss + 0.3 * d_loss

                # Batch IoU.
                metric = calculate_iou(outputs, masks)

            else:  # removal
                cloudy, clear = batch_data
                cloudy, clear = cloudy.to(device), clear.to(device)

                # Forward pass.
                optimizer.zero_grad()
                outputs = model(cloudy)

                # Perceptual (L1 + gradient) loss.
                loss = perceptual_loss(outputs, clear)

                # Batch PSNR.
                metric = calculate_psnr(outputs, clear)

            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()

            # Running statistics.
            running_loss += loss.item()
            running_metric += metric
            num_batches += 1

            # Progress-bar readout.
            metric_name = 'IoU' if args.task == 'detection' else 'PSNR'
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                metric_name: f'{metric:.4f}' if args.task == 'detection' else f'{metric:.2f}'
            })

        # Epoch averages over training batches.
        avg_train_loss = running_loss / num_batches
        train_metric = running_metric / num_batches

        # --- evaluation phase ---
        test_metric = evaluate_model(model, test_loader, device, args.task)

        # Record history.
        train_losses.append(avg_train_loss)
        train_metrics.append(train_metric)
        test_metrics.append(test_metric)

        # Track the best checkpoint by test metric.
        # NOTE(review): both branches of this condition are identical; it could
        # be simplified to `test_metric > best_test_metric`.
        is_best = (args.task == 'detection' and test_metric > best_test_metric) or \
                  (args.task == 'removal' and test_metric > best_test_metric)

        if is_best:
            best_test_metric = test_metric
            best_model_state = copy.deepcopy(model.state_dict())

        # Step the learning-rate schedule.
        scheduler.step()

        metric_name = 'IoU' if args.task == 'detection' else 'PSNR'
        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train {metric_name}: {train_metric:.4f}, '
              f'Test {metric_name}: {test_metric:.4f}')

    # Restore the best checkpoint before returning.
    # NOTE(review): metric_name is undefined here if args.epochs == 0; harmless
    # today because best_model_state is also None in that case.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试{metric_name}: {best_test_metric:.4f}')

    # Save loss/metric curves to the output directory.
    plot_training_curves(train_losses, train_metrics, test_metrics, args.output_dir, args.task)

    return model, best_test_metric, {
        'train_losses': train_losses,
        'train_metrics': train_metrics,
        'test_metrics': test_metrics,
        'best_test_metric': best_test_metric
    }


def finetune_model(model, train_loader, test_loader, device, task, epochs=15, lr=0.0001):
    """Fine-tune a (typically compressed) model with a small learning rate.

    Uses the same losses/metrics as train_model, keeps the best state by
    test metric, restores it, and returns (model, final_metric).
    """
    print(f"开始微调云{task}模型 ({epochs} 轮)...")

    # Small LR for fine-tuning; removal gets an even smaller one.
    if task == 'detection':
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
    else:  # removal needs a smaller learning rate
        optimizer = optim.Adam(model.parameters(), lr=lr * 0.5, weight_decay=1e-5)

    # Baseline metric before any fine-tuning.
    initial_metric = evaluate_model(model, test_loader, device, task)
    metric_name = 'IoU' if task == 'detection' else 'PSNR'
    print(f"微调前{metric_name}: {initial_metric:.4f}")

    best_metric = initial_metric
    best_state = copy.deepcopy(model.state_dict())

    for epoch in range(epochs):
        model.train()
        running_loss = 0.0
        running_metric = 0.0
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Finetune {epoch + 1}/{epochs}')
        for batch_data in pbar:
            if task == 'detection':
                images, masks = batch_data
                images, masks = images.to(device), masks.to(device)

                optimizer.zero_grad()
                outputs = model(images)

                # Combined CE + Dice loss, as in train_model.
                ce_loss = F.cross_entropy(outputs, masks.squeeze(1))
                d_loss = dice_loss(outputs, masks)
                loss = 0.7 * ce_loss + 0.3 * d_loss

                # Batch IoU.
                metric = calculate_iou(outputs, masks)

            else:  # removal
                cloudy, clear = batch_data
                cloudy, clear = cloudy.to(device), clear.to(device)

                optimizer.zero_grad()
                outputs = model(cloudy)

                # Perceptual loss.
                loss = perceptual_loss(outputs, clear)

                # Batch PSNR.
                metric = calculate_psnr(outputs, clear)

            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            running_metric += metric
            num_batches += 1

            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                metric_name: f'{metric:.4f}' if task == 'detection' else f'{metric:.2f}'
            })

        # Per-epoch evaluation.
        test_metric = evaluate_model(model, test_loader, device, task)
        avg_loss = running_loss / num_batches
        train_metric = running_metric / num_batches

        print(
            f'Finetune Epoch [{epoch + 1}/{epochs}] - Loss: {avg_loss:.4f}, Train {metric_name}: {train_metric:.4f}, Test {metric_name}: {test_metric:.4f}')

        # Keep the best checkpoint by test metric.
        # NOTE(review): both branches are identical and could be simplified to
        # `test_metric > best_metric`.
        is_better = (task == 'detection' and test_metric > best_metric) or \
                    (task == 'removal' and test_metric > best_metric)

        if is_better:
            best_metric = test_metric
            best_state = copy.deepcopy(model.state_dict())

    # Restore and re-evaluate the best fine-tuned weights.
    model.load_state_dict(best_state)
    final_metric = evaluate_model(model, test_loader, device, task)

    print(f"微调完成! 最终{metric_name}: {final_metric:.4f} (提升: {final_metric - initial_metric:+.4f})")

    return model, final_metric


def evaluate_model(model, dataloader, device, task):
    """Average IoU ('detection') or PSNR (otherwise) of *model* over *dataloader*.

    Runs in eval mode with gradients disabled. Returns 0 for an empty loader.
    """
    model.eval()
    scores = []

    with torch.no_grad():
        for batch in dataloader:
            inputs, targets = batch
            inputs, targets = inputs.to(device), targets.to(device)
            preds = model(inputs)
            if task == 'detection':
                scores.append(calculate_iou(preds, targets))
            else:  # removal
                scores.append(calculate_psnr(preds, targets))

    return sum(scores) / len(scores) if scores else 0


def plot_training_curves(train_losses, train_metrics, test_metrics, output_dir, task):
    """Save a two-panel figure (loss, metric) as training_curves_<task>.png."""
    metric_name = 'IoU' if task == 'detection' else 'PSNR'
    xs = range(1, len(train_losses) + 1)

    plt.figure(figsize=(12, 4))

    # Left panel: training loss.
    plt.subplot(1, 2, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss')
    plt.title('Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    # Right panel: train/test metric.
    plt.subplot(1, 2, 2)
    plt.plot(xs, train_metrics, 'b-', label=f'Training {metric_name}')
    plt.plot(xs, test_metrics, 'r-', label=f'Test {metric_name}')
    plt.title(f'Training and Test {metric_name}')
    plt.xlabel('Epoch')
    plt.ylabel(metric_name)
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    out_path = os.path.join(output_dir, f'training_curves_{task}.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {out_path}")


def visualize_results(model, dataloader, device, task, output_dir, num_samples=4):
    """Render side-by-side result panels and save them to <task>_results.png.

    For 'detection': original image / ground-truth mask / predicted mask.
    Otherwise (removal): cloudy / clear (GT) / restored. The first sample of
    each of the first *num_samples* batches is shown.

    Fixes: the column count was written as `3 if task == 'detection' else 3`
    (always 3, now a constant), and `squeeze=False` keeps `axes` 2-D so the
    `axes[i, j]` indexing no longer crashes when num_samples == 1.
    """
    model.eval()

    # squeeze=False guarantees a 2-D axes array even for a single row.
    fig, axes = plt.subplots(num_samples, 3, figsize=(12, 4 * num_samples),
                             squeeze=False)
    if task == 'detection':
        fig.suptitle('云检测结果', fontsize=16)
    else:
        fig.suptitle('云去除结果', fontsize=16)

    with torch.no_grad():
        for i, batch_data in enumerate(dataloader):
            if i >= num_samples:
                break

            if task == 'detection':
                images, masks = batch_data
                images, masks = images.to(device), masks.to(device)
                outputs = model(images)
                pred = torch.softmax(outputs, dim=1)[:, 1, :, :] > 0.5

                # Show only the first sample of the batch.
                img_show = images[0].cpu().permute(1, 2, 0).numpy()
                mask_true = masks[0, 0].cpu().numpy()
                mask_pred = pred[0].cpu().numpy()

                # Clip for display.
                img_show = np.clip(img_show, 0, 1)

                axes[i, 0].imshow(img_show)
                axes[i, 0].set_title('原始图像')
                axes[i, 0].axis('off')

                axes[i, 1].imshow(mask_true, cmap='gray')
                axes[i, 1].set_title('真实云掩码')
                axes[i, 1].axis('off')

                axes[i, 2].imshow(mask_pred, cmap='gray')
                axes[i, 2].set_title('预测云掩码')
                axes[i, 2].axis('off')

            else:  # removal
                cloudy, clear = batch_data
                cloudy, clear = cloudy.to(device), clear.to(device)
                outputs = model(cloudy)

                # Map [-1, 1] tensors to [0, 1] for display, first sample only.
                panels = [
                    ((cloudy[0].cpu().permute(1, 2, 0).numpy() + 1) / 2, '有云图像'),
                    ((clear[0].cpu().permute(1, 2, 0).numpy() + 1) / 2, '无云图像(GT)'),
                    ((outputs[0].cpu().permute(1, 2, 0).numpy() + 1) / 2, '去云结果'),
                ]
                for j, (panel, title) in enumerate(panels):
                    axes[i, j].imshow(np.clip(panel, 0, 1))
                    axes[i, j].set_title(title)
                    axes[i, j].axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, f'{task}_results.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"{task}结果可视化已保存到: {os.path.join(output_dir, f'{task}_results.png')}")


def get_model_size(model):
    """Return the total size of the model's parameters in megabytes."""
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    return total_bytes / (1024 * 1024)


def quantize_weights(weight, bits=8):
    """Simulate weight quantization at the given bit width.

    Returns (dequantized_weight, scale, zero_point, quantized_weight).
    bits >= 16 is a no-op (weight is cloned, metadata is None). bits == 8
    uses symmetric quantization over [-127, 127]; lower bit widths use
    asymmetric (affine) quantization over the weight's min/max range.
    """
    # 16 bit and above: treated as "no quantization".
    if bits >= 16:
        return weight.clone(), None, None, None

    if bits == 8:
        # Symmetric quantization; guard against all-zero weights.
        peak = max(weight.abs().max(), 1e-8)
        scale = peak / 127.0
        q = torch.clamp(torch.round(weight / scale), -127, 127)
        return q * scale, scale, torch.tensor(0.0), q

    # Below 8 bits: asymmetric quantization over [qmin, qmax].
    qmin, qmax = -(2 ** (bits - 1)), 2 ** (bits - 1) - 1
    lo, hi = weight.min(), weight.max()

    # Degenerate (constant) weights cannot be scaled; skip quantization.
    if abs(hi - lo) < 1e-8:
        return weight.clone(), None, None, None

    scale = (hi - lo) / (qmax - qmin)
    zero_point = qmin - lo / scale

    q = torch.clamp(torch.round(weight / scale + zero_point), qmin, qmax)
    dq = (q - zero_point) * scale
    return dq, scale, zero_point, q


def prune_weights(weight, sparsity=0.1, layer_name=""):
    """改进的剪枝函数，对分割任务更保守，修复边界检查问题"""
    if sparsity <= 0 or sparsity >= 1.0:
        return weight.clone(), torch.ones_like(weight).bool()

    weight_abs = weight.abs()

    # 对于分割任务，需要更保守的剪枝策略
    if len(weight.shape) == 4:  # 卷积层
        # 区分编码器和解码器层
        if 'enc' in layer_name or 'bottleneck' in layer_name:
            effective_sparsity = min(sparsity, 0.8)  # 最多剪枝80%
        elif 'dec' in layer_name or 'upconv' in layer_name:
            effective_sparsity = min(sparsity * 0.3, 0.5)  # 解码器更保守
        elif 'output' in layer_name:
            effective_sparsity = min(sparsity * 0.1, 0.2)  # 输出层极保守
        else:
            effective_sparsity = min(sparsity * 0.5, 0.6)

        # 简化的剪枝策略，避免复杂的结构化剪枝
        weight_flat = weight_abs.flatten()
        total_elements = weight_flat.numel()

        if total_elements <= 1:
            return weight.clone(), torch.ones_like(weight).bool()

        k = max(1, min(int(total_elements * effective_sparsity), total_elements - 1))

        if k >= total_elements:
            # 如果要剪枝的数量超过总数，返回原权重
            return weight.clone(), torch.ones_like(weight).bool()

        try:
            threshold = torch.kthvalue(weight_flat, k).values
            mask = (weight.abs() >= threshold)
        except RuntimeError as e:
            print(f"警告: 剪枝时出错 {layer_name}: {e}, 跳过此层剪枝")
            return weight.clone(), torch.ones_like(weight).bool()

    elif len(weight.shape) == 2:  # 全连接层
        effective_sparsity = min(sparsity * 0.2, 0.3)  # 全连接层非常保守
        weight_flat = weight_abs.flatten()
        total_elements = weight_flat.numel()

        if total_elements <= 1:
            return weight.clone(), torch.ones_like(weight).bool()

        k = max(1, min(int(total_elements * effective_sparsity), total_elements - 1))

        if k >= total_elements:
            return weight.clone(), torch.ones_like(weight).bool()

        try:
            threshold = torch.kthvalue(weight_flat, k).values
            mask = (weight.abs() >= threshold)
        except RuntimeError as e:
            print(f"警告: 剪枝时出错 {layer_name}: {e}, 跳过此层剪枝")
            return weight.clone(), torch.ones_like(weight).bool()

    elif len(weight.shape) == 1:  # 1D权重（如BatchNorm）
        # 对1D权重更加保守
        effective_sparsity = min(sparsity * 0.1, 0.1)
        weight_flat = weight_abs.flatten()
        total_elements = weight_flat.numel()

        if total_elements <= 2:  # 太小的权重不剪枝
            return weight.clone(), torch.ones_like(weight).bool()

        k = max(1, min(int(total_elements * effective_sparsity), total_elements - 1))

        if k >= total_elements:
            return weight.clone(), torch.ones_like(weight).bool()

        try:
            threshold = torch.kthvalue(weight_flat, k).values
            mask = (weight.abs() >= threshold)
        except RuntimeError as e:
            print(f"警告: 剪枝时出错 {layer_name}: {e}, 跳过此层剪枝")
            return weight.clone(), torch.ones_like(weight).bool()
    else:
        # 其他形状的权重，非常保守处理
        print(f"未知权重形状 {weight.shape} 在层 {layer_name}，跳过剪枝")
        return weight.clone(), torch.ones_like(weight).bool()

    # 应用掩码
    pruned = weight * mask.float()

    # 验证剪枝结果
    if torch.sum(mask) == 0:
        print(f"警告: 层 {layer_name} 所有权重都被剪枝，恢复原权重")
        return weight.clone(), torch.ones_like(weight).bool()

    return pruned, mask


def compress_model(model, bits=8, sparsity=0.1):
    """Compress a model via per-layer pruning and quantization, tuned for
    segmentation tasks, with defensive error handling.

    A deep copy of ``model`` is compressed in place; the input model is not
    modified. Each weight tensor gets a layer-dependent sparsity and
    bit-width based on its parameter name (output layer most conservative,
    then decoder/upconv, then bottleneck, then encoder at full strength).
    Each tensor is pruned first (``prune_weights``) and then quantized
    (``quantize_weights``); a layer whose pruning or quantization raises is
    left uncompressed and recorded in the skipped list.

    Args:
        model: the nn.Module to compress (left untouched).
        bits: base quantization bit-width; layers may use a higher floor.
        sparsity: base pruning sparsity in [0, 1]; layers scale it down.

    Returns:
        A 3-tuple ``(compressed_model, model_info, overall_sparsity)`` where
        ``model_info`` is a JSON-serializable dict with per-layer stats and
        ``overall_sparsity`` is the fraction of weight elements zeroed by
        pruning (quantization-introduced zeros are NOT counted here).
    """
    print(f"压缩模型（量化位数={bits}, 剪枝稀疏度={sparsity}）...")

    compressed_model = copy.deepcopy(model)
    compression_info = {}
    # compression_info is stored by reference, so entries added in the loop
    # below are visible through model_info['compressed_layers'].
    model_info = {
        'model_type': str(type(model).__name__),
        'bits': bits,
        'sparsity': sparsity,
        'compressed_layers': compression_info
    }

    total_elements = 0
    zero_elements = 0
    skipped_layers = []

    for name, param in compressed_model.named_parameters():
        if 'weight' in name and param.numel() > 1:  # only compress weight tensors; skip single-element params
            with torch.no_grad():
                original_param = param.data.clone()
                total_elements += param.numel()

                # Scale compression strength by the layer's importance,
                # inferred from its parameter name.
                if 'output' in name:
                    # Output layer: most conservative compression
                    current_sparsity = min(sparsity * 0.1, 0.2)
                    current_bits = max(16, bits)
                elif 'dec' in name or 'upconv' in name:
                    # Decoder layers: conservative compression
                    current_sparsity = min(sparsity * 0.3, 0.5)
                    current_bits = max(8, bits)
                elif 'bottleneck' in name:
                    # Bottleneck: moderate compression
                    current_sparsity = min(sparsity * 0.7, 0.8)
                    current_bits = bits
                else:
                    # Encoder layers: full-strength compression
                    current_sparsity = min(sparsity, 0.8)
                    current_bits = bits

                # Prune first
                try:
                    if current_sparsity > 0 and param.numel() > 2:  # only prune sufficiently large tensors
                        pruned_weight, mask = prune_weights(original_param, current_sparsity, name)
                        if mask is not None:
                            nonzero_count = torch.sum(mask).item()
                            zero_elements += param.numel() - nonzero_count
                        else:
                            pruned_weight = original_param
                    else:
                        pruned_weight = original_param
                        mask = torch.ones_like(original_param).bool()
                except Exception as e:
                    print(f"剪枝失败 {name}: {e}, 跳过剪枝")
                    pruned_weight = original_param
                    mask = torch.ones_like(original_param).bool()
                    skipped_layers.append(name)

                # Then quantize. NOTE(review): scale/zero_point/q_weight are
                # computed but never used afterwards — presumably kept for a
                # fuller serialization format; confirm before removing.
                try:
                    if current_bits < 16 and param.numel() > 1:
                        quantized_weight, scale, zero_point, q_weight = quantize_weights(pruned_weight, current_bits)
                        if quantized_weight is not None:
                            param.copy_(quantized_weight)
                        else:
                            param.copy_(pruned_weight)
                            scale, zero_point, q_weight = None, None, None
                    else:
                        param.copy_(pruned_weight)
                        scale, zero_point, q_weight = None, None, None
                except Exception as e:
                    print(f"量化失败 {name}: {e}, 跳过量化")
                    param.copy_(pruned_weight)
                    scale, zero_point, q_weight = None, None, None
                    if name not in skipped_layers:
                        skipped_layers.append(name)

                # Record per-layer compression stats (simplified: only the
                # essentials, no quantization tensors)
                compression_info[name] = {
                    'original_size': param.numel(),
                    'nonzero_count': torch.count_nonzero(param.data).item(),
                    'compression_ratio': current_sparsity,
                    'quantization_bits': current_bits,
                    'layer_type': 'conv' if len(param.shape) == 4 else ('fc' if len(param.shape) == 2 else 'other')
                }
        elif 'bias' in name:
            # Bias parameters are left uncompressed
            compression_info[name] = {
                'original_size': param.numel(),
                'nonzero_count': param.numel(),
                'compression_ratio': 0.0,
                'quantization_bits': 32,
                'layer_type': 'bias'
            }

    overall_sparsity = zero_elements / total_elements if total_elements > 0 else 0
    print(f"整体稀疏度: {overall_sparsity:.4f}")

    if skipped_layers:
        print(f"跳过的层: {len(skipped_layers)} 个")
        for layer in skipped_layers[:3]:  # show at most the first 3
            print(f"  - {layer}")
        if len(skipped_layers) > 3:
            print(f"  - ... 还有 {len(skipped_layers) - 3} 个")

    return compressed_model, model_info, overall_sparsity


def save_compressed_model_simple(model, path):
    """Persist a compressed model as a plain PyTorch checkpoint.

    Writes the model's state dict together with minimal metadata (class name
    and a compression flag) to ``path``.

    Returns:
        Size of the written file in bytes.
    """
    checkpoint = {
        'model_state_dict': model.state_dict(),
        'model_class': str(type(model).__name__),
        'compression_applied': True,
    }
    torch.save(checkpoint, path)
    return os.path.getsize(path)


def load_compressed_model_simple(path, task="detection", device=None):
    """Load a checkpoint written by ``save_compressed_model_simple``.

    Rebuilds the architecture for ``task`` via ``load_cloud_model`` and
    restores its weights from the checkpoint at ``path``.

    Returns:
        The restored model (moved to ``device`` when one is given), or
        ``None`` if loading fails for any reason.
    """
    map_target = device if device else 'cpu'
    try:
        # NOTE(review): weights_only=False unpickles arbitrary objects —
        # only load checkpoints from trusted sources.
        checkpoint = torch.load(path, map_location=map_target, weights_only=False)
        model = load_cloud_model(task)
        model.load_state_dict(checkpoint['model_state_dict'])
        if device:
            model = model.to(device)
        return model
    except Exception as e:
        print(f"加载模型失败: {e}")
        return None


def main():
    """Entry point: train and/or compress a satellite cloud model.

    Driven by CLI arguments (``get_args``): ``--mode train`` trains and saves
    the model, ``--mode compress`` compresses a pretrained checkpoint,
    ``--mode both`` does both. The compression phase evaluates the model
    before and after, fine-tunes when the task metric (IoU for detection,
    PSNR for removal) drops past a threshold, round-trips the saved
    compressed model, and writes checkpoints, visualizations, and a JSON
    results summary to ``--output-dir``.
    """
    args = get_args()

    # Ensure the output directory exists
    os.makedirs(args.output_dir, exist_ok=True)

    # Pick the compute device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Explicit --bits/--sparsity override the named compression-level preset
    if args.bits is not None and args.sparsity is not None:
        compression_params = {'bits': args.bits, 'sparsity': args.sparsity}
    else:
        compression_params = get_compression_params(args.compression_level, args.task)

    print(f"压缩级别: {args.compression_level}")
    print(f"压缩参数: {compression_params['bits']}位量化, {compression_params['sparsity']}稀疏度")

    # Build the model for the requested task
    model = load_cloud_model(args.task, args.img_size)
    model = model.to(device)

    trained_model_path = os.path.join(args.output_dir, f"cloud_{args.task}_trained.pth")
    training_history = None

    # ---- Training phase ----
    if args.mode in ['train', 'both']:
        print("=" * 50)
        print(f"开始云{args.task}训练阶段")
        print("=" * 50)

        # Build the data loaders
        train_loader, test_loader = create_dataloaders(
            args.task, args.batch_size, args.dataset_size, args.img_size
        )

        # Train the model
        model, best_metric, training_history = train_model(
            model, train_loader, test_loader, device, args
        )

        # Save the trained model
        torch.save(model.state_dict(), trained_model_path)
        print(f"训练好的模型已保存到: {trained_model_path}")

        # Visualize predictions on the test set
        visualize_results(model, test_loader, device, args.task, args.output_dir)

        # Persist the training history
        history_path = os.path.join(args.output_dir, f"training_history_{args.task}.json")
        with open(history_path, 'w') as f:
            json.dump(training_history, f, indent=2)

    # Compress-only mode: load a pretrained checkpoint
    elif args.mode == 'compress':
        if args.pretrained_path and os.path.exists(args.pretrained_path):
            model.load_state_dict(torch.load(args.pretrained_path, map_location=device, weights_only=True))
            print(f"已加载预训练模型: {args.pretrained_path}")
        elif os.path.exists(trained_model_path):
            model.load_state_dict(torch.load(trained_model_path, map_location=device, weights_only=True))
            print(f"已加载训练好的模型: {trained_model_path}")
        else:
            print("警告: 没有找到预训练模型，将使用随机初始化的模型进行压缩")

    # ---- Compression phase ----
    if args.mode in ['compress', 'both']:
        print("=" * 50)
        print("开始压缩阶段")
        print("=" * 50)

        # Test loader used for all evaluations below
        _, test_loader = create_dataloaders(
            args.task, args.batch_size, args.dataset_size, args.img_size
        )

        # Baseline evaluation
        print("评估原始模型...")
        original_metric = evaluate_model(model, test_loader, device, args.task)

        # In-memory size of the original model
        original_size = get_model_size(model)

        # Save the original model to measure its on-disk size
        original_path = os.path.join(args.output_dir, f"cloud_{args.task}_original.pth")
        torch.save(model.state_dict(), original_path)
        original_file_size = os.path.getsize(original_path) / (1024 * 1024)

        metric_name = 'IoU' if args.task == 'detection' else 'PSNR'
        print(f"原始模型{metric_name}: {original_metric:.4f}")
        print(f"原始模型内存大小: {original_size:.2f} MB")
        print(f"原始模型文件大小: {original_file_size:.2f} MB")

        # Compress the model
        start_time = time.time()
        compressed_model, compressed_info, actual_sparsity = compress_model(
            model,
            compression_params['bits'],
            compression_params['sparsity']
        )
        compression_time = time.time() - start_time
        print(f"压缩完成，耗时: {compression_time:.2f} 秒")

        # Evaluate the compressed model
        print("评估压缩后的模型...")
        compressed_model = compressed_model.to(device)
        compressed_metric = evaluate_model(compressed_model, test_loader, device, args.task)

        print(f"压缩后模型{metric_name}: {compressed_metric:.4f}")

        # Report the performance delta (percentage points for IoU, dB for PSNR)
        if args.task == 'detection':
            metric_change = (compressed_metric - original_metric) * 100
            print(f"{metric_name}变化: {metric_change:+.2f}%")
        else:
            metric_change = compressed_metric - original_metric
            print(f"{metric_name}变化: {metric_change:+.2f}dB")

        # Fine-tune if the drop is too large: IoU drop > 0.1 or PSNR drop > 5dB
        performance_drop_threshold = 0.1 if args.task == 'detection' else 5.0

        if ((args.task == 'detection' and (original_metric - compressed_metric) > performance_drop_threshold) or
            (args.task == 'removal' and (original_metric - compressed_metric) > performance_drop_threshold)) and \
                args.finetune_epochs > 0:

            print("性能下降较大，开始微调...")
            train_loader, _ = create_dataloaders(
                args.task, args.batch_size, args.dataset_size, args.img_size
            )
            compressed_model, finetuned_metric = finetune_model(
                compressed_model, train_loader, test_loader, device, args.task, args.finetune_epochs
            )
            print(f"微调后{metric_name}: {finetuned_metric:.4f}")
        else:
            finetuned_metric = compressed_metric

        # In-memory size after compression
        compressed_size = get_model_size(compressed_model)

        # Save the compressed model and measure its actual file size
        compressed_path = os.path.join(args.output_dir, f"cloud_{args.task}_compressed.pth")
        compressed_file_size_bytes = save_compressed_model_simple(compressed_model, compressed_path)
        compressed_file_size = compressed_file_size_bytes / (1024 * 1024)

        print(f"压缩后模型内存大小: {compressed_size:.2f} MB")
        print(f"压缩后模型文件大小: {compressed_file_size:.2f} MB")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

        # File-level compression ratio
        file_compression_ratio = original_file_size / compressed_file_size

        # Round-trip check: reload the compressed model and re-evaluate
        print("测试加载压缩模型...")
        loaded_model = load_compressed_model_simple(compressed_path, args.task, device)
        if loaded_model is not None:
            loaded_metric = evaluate_model(loaded_model, test_loader, device, args.task)
            print(f"加载后模型{metric_name}: {loaded_metric:.4f}")
        else:
            print("加载压缩模型失败")
            loaded_metric = finetuned_metric  # fall back to the fine-tuned metric

        # Collect the full result record
        results = {
            'task': args.task,
            'mode': args.mode,
            'compression_level': args.compression_level,
            'training_params': {
                'epochs': args.epochs,
                'batch_size': args.batch_size,
                'learning_rate': args.learning_rate,
                'dataset_size': args.dataset_size,
                'img_size': args.img_size
            } if args.mode in ['train', 'both'] else None,
            'compression_params': {
                'bits': compression_params['bits'],
                'target_sparsity': compression_params['sparsity'],
                'actual_sparsity': float(actual_sparsity),
                'finetune_epochs': args.finetune_epochs
            },
            'model_sizes': {
                'original_memory_mb': float(original_size),
                'original_file_mb': float(original_file_size),
                'compressed_memory_mb': float(compressed_size),
                'compressed_file_mb': float(compressed_file_size),
                'file_compression_ratio': float(file_compression_ratio)
            },
            'performance_metrics': {
                f'original_{metric_name.lower()}': float(original_metric),
                f'compressed_{metric_name.lower()}': float(compressed_metric),
                f'finetuned_{metric_name.lower()}': float(finetuned_metric),
                f'loaded_{metric_name.lower()}': float(loaded_metric),
                f'{metric_name.lower()}_change_vs_original': float(finetuned_metric - original_metric)
            },
            'training_history': training_history
        }

        # Save the results summary
        results_path = os.path.join(args.output_dir, f"complete_results_{args.task}.json")
        with open(results_path, 'w') as f:
            json.dump(results, f, indent=2)

        # Final statistics
        print("\n" + "=" * 60)
        print(f"云{args.task}最终结果统计")
        print("=" * 60)
        if training_history:
            print(f"训练最佳{metric_name}: {training_history['best_test_metric']:.4f}")
        print(f"原始模型{metric_name}: {original_metric:.4f}")
        print(f"压缩后模型{metric_name}: {compressed_metric:.4f}")
        print(f"微调后模型{metric_name}: {finetuned_metric:.4f}")
        print(f"加载后模型{metric_name}: {loaded_metric:.4f}")

        if args.task == 'detection':
            final_change = (finetuned_metric - original_metric) * 100
            print(f"最终{metric_name}变化: {final_change:+.2f}%")
        else:
            final_change = finetuned_metric - original_metric
            print(f"最终{metric_name}变化: {final_change:+.2f}dB")

        print(f"文件压缩率: {file_compression_ratio:.2f}倍")
        print(f"原始文件大小: {original_file_size:.2f} MB")
        print(f"压缩文件大小: {compressed_file_size:.2f} MB")
        print(f"量化位数: {compression_params['bits']}")
        print(f"目标稀疏度: {compression_params['sparsity']:.3f}")
        print(f"实际稀疏度: {actual_sparsity:.4f}")
        print(f"压缩级别: {args.compression_level}")

        # Grade the compression outcome
        if args.task == 'detection':
            if abs(final_change) < 5.0:  # IoU change below 5 percentage points
                print("✅ 压缩效果: 优秀 - 性能损失很小")
            elif abs(final_change) < 15.0:  # IoU change below 15 percentage points
                print("⚠️  压缩效果: 良好 - 性能损失可接受")
            else:
                print("❌ 压缩效果: 较差 - 性能损失较大，建议降低压缩强度")
        else:  # removal: final_change is a signed dB delta (negative = worse)
            # BUG FIX: the original tested abs(final_change) > -2.0, which is
            # always true (abs >= 0), so "优秀" printed unconditionally and
            # the other branches were unreachable. Compare the signed value.
            if final_change > -2.0:  # PSNR drop smaller than 2dB
                print("✅ 压缩效果: 优秀 - 性能损失很小")
            elif final_change > -5.0:  # PSNR drop smaller than 5dB
                print("⚠️  压缩效果: 良好 - 性能损失可接受")
            else:
                print("❌ 压缩效果: 较差 - 性能损失较大，建议降低压缩强度")

        print(f"结果已保存到: {results_path}")

    print(f"\n卫星云{args.task}模型训练和压缩完成!")

    # Actionable follow-up suggestions
    if args.mode in ['compress', 'both']:
        print("\n💡 建议:")
        if args.task == 'detection':
            if file_compression_ratio < 2:
                print("- 如需更高压缩率，可尝试 --compression-level moderate 或 aggressive")
            elif abs(final_change) > 10:
                print("- 如需更好性能，可尝试 --compression-level conservative 或增加 --finetune-epochs")
        else:
            if file_compression_ratio < 2:
                print("- 如需更高压缩率，可尝试 --compression-level moderate")
            # BUG FIX: the original tested abs(final_change) < -3, which is
            # never true (abs >= 0), so this hint never printed. Use the
            # signed dB drop instead.
            elif final_change < -3:
                print("- 如需更好性能，可尝试 --compression-level conservative 或增加 --finetune-epochs")


if __name__ == "__main__":
    main()
