#!/usr/bin/env python
"""
卫星图像分割模型训练和压缩示例 - 完整版
支持多种分割模型的训练和压缩
确保实际减小保存的模型文件大小
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import numpy as np
import copy
import time
import json
import gzip
import matplotlib.pyplot as plt
from tqdm import tqdm


# Synthetic satellite-image segmentation dataset (generated in memory)
class SatelliteSegmentationDataset(Dataset):
    """In-memory synthetic dataset of satellite-style images with pixel masks.

    Each sample is an (image, mask) pair: image is a float tensor of shape
    (3, img_size, img_size) with values in [0, 1]; mask is a long tensor of
    shape (1, img_size, img_size) holding class ids 0..num_classes-1.
    All samples are generated eagerly in __init__.
    """

    def __init__(self, size=1000, img_size=256, num_classes=7):
        # size: number of samples; img_size: square image side in pixels;
        # num_classes: number of land-cover classes (layout below assumes 7).
        self.size = size
        self.img_size = img_size
        self.num_classes = num_classes

        print(f"生成 {size} 个卫星图像分割样本...")
        self.data = []

        for i in tqdm(range(size)):
            img, mask = self._generate_satellite_sample()
            self.data.append((img, mask))

    def _generate_satellite_sample(self):
        """Generate one (image, mask) pair with satellite-like land-cover features."""
        img = torch.zeros(3, self.img_size, self.img_size)
        mask = torch.zeros(1, self.img_size, self.img_size, dtype=torch.long)

        # Land-cover classes and their RGB colors:
        # 0: background, 1: water, 2: vegetation, 3: building, 4: road,
        # 5: bare soil, 6: farmland
        class_colors = {
            0: [0.2, 0.2, 0.2],  # background - dark gray
            1: [0.1, 0.3, 0.8],  # water - blue
            2: [0.2, 0.7, 0.2],  # vegetation - green
            3: [0.7, 0.7, 0.7],  # building - gray
            4: [0.4, 0.4, 0.4],  # road - dark gray
            5: [0.8, 0.6, 0.4],  # bare soil - brown
            6: [0.5, 0.8, 0.3]  # farmland - light green
        }

        # Pick the dominant terrain type for this sample
        terrain_type = np.random.choice(['urban', 'rural', 'mixed', 'coastal'])

        if terrain_type == 'urban':
            # Urban scene: mostly buildings, roads, a little vegetation
            self._add_urban_features(img, mask, class_colors)
        elif terrain_type == 'rural':
            # Rural scene: mostly farmland, vegetation and water
            self._add_rural_features(img, mask, class_colors)
        elif terrain_type == 'coastal':
            # Coastal scene: large water areas plus mixed features
            self._add_coastal_features(img, mask, class_colors)
        else:  # mixed
            # Mixed scene: a bit of everything
            self._add_mixed_features(img, mask, class_colors)

        # Add noise for texture detail, then clamp back into [0, 1]
        noise = torch.randn_like(img) * 0.05
        img = torch.clamp(img + noise, 0, 1)

        return img, mask

    def _add_urban_features(self, img, mask, class_colors):
        """Paint an urban scene: buildings, a road network, small parks."""
        # Background color (mask stays class 0)
        img += torch.tensor(class_colors[0]).view(3, 1, 1)

        # Rectangular buildings
        num_buildings = np.random.randint(3, 8)
        for _ in range(num_buildings):
            w, h = np.random.randint(20, 60), np.random.randint(20, 60)
            x, y = np.random.randint(0, self.img_size - w), np.random.randint(0, self.img_size - h)

            color = torch.tensor(class_colors[3]).view(3, 1, 1)
            img[:, y:y + h, x:x + w] = color
            mask[0, y:y + h, x:x + w] = 3

        # Road network
        self._add_road_network(img, mask, class_colors)

        # A little vegetation (parks etc.)
        num_parks = np.random.randint(1, 3)
        for _ in range(num_parks):
            center_x, center_y = np.random.randint(30, self.img_size - 30), np.random.randint(30, self.img_size - 30)
            radius = np.random.randint(15, 30)
            self._add_circular_region(img, mask, class_colors, 2, center_x, center_y, radius)

    def _add_rural_features(self, img, mask, class_colors):
        """Paint a rural scene: bare-soil background, fields, water, vegetation."""
        # Background - bare soil
        img += torch.tensor(class_colors[5]).view(3, 1, 1)
        mask[0, :, :] = 5

        # Farmland (regular rectangles)
        num_fields = np.random.randint(2, 5)
        for _ in range(num_fields):
            w, h = np.random.randint(40, 80), np.random.randint(40, 80)
            x, y = np.random.randint(0, self.img_size - w), np.random.randint(0, self.img_size - h)

            color = torch.tensor(class_colors[6]).view(3, 1, 1)
            img[:, y:y + h, x:x + w] = color
            mask[0, y:y + h, x:x + w] = 6

        # River or pond (70% chance)
        if np.random.random() > 0.3:
            self._add_water_body(img, mask, class_colors)

        # Vegetation (forest / groves)
        num_vegetation = np.random.randint(1, 4)
        for _ in range(num_vegetation):
            self._add_irregular_vegetation(img, mask, class_colors)

    def _add_coastal_features(self, img, mask, class_colors):
        """Paint a coastal scene: a large water band, shore buildings, a beach."""
        # Large water area covering the top 1/3..2/3 of the image
        water_height = np.random.randint(self.img_size // 3, 2 * self.img_size // 3)
        img[:, :water_height, :] = torch.tensor(class_colors[1]).view(3, 1, 1)
        mask[0, :water_height, :] = 1

        # Buildings along the shoreline
        num_buildings = np.random.randint(2, 5)
        for _ in range(num_buildings):
            w, h = np.random.randint(15, 40), np.random.randint(15, 40)
            x = np.random.randint(0, self.img_size - w)
            y = np.random.randint(water_height, self.img_size - h)

            color = torch.tensor(class_colors[3]).view(3, 1, 1)
            img[:, y:y + h, x:x + w] = color
            mask[0, y:y + h, x:x + w] = 3

        # Beach / bare soil strip straddling the waterline
        beach_height = np.random.randint(10, 30)
        beach_start = max(0, water_height - beach_height)
        img[:, beach_start:water_height + 20, :] = torch.tensor(class_colors[5]).view(3, 1, 1)
        mask[0, beach_start:water_height + 20, :] = 5

    def _add_mixed_features(self, img, mask, class_colors):
        """Paint a mixed scene: one land-cover type per image quadrant plus roads."""
        # Quadrants as (x1, y1, x2, y2)
        regions = [
            (0, 0, self.img_size // 2, self.img_size // 2),  # top-left
            (self.img_size // 2, 0, self.img_size, self.img_size // 2),  # top-right
            (0, self.img_size // 2, self.img_size // 2, self.img_size),  # bottom-left
            (self.img_size // 2, self.img_size // 2, self.img_size, self.img_size)  # bottom-right
        ]

        # Assign a class (water/vegetation/building/farmland) to each quadrant
        region_types = np.random.choice([1, 2, 3, 6], 4, replace=True)

        for (x1, y1, x2, y2), region_type in zip(regions, region_types):
            color = torch.tensor(class_colors[region_type]).view(3, 1, 1)
            img[:, y1:y2, x1:x2] = color
            mask[0, y1:y2, x1:x2] = region_type

        # Roads connecting the quadrants
        self._add_road_network(img, mask, class_colors)

    def _add_road_network(self, img, mask, class_colors):
        """Draw up to one horizontal and one vertical road (each with 50% chance)."""
        # Roads are 2*road_width pixels wide
        road_width = 3
        color = torch.tensor(class_colors[4]).view(3, 1, 1)

        # Horizontal road
        if np.random.random() > 0.5:
            road_y = np.random.randint(road_width, self.img_size - road_width)
            img[:, road_y - road_width:road_y + road_width, :] = color
            mask[0, road_y - road_width:road_y + road_width, :] = 4

        # Vertical road
        if np.random.random() > 0.5:
            road_x = np.random.randint(road_width, self.img_size - road_width)
            img[:, :, road_x - road_width:road_x + road_width] = color
            mask[0, :, road_x - road_width:road_x + road_width] = 4

    def _add_water_body(self, img, mask, class_colors):
        """Draw an elliptical water body at a random position."""
        # Random center and base radius, kept away from the borders
        center_x, center_y = np.random.randint(40, self.img_size - 40), np.random.randint(40, self.img_size - 40)
        radius = np.random.randint(20, 40)

        y_coords, x_coords = torch.meshgrid(
            torch.arange(self.img_size),
            torch.arange(self.img_size),
            indexing='ij'
        )

        # Ellipse: semi-axes a (x) and b (y), b randomly stretched/shrunk
        a, b = radius, radius * np.random.uniform(0.5, 1.5)
        mask_water = ((x_coords - center_x) / a) ** 2 + ((y_coords - center_y) / b) ** 2 <= 1

        color = torch.tensor(class_colors[1]).view(3, 1, 1)
        for c in range(3):
            img[c][mask_water] = color[c, 0, 0]
        mask[0][mask_water] = 1

    def _add_circular_region(self, img, mask, class_colors, class_id, center_x, center_y, radius):
        """Draw a filled circle of the given class at (center_x, center_y)."""
        y_coords, x_coords = torch.meshgrid(
            torch.arange(self.img_size),
            torch.arange(self.img_size),
            indexing='ij'
        )

        circular_mask = ((x_coords - center_x) ** 2 + (y_coords - center_y) ** 2) <= radius ** 2

        color = torch.tensor(class_colors[class_id]).view(3, 1, 1)
        for c in range(3):
            img[c][circular_mask] = color[c, 0, 0]
        mask[0][circular_mask] = class_id

    def _add_irregular_vegetation(self, img, mask, class_colors):
        """Draw an irregularly-shaped vegetation patch."""
        # Random bounding box for the patch
        w, h = np.random.randint(30, 60), np.random.randint(30, 60)
        x, y = np.random.randint(0, self.img_size - w), np.random.randint(0, self.img_size - h)

        # Random boolean stencil: ~70% of pixels in the box become vegetation
        shape = torch.rand(h, w) > 0.3
        color = torch.tensor(class_colors[2]).view(3, 1, 1)

        # NOTE: per-pixel Python loop — slow for large patches but only runs
        # at dataset-construction time.
        for i in range(h):
            for j in range(w):
                if shape[i, j] and y + i < self.img_size and x + j < self.img_size:
                    img[:, y + i, x + j] = color.squeeze()
                    mask[0, y + i, x + j] = 2

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        return self.data[idx]


# Self-contained simplified U-Net model
class SimpleUNet(nn.Module):
    """A compact U-Net for semantic segmentation.

    Four encoder stages (64 -> 512 channels), a 1024-channel bottleneck with
    dropout, and a symmetric decoder with skip connections. The output is a
    per-pixel logit map with `num_classes` channels at the input resolution.
    """

    def __init__(self, in_channels=3, num_classes=7):
        super(SimpleUNet, self).__init__()

        # Encoder stages; spatial downsampling happens via self.pool in forward.
        self.enc1 = self._make_encoder_block(in_channels, 64)
        self.enc2 = self._make_encoder_block(64, 128)
        self.enc3 = self._make_encoder_block(128, 256)
        self.enc4 = self._make_encoder_block(256, 512)

        # Bottleneck at 1/16 resolution.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(512, 1024, 3, padding=1),
            nn.BatchNorm2d(1024),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.5)
        )

        # Learned 2x upsampling between decoder stages.
        self.upconv4 = nn.ConvTranspose2d(1024, 512, 2, stride=2)
        self.upconv3 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.upconv2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.upconv1 = nn.ConvTranspose2d(128, 64, 2, stride=2)

        # Decoder stages: each consumes the [upsampled, skip] concatenation,
        # i.e. 2*C channels in, C channels out.
        self.dec4 = self._make_decoder_block(512)
        self.dec3 = self._make_decoder_block(256)
        self.dec2 = self._make_decoder_block(128)
        self.dec1 = self._make_decoder_block(64)

        # 1x1 projection to class logits.
        self.output = nn.Conv2d(64, num_classes, 1)

        # Shared 2x2 max-pool used between encoder stages.
        self.pool = nn.MaxPool2d(2)

    def _make_encoder_block(self, in_channels, out_channels):
        """Two 3x3 conv + BN + ReLU layers."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )

    def _make_decoder_block(self, channels):
        """Two 3x3 conv + BN + ReLU layers following a skip concat (2*channels in)."""
        return nn.Sequential(
            nn.Conv2d(channels * 2, channels, 3, padding=1),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """Return [B, num_classes, H, W] logits for an input of shape [B, C, H, W]."""
        # Encoder path: remember each stage's output for the skip connections.
        skips = []
        feat = x
        for encoder in (self.enc1, self.enc2, self.enc3, self.enc4):
            feat = encoder(feat)
            skips.append(feat)
            feat = self.pool(feat)

        # Bottleneck at the coarsest resolution.
        feat = self.bottleneck(feat)

        # Decoder path: upsample, re-align to the skip's spatial size when the
        # input resolution is not divisible by 16, concatenate, refine.
        stages = (
            (self.upconv4, self.dec4),
            (self.upconv3, self.dec3),
            (self.upconv2, self.dec2),
            (self.upconv1, self.dec1),
        )
        for (upconv, decoder), skip in zip(stages, reversed(skips)):
            feat = upconv(feat)
            if feat.shape[2:] != skip.shape[2:]:
                feat = F.interpolate(feat, size=skip.shape[2:], mode='bilinear', align_corners=False)
            feat = torch.cat([feat, skip], dim=1)
            feat = decoder(feat)

        return self.output(feat)


def get_args():
    """Parse command-line arguments for training/compression runs."""
    p = argparse.ArgumentParser(description="卫星图像分割模型训练和压缩示例")

    # Model architecture
    p.add_argument("--model", "-m", default="simple_unet",
                   choices=["simple_unet", "unet", "deeplabv3plus", "segformer"],
                   help="要使用的模型架构")

    # Data / output locations
    p.add_argument("--data-dir", "-d", default="./data", help="卫星分割数据集目录路径")
    p.add_argument("--output-dir", "-o", default="./output", help="模型和结果的输出目录")
    p.add_argument("--num-classes", type=int, default=7, help="分割类别数量 (默认: 7)")

    # Training hyperparameters
    p.add_argument("--epochs", "-e", type=int, default=25, help="训练轮数 (默认: 25)")
    p.add_argument("--batch-size", "-b", type=int, default=8, help="批次大小 (默认: 8)")
    p.add_argument("--learning-rate", "-lr", type=float, default=0.001, help="学习率 (默认: 0.001)")
    p.add_argument("--weight-decay", type=float, default=1e-4, help="权重衰减 (默认: 1e-4)")
    p.add_argument("--dataset-size", type=int, default=1000, help="数据集大小 (默认: 1000)")
    p.add_argument("--img-size", type=int, default=256, help="输入图像大小 (默认: 256)")

    # Compression settings
    p.add_argument("--bits", type=int, default=4, help="量化位数 (默认: 4位)")
    p.add_argument("--sparsity", type=float, default=0.7, help="剪枝稀疏度 (默认: 0.7)")

    # Run mode
    p.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                   help="运行模式: train(仅训练), compress(仅压缩), both(训练+压缩)")
    p.add_argument("--pretrained-path", type=str, default=None,
                   help="预训练模型路径 (用于仅压缩模式)")
    p.add_argument("--use-pretrained", action="store_true",
                   help="使用预训练权重初始化（仅适用于标准模型）")

    return p.parse_args()


def load_segmentation_model(model_name, num_classes=7, use_pretrained=False):
    """Build a segmentation model by name, with graceful fallbacks.

    Optional third-party backends (segmentation_models_pytorch, torchvision,
    transformers) are imported lazily; whenever one is unavailable the
    self-contained SimpleUNet is used instead so the script always runs.

    Args:
        model_name: one of 'simple_unet', 'unet', 'deeplabv3plus', 'segformer'.
        num_classes: number of output segmentation classes.
        use_pretrained: load pretrained encoder/backbone weights where supported.

    Returns:
        An nn.Module ready for training.

    Raises:
        ValueError: for an unrecognized model_name.
    """
    print(f"加载 {model_name} 模型...")

    if model_name == "simple_unet":
        model = SimpleUNet(in_channels=3, num_classes=num_classes)
        print("使用自定义简化U-Net模型")

    elif model_name == "unet":
        try:
            from segmentation_models_pytorch import Unet
            model = Unet(
                encoder_name="resnet34",
                encoder_weights="imagenet" if use_pretrained else None,
                in_channels=3,
                classes=num_classes,
            )
            print("使用segmentation_models_pytorch的U-Net模型")
        except ImportError:
            # Fall back to the self-contained implementation.
            print("segmentation_models_pytorch不可用，使用自定义U-Net")
            model = SimpleUNet(in_channels=3, num_classes=num_classes)

    elif model_name == "deeplabv3plus":
        try:
            from segmentation_models_pytorch import DeepLabV3Plus
            model = DeepLabV3Plus(
                encoder_name="resnet50",
                encoder_weights="imagenet" if use_pretrained else None,
                in_channels=3,
                classes=num_classes,
            )
            print("使用segmentation_models_pytorch的DeepLabV3+模型")
        except ImportError:
            # Second fallback: torchvision's DeepLabV3 (returns a dict from forward).
            from torchvision.models.segmentation import deeplabv3_resnet50, DeepLabV3_ResNet50_Weights
            if use_pretrained:
                # Pretrained head predicts 21 COCO/VOC classes; swap the final
                # 1x1 conv for our class count.
                model = deeplabv3_resnet50(weights=DeepLabV3_ResNet50_Weights.DEFAULT)
                model.classifier[4] = nn.Conv2d(256, num_classes, kernel_size=1)
            else:
                model = deeplabv3_resnet50(num_classes=num_classes)
            print("使用torchvision的DeepLabV3模型")

    elif model_name == "segformer":
        try:
            from transformers import SegformerForSemanticSegmentation
            # ignore_mismatched_sizes lets the checkpoint load despite the
            # different classifier-head size.
            model = SegformerForSemanticSegmentation.from_pretrained(
                "nvidia/segformer-b0-finetuned-ade-512-512",
                num_labels=num_classes,
                ignore_mismatched_sizes=True,
            )
            print("使用transformers的SegFormer模型")
        except Exception as e:
            # Broad catch: from_pretrained can fail for network/cache reasons
            # as well as ImportError.
            print(f"无法加载SegFormer: {e}, 使用自定义U-Net")
            model = SimpleUNet(in_channels=3, num_classes=num_classes)
    else:
        raise ValueError(f"不支持的模型: {model_name}")

    return model


def create_dataloaders(data_dir, batch_size=8, dataset_size=1000, img_size=256, num_classes=7):
    """Build train/test DataLoaders over a synthetic satellite segmentation dataset.

    NOTE(review): data_dir is currently unused — the dataset is generated
    entirely in memory; kept for interface compatibility.

    Returns:
        (train_loader, test_loader) with an 80/20 random split.
    """
    print("准备卫星图像分割数据集...")

    # Generate the full synthetic dataset up front.
    dataset = SatelliteSegmentationDataset(
        size=dataset_size,
        img_size=img_size,
        num_classes=num_classes
    )

    # 80% train / 20% test split.
    n_train = int(0.8 * len(dataset))
    train_set, test_set = random_split(dataset, [n_train, len(dataset) - n_train])

    # Shared loader settings; only shuffling differs between the two splits.
    loader_kwargs = dict(batch_size=batch_size, num_workers=2, pin_memory=True)
    train_loader = DataLoader(train_set, shuffle=True, **loader_kwargs)
    test_loader = DataLoader(test_set, shuffle=False, **loader_kwargs)

    print(f"训练集大小: {len(train_set)}")
    print(f"测试集大小: {len(test_set)}")

    return train_loader, test_loader


def dice_loss(pred, target, smooth=1e-5):
    """Multi-class soft Dice loss.

    Args:
        pred: logits of shape [B, C, H, W].
        target: class-index mask of shape [B, 1, H, W].
        smooth: additive smoothing to avoid division by zero.

    Returns:
        Scalar tensor: 1 - mean per-class Dice coefficient.
    """
    probs = torch.softmax(pred, dim=1)
    labels = target.squeeze(1).long()
    one_hot = F.one_hot(labels, num_classes=probs.size(1)).permute(0, 3, 1, 2).float()

    overlap = (probs * one_hot).sum(dim=(2, 3))
    denom = probs.sum(dim=(2, 3)) + one_hot.sum(dim=(2, 3))
    dice_score = (2.0 * overlap + smooth) / (denom + smooth)

    return 1 - dice_score.mean()


def focal_loss(pred, target, alpha=1, gamma=2):
    """Focal loss: down-weights easy pixels to counter class imbalance.

    Args:
        pred: logits of shape [B, C, H, W].
        target: class-index mask of shape [B, 1, H, W].
        alpha: global scaling factor.
        gamma: focusing exponent (larger -> easy pixels matter less).
    """
    per_pixel_ce = F.cross_entropy(pred, target.squeeze(1), reduction='none')
    # Probability assigned to the true class at each pixel.
    prob_true = torch.exp(-per_pixel_ce)
    weighted = alpha * (1 - prob_true) ** gamma * per_pixel_ce
    return weighted.mean()


def combined_loss(pred, target, alpha=0.7):
    """Weighted blend of cross-entropy with Dice and Focal losses.

    alpha weights cross-entropy; the remainder is split evenly between
    the Dice and Focal terms.
    """
    ce = F.cross_entropy(pred, target.squeeze(1))
    auxiliary = 0.5 * dice_loss(pred, target) + 0.5 * focal_loss(pred, target)
    return alpha * ce + (1 - alpha) * auxiliary


def calculate_miou(pred, target, num_classes):
    """Mean intersection-over-union over all classes for one batch.

    Args:
        pred: logits of shape [B, C, H, W] (argmax of logits equals argmax of
            softmax, so no explicit softmax is needed).
        target: class-index mask of shape [B, 1, H, W].
        num_classes: number of classes to average over.

    Returns:
        mIoU as a numpy float; classes absent from both pred and target
        count as IoU 1.0.
    """
    labels = pred.argmax(dim=1)
    truth = target.squeeze(1)

    per_class = []
    for cls in range(num_classes):
        in_pred = labels == cls
        in_truth = truth == cls

        union = (in_pred | in_truth).sum().float()
        if union == 0:
            # Class absent everywhere: counted as a perfect match.
            per_class.append(1.0)
        else:
            inter = (in_pred & in_truth).sum().float()
            per_class.append((inter / union).item())

    return np.mean(per_class)


def train_model(model, train_loader, test_loader, device, args):
    """Train a segmentation model and keep the best-on-test checkpoint.

    Uses Adam + StepLR (halve the LR every 10 epochs) with the combined
    CE/Dice/Focal loss. After training, the model is reloaded with the
    weights that achieved the best test mIoU and training curves are saved
    to args.output_dir.

    Args:
        model: the network (already moved to `device` by the caller).
        train_loader / test_loader: (images, masks) batch loaders.
        device: torch device for training.
        args: parsed CLI namespace (epochs, learning_rate, weight_decay,
            model, num_classes, output_dir).

    Returns:
        (model, best_test_miou, history_dict) where history_dict holds the
        per-epoch losses and mIoUs.
    """
    print("开始训练卫星图像分割模型...")

    # Optimizer and LR schedule
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

    # Per-epoch training history
    train_losses = []
    train_mious = []
    test_mious = []

    best_test_miou = 0.0
    best_model_state = None

    # Training loop
    for epoch in range(args.epochs):
        # Training phase
        model.train()
        running_loss = 0.0
        running_miou = 0.0
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, masks) in enumerate(pbar):
            images, masks = images.to(device), masks.to(device)

            # Forward pass
            optimizer.zero_grad()

            if args.model == 'segformer':
                # SegFormer takes keyword input and returns an object with
                # .logits at 1/4 resolution
                outputs = model(pixel_values=images)
                outputs = outputs.logits
                # Upsample logits back to the mask resolution
                outputs = F.interpolate(outputs, size=masks.shape[-2:], mode='bilinear', align_corners=False)
            else:
                outputs = model(images)
                if isinstance(outputs, dict):  # torchvision models return a dict
                    outputs = outputs['out']

            # Compute combined CE/Dice/Focal loss
            loss = combined_loss(outputs, masks)

            # Backward pass
            loss.backward()
            optimizer.step()

            # Batch mIoU (computed on the training batch)
            miou = calculate_miou(outputs, masks, args.num_classes)

            # Accumulate statistics
            running_loss += loss.item()
            running_miou += miou
            num_batches += 1

            # Update the progress bar
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'mIoU': f'{miou:.4f}'
            })

        # Epoch-average training loss and mIoU
        avg_train_loss = running_loss / num_batches
        train_miou = running_miou / num_batches

        # Evaluation phase
        test_miou = evaluate_segmentation_model(model, test_loader, device, args.model, args.num_classes)

        # Record history
        train_losses.append(avg_train_loss)
        train_mious.append(train_miou)
        test_mious.append(test_miou)

        # Track the best checkpoint (deep copy: state_dict tensors are live views)
        if test_miou > best_test_miou:
            best_test_miou = test_miou
            best_model_state = copy.deepcopy(model.state_dict())

        # Step the LR schedule
        scheduler.step()

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train mIoU: {train_miou:.4f}, '
              f'Test mIoU: {test_miou:.4f}')

    # Restore the best checkpoint
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试mIoU: {best_test_miou:.4f}')

    # Save the training curves
    plot_training_curves(train_losses, train_mious, test_mious, args.output_dir)

    return model, best_test_miou, {
        'train_losses': train_losses,
        'train_mious': train_mious,
        'test_mious': test_mious,
        'best_test_miou': best_test_miou
    }


def evaluate_segmentation_model(model, dataloader, device, model_name="simple_unet", num_classes=7):
    """Compute the mean IoU of `model` over all batches of `dataloader`.

    Handles SegFormer (.logits + upsampling) and torchvision ('out' dict key)
    output conventions. Returns 0 for an empty loader.
    """
    model.eval()
    miou_sum = 0.0
    batches = 0

    with torch.no_grad():
        for images, masks in dataloader:
            images = images.to(device)
            masks = masks.to(device)

            if model_name == 'segformer':
                logits = model(pixel_values=images).logits
                # SegFormer predicts at 1/4 resolution; upsample to the mask size.
                logits = F.interpolate(logits, size=masks.shape[-2:], mode='bilinear', align_corners=False)
            else:
                logits = model(images)
                if isinstance(logits, dict):
                    logits = logits['out']

            miou_sum += calculate_miou(logits, masks, num_classes)
            batches += 1

    return miou_sum / batches if batches > 0 else 0


def plot_training_curves(train_losses, train_mious, test_mious, output_dir):
    """Save side-by-side loss and mIoU curves as output_dir/training_curves.png."""
    xs = range(1, len(train_losses) + 1)

    fig, (ax_loss, ax_miou) = plt.subplots(1, 2, figsize=(12, 4))

    # Left panel: training loss.
    ax_loss.plot(xs, train_losses, 'b-', label='Training Loss')
    ax_loss.set_title('Training Loss')
    ax_loss.set_xlabel('Epoch')
    ax_loss.set_ylabel('Loss')
    ax_loss.legend()
    ax_loss.grid(True)

    # Right panel: train vs. test mIoU.
    ax_miou.plot(xs, train_mious, 'b-', label='Training mIoU')
    ax_miou.plot(xs, test_mious, 'r-', label='Test mIoU')
    ax_miou.set_title('Training and Test mIoU')
    ax_miou.set_xlabel('Epoch')
    ax_miou.set_ylabel('mIoU')
    ax_miou.legend()
    ax_miou.grid(True)

    fig.tight_layout()
    fig.savefig(os.path.join(output_dir, 'training_curves.png'), dpi=300, bbox_inches='tight')
    plt.close(fig)
    print(f"训练曲线已保存到: {os.path.join(output_dir, 'training_curves.png')}")


def visualize_segmentation_results(model, dataloader, device, output_dir, model_name="simple_unet", num_samples=4):
    """Save a grid comparing input image, ground truth, and model prediction.

    One row per sample (the first item of each of the first `num_samples`
    batches); columns: original image / ground-truth mask / predicted mask.

    Args:
        model: segmentation network (SegFormer-style models handled via
            `.logits`; torchvision models via the 'out' dict key).
        dataloader: yields (images, masks) batches.
        device: device to run inference on.
        output_dir: directory that receives 'segmentation_results.png'.
        model_name: selects the model-specific forward convention.
        num_samples: number of rows to render.
    """
    model.eval()

    # Fix: squeeze=False keeps `axes` two-dimensional even when
    # num_samples == 1, so the axes[i, j] indexing below cannot raise.
    fig, axes = plt.subplots(num_samples, 3, figsize=(12, 4 * num_samples), squeeze=False)
    fig.suptitle('Satellite Image Segmentation Results', fontsize=16)

    # Class legend reference for maintainers (not rendered in the figure).
    class_names = ['Background', 'Water', 'Vegetation', 'Building', 'Road', 'Bare Soil', 'Farmland']
    colors = ['gray', 'blue', 'green', 'silver', 'black', 'brown', 'lightgreen']

    with torch.no_grad():
        for i, (images, masks) in enumerate(dataloader):
            if i >= num_samples:
                break

            images, masks = images.to(device), masks.to(device)

            if model_name == 'segformer':
                outputs = model(pixel_values=images)
                outputs = outputs.logits
                # SegFormer predicts at 1/4 resolution; upsample to the mask size.
                outputs = F.interpolate(outputs, size=masks.shape[-2:], mode='bilinear', align_corners=False)
            else:
                outputs = model(images)
                if isinstance(outputs, dict):
                    outputs = outputs['out']

            pred = torch.argmax(torch.softmax(outputs, dim=1), dim=1)

            # Show only the first sample of the batch.
            img_show = images[0].cpu().permute(1, 2, 0).numpy()
            mask_true = masks[0, 0].cpu().numpy()
            mask_pred = pred[0].cpu().numpy()

            # Clamp for display (noise can push values slightly out of range).
            img_show = np.clip(img_show, 0, 1)

            axes[i, 0].imshow(img_show)
            axes[i, 0].set_title('Original Image')
            axes[i, 0].axis('off')

            axes[i, 1].imshow(mask_true, cmap='tab10', vmin=0, vmax=6)
            axes[i, 1].set_title('Ground Truth')
            axes[i, 1].axis('off')

            axes[i, 2].imshow(mask_pred, cmap='tab10', vmin=0, vmax=6)
            axes[i, 2].set_title('Prediction')
            axes[i, 2].axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'segmentation_results.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"分割结果已保存到: {os.path.join(output_dir, 'segmentation_results.png')}")


def visualize_dataset_samples(dataloader, output_dir, num_samples=8):
    """Save an overlay grid of dataset samples (image + semi-transparent mask).

    Fix: the grid is now sized from `num_samples` (4 columns); the original
    hard-coded a 2x4 grid, which raised IndexError for num_samples > 8.
    The default num_samples=8 still yields the original 2x4 layout.

    Args:
        dataloader: yields (images, masks) batches; the first item of each of
            the first `num_samples` batches is shown.
        output_dir: directory that receives 'dataset_samples.png'.
        num_samples: number of panels to draw.
    """
    cols = 4
    rows = max(1, (num_samples + cols - 1) // cols)  # ceil(num_samples / cols)
    # squeeze=False keeps a 2-D axes array even for a single row.
    fig, axes = plt.subplots(rows, cols, figsize=(4 * cols, 4 * rows), squeeze=False)
    fig.suptitle('Satellite Image Segmentation Dataset Samples', fontsize=16)
    axes = axes.flatten()

    shown = 0
    for i, (images, masks) in enumerate(dataloader):
        if i >= num_samples:
            break

        # Show only the first sample of the batch.
        img_show = images[0].permute(1, 2, 0).numpy()
        mask_show = masks[0, 0].numpy()

        # Clamp for display.
        img_show = np.clip(img_show, 0, 1)

        # Image with the class mask overlaid at 50% opacity.
        axes[i].imshow(img_show)
        axes[i].imshow(mask_show, alpha=0.5, cmap='tab10', vmin=0, vmax=6)
        axes[i].set_title(f'Sample {i + 1}')
        axes[i].axis('off')
        shown = i + 1

    # Blank out any panels the dataloader did not fill.
    for ax in axes[shown:]:
        ax.axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'dataset_samples.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据集样本已保存到: {os.path.join(output_dir, 'dataset_samples.png')}")


def get_model_size(model):
    """Return the total in-memory size of the model's parameters in MB."""
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    return total_bytes / (1024 * 1024)


def compute_compressed_size(model, bits=4, sparsity=0.7):
    """Estimate the on-disk size of the compressed model.

    Weight tensors are costed at the cheaper of sparse storage (4-byte index
    plus a `bits`-bit code per nonzero) or dense `bits`-bit storage; biases
    stay dense fp32. A flat 5% overhead covers metadata.

    Returns:
        (size_mb, actual_sparsity) where actual_sparsity is the measured
        zero fraction over all parameters.
    """
    total_bytes = 0
    total_params = 0
    nonzero_params = 0

    for name, param in model.named_parameters():
        count = param.numel()
        total_params += count

        if 'weight' in name:
            nonzero = torch.count_nonzero(param).item()
            nonzero_params += nonzero
            # Sparse: per-nonzero 4-byte index + bits/8-byte value.
            as_sparse = nonzero * (4 + bits / 8)
            # Dense: every entry quantized to `bits` bits.
            as_dense = count * (bits / 8)

            use_sparse = sparsity > 0 and as_sparse < as_dense
            total_bytes += as_sparse if use_sparse else as_dense
        elif 'bias' in name:
            # Biases remain full-precision fp32.
            total_bytes += count * 4

    overhead_bytes = total_bytes * 0.05
    size_mb = (total_bytes + overhead_bytes) / (1024 * 1024)
    measured_sparsity = 1.0 - (nonzero_params / total_params) if total_params > 0 else 0

    return size_mb, measured_sparsity


def quantize_weights(weight, bits=8):
    """Fake-quantize `weight` onto an unsigned `bits`-bit affine grid.

    Returns:
        (dequantized, scale, zero_point, quantized). The last three are
        None when the tensor is constant (quantization would divide by zero).
    """
    qmin = 0
    qmax = (1 << bits) - 1
    lo, hi = weight.min(), weight.max()

    if lo == hi:
        # Constant tensor: nothing to quantize.
        return weight.clone(), None, None, None

    scale = (hi - lo) / (qmax - qmin)
    zero_point = qmin - lo / scale

    quantized = torch.clamp(torch.round(weight / scale + zero_point), qmin, qmax)
    dequantized = (quantized - zero_point) * scale

    return dequantized, scale, zero_point, quantized


def prune_weights(weight, sparsity=0.7):
    """Magnitude-prune `weight`: zero the `sparsity` fraction of smallest-|w| entries.

    Fixes over the original:
      * guards the k == 0 case (torch.kthvalue raises for k=0, which the
        original hit on tiny tensors with a small sparsity request);
      * keeps strictly-greater-than-threshold values, so exactly k entries
        are removed (the original's >= kept the k-th smallest element and
        undershot the requested sparsity). Ties at the threshold are pruned.

    Args:
        weight: tensor to prune (not modified in place).
        sparsity: fraction of elements to zero, in [0, 1].

    Returns:
        (pruned, mask): pruned tensor and a bool keep-mask of the same shape;
        mask is None when sparsity <= 0, matching the original contract.
    """
    if sparsity <= 0:
        return weight.clone(), None

    magnitudes = weight.abs().flatten()
    num_to_prune = int(magnitudes.numel() * sparsity)

    # Nothing rounds down to prunable: return unchanged with a full keep-mask.
    if num_to_prune < 1:
        return weight.clone(), torch.ones_like(weight, dtype=torch.bool)

    # Everything pruned: all-zero tensor, empty keep-mask.
    if num_to_prune >= magnitudes.numel():
        return torch.zeros_like(weight), torch.zeros_like(weight, dtype=torch.bool)

    # The k-th smallest magnitude is the prune threshold.
    threshold = torch.kthvalue(magnitudes, num_to_prune).values
    mask = weight.abs() > threshold
    pruned = weight * mask.float()

    return pruned, mask


def process_module_for_compression(module, module_path, bits, sparsity, compressed_layers):
    """Recursively prune + fake-quantize every weighted submodule in place.

    Walks the module tree depth-first; for each module owning a `weight`
    parameter, prunes it to `sparsity`, quantizes the survivors to `bits`
    bits, and records a sparse JSON-serializable representation in
    `compressed_layers`, keyed by the dotted parameter path. Biases are
    recorded dense at full precision.
    """
    # Recurse into children first so every leaf is visited with its
    # fully-qualified dotted path.
    for name, child in module.named_children():
        child_path = f"{module_path}.{name}" if module_path else name
        process_module_for_compression(child, child_path, bits, sparsity, compressed_layers)

    if hasattr(module, 'weight') and module.weight is not None:
        with torch.no_grad():
            pruned_weight, mask = prune_weights(module.weight.data, sparsity)

            # mask is None when sparsity <= 0; an all-False mask means the
            # entire layer was pruned away, leaving nothing to record.
            if mask is not None and torch.any(mask):
                quantized_weight, scale, zero_point, q_weight = quantize_weights(pruned_weight, bits)
                # Keep the dequantized ("fake-quantized") weights in the module
                # so it can still run forward passes directly.
                module.weight.data.copy_(quantized_weight)

                # scale is None for constant tensors (nothing was quantized).
                if scale is not None:
                    # Sparse record: multi-dim indices plus uint8 codes of the
                    # surviving weights.
                    nonzero_indices = mask.nonzero(as_tuple=False).cpu().numpy().tolist()
                    nonzero_values = q_weight[mask].to(torch.uint8).cpu().numpy().tolist()

                    compressed_layers[module_path + '.weight'] = {
                        'shape': list(module.weight.shape),
                        'indices': nonzero_indices,
                        'values': nonzero_values,
                        'scale': float(scale.item()),
                        'zero_point': float(zero_point.item()),
                        'bits': bits
                    }

            # Biases are small: store them dense in full precision.
            if hasattr(module, 'bias') and module.bias is not None:
                compressed_layers[module_path + '.bias'] = {
                    'shape': list(module.bias.shape),
                    'values': module.bias.data.cpu().numpy().tolist(),
                    'is_bias': True
                }


def compress_model(model, bits=4, sparsity=0.7):
    """Prune + quantize the conv/linear weights of a deep copy of ``model``.

    Returns a tuple ``(compressed_model, model_info, overall_sparsity)``
    where ``model_info`` carries the sparse/quantized payload used for
    compact on-disk storage, and ``overall_sparsity`` is the measured
    fraction of zero weight elements after compression.
    """
    print(f"压缩模型（量化位数={bits}, 剪枝稀疏度={sparsity}）...")
    clone = copy.deepcopy(model)

    layer_payloads = {}
    info = {
        'model_type': str(type(model).__name__),
        'compressed_layers': layer_payloads,
    }

    process_module_for_compression(clone, '', bits, sparsity, layer_payloads)

    # Measure how sparse the weights actually ended up.
    total = 0
    zeros = 0
    for param_name, param in clone.named_parameters():
        if 'weight' not in param_name:
            continue
        count = param.numel()
        total += count
        zeros += count - torch.count_nonzero(param).item()

    overall = zeros / total if total else 0
    print(f"整体稀疏度: {overall:.4f}")

    return clone, info, overall


def save_original_model(model, path):
    """Serialize the model's state_dict to ``path``.

    Returns the resulting file size in bytes.
    """
    state = model.state_dict()
    torch.save(state, path)
    return os.path.getsize(path)


def save_compressed_model(compressed_info, path):
    """Save the compression payload as gzip-compressed minified JSON.

    Uses compact JSON separators (no whitespace after ',' / ':') so the
    pre-gzip byte stream is as small as possible, then gzips at maximum
    compression level. Returns the resulting file size in bytes.
    """
    json_str = json.dumps(compressed_info, separators=(',', ':'))
    with gzip.open(path, 'wb', compresslevel=9) as f:
        f.write(json_str.encode('utf-8'))
    return os.path.getsize(path)


def load_compressed_model(path, model_name="simple_unet", num_classes=7, device=None):
    """Rebuild a model from a gzip/JSON file written by ``save_compressed_model``.

    Args:
        path: compressed file produced by ``save_compressed_model``.
        model_name: architecture name passed to ``load_segmentation_model``.
        num_classes: number of segmentation classes.
        device: optional torch device to move the restored model onto.

    Returns:
        The model with dequantized sparse weights and stored biases restored.
    """
    with gzip.open(path, 'rb') as f:
        json_str = f.read().decode('utf-8')

    compressed_info = json.loads(json_str)
    layers = compressed_info['compressed_layers']
    model = load_segmentation_model(model_name, num_classes, use_pretrained=False)

    def restore_compressed_params(module, module_path=''):
        # Depth-first so children are restored before the module itself,
        # mirroring the traversal order used during compression.
        for name, child in module.named_children():
            child_path = f"{module_path}.{name}" if module_path else name
            restore_compressed_params(child, child_path)

        weight_path = module_path + '.weight'
        bias_path = module_path + '.bias'

        if hasattr(module, 'weight') and weight_path in layers:
            info = layers[weight_path]
            weight = torch.zeros(info['shape'])

            if info.get('indices') and info.get('values'):
                # Vectorized scatter: a single advanced-indexing assignment
                # instead of a Python loop over every non-zero entry.
                indices = torch.tensor(info['indices'], dtype=torch.long)
                values = torch.tensor(info['values'], dtype=weight.dtype)
                dequantized = (values - info['zero_point']) * info['scale']
                weight[tuple(indices.t())] = dequantized

            module.weight.data = weight

        if hasattr(module, 'bias') and module.bias is not None and bias_path in layers:
            info = layers[bias_path]
            if info.get('is_bias'):
                module.bias.data = torch.tensor(info['values'])

    restore_compressed_params(model)

    if device:
        model = model.to(device)

    return model


def main():
    """Entry point: train and/or compress a satellite segmentation model.

    Behavior is driven by command-line args (see ``get_args``):
    ``--mode train`` trains only, ``--mode compress`` compresses a saved
    model, ``--mode both`` does both. All artifacts (model files, plots,
    JSON results) are written under ``args.output_dir``.
    """
    args = get_args()

    # Make sure the output directory exists.
    os.makedirs(args.output_dir, exist_ok=True)

    # Select compute device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Create (or later load weights into) the model.
    model = load_segmentation_model(args.model, args.num_classes, args.use_pretrained)
    model = model.to(device)

    trained_model_path = os.path.join(args.output_dir, f"{args.model}_segmentation_trained.pth")
    training_history = None
    test_loader = None  # reused by the compression stage when mode == 'both'

    # --- Training stage ---
    if args.mode in ['train', 'both']:
        print("=" * 50)
        print("开始卫星图像分割训练阶段")
        print("=" * 50)

        # Build the data loaders.
        train_loader, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.dataset_size,
            args.img_size, args.num_classes
        )

        # Visualize a few dataset samples.
        visualize_dataset_samples(train_loader, args.output_dir)

        # Train the model.
        model, best_miou, training_history = train_model(
            model, train_loader, test_loader, device, args
        )

        # Persist the trained weights.
        torch.save(model.state_dict(), trained_model_path)
        print(f"训练好的模型已保存到: {trained_model_path}")

        # Visualize segmentation predictions.
        visualize_segmentation_results(model, test_loader, device, args.output_dir, args.model)

        # Persist the training history.
        history_path = os.path.join(args.output_dir, "training_history.json")
        with open(history_path, 'w') as f:
            json.dump(training_history, f, indent=2)

    # Compress-only mode: load previously trained weights if available.
    elif args.mode == 'compress':
        if args.pretrained_path and os.path.exists(args.pretrained_path):
            model.load_state_dict(torch.load(args.pretrained_path, map_location=device))
            print(f"已加载预训练模型: {args.pretrained_path}")
        elif os.path.exists(trained_model_path):
            model.load_state_dict(torch.load(trained_model_path, map_location=device))
            print(f"已加载训练好的模型: {trained_model_path}")
        else:
            print("警告: 没有找到预训练模型，将使用随机初始化的模型进行压缩")

    # --- Compression stage ---
    if args.mode in ['compress', 'both']:
        print("=" * 50)
        print("开始压缩阶段")
        print("=" * 50)

        # Reuse the loader built during training when available; regenerating
        # the synthetic dataset from scratch is expensive.
        if test_loader is None:
            _, test_loader = create_dataloaders(
                args.data_dir, args.batch_size, args.dataset_size,
                args.img_size, args.num_classes
            )

        # Baseline evaluation.
        print("评估原始模型...")
        original_miou = evaluate_segmentation_model(model, test_loader, device, args.model, args.num_classes)

        # Baseline sizes (in-memory and theoretical).
        original_size = get_model_size(model)
        original_theoretical_size = compute_compressed_size(model, 32, 0)[0]

        # Save the uncompressed model and measure its on-disk size.
        original_path = os.path.join(args.output_dir, f"{args.model}_segmentation_original.pth")
        original_file_size_bytes = save_original_model(model, original_path)
        original_file_size = original_file_size_bytes / (1024 * 1024)

        print(f"原始模型mIoU: {original_miou:.4f}")
        print(f"原始模型内存大小: {original_size:.2f} MB")
        print(f"原始模型文件大小: {original_file_size:.2f} MB")

        # Compress (prune + quantize).
        start_time = time.time()
        compressed_model, compressed_info, actual_sparsity = compress_model(
            model, args.bits, args.sparsity
        )
        compression_time = time.time() - start_time
        print(f"压缩完成，耗时: {compression_time:.2f} 秒")

        # Evaluate the compressed model.
        print("评估压缩后的模型...")
        compressed_model = compressed_model.to(device)
        compressed_miou = evaluate_segmentation_model(compressed_model, test_loader, device, args.model,
                                                      args.num_classes)

        # Compressed sizes (in-memory and theoretical).
        compressed_size = get_model_size(compressed_model)
        compressed_theoretical_size = compute_compressed_size(
            compressed_model, args.bits, args.sparsity
        )[0]

        # Save the compressed payload and measure the real file size.
        compressed_path = os.path.join(args.output_dir, f"{args.model}_segmentation_compressed.pth")
        compressed_file_size_bytes = save_compressed_model(compressed_info, compressed_path)
        compressed_file_size = compressed_file_size_bytes / (1024 * 1024)

        print(f"压缩后模型mIoU: {compressed_miou:.4f}")
        print(f"压缩后模型内存大小: {compressed_size:.2f} MB")
        print(f"压缩后模型文件大小: {compressed_file_size:.2f} MB")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

        # On-disk compression ratio.
        file_compression_ratio = original_file_size / compressed_file_size

        # Round-trip check: load the compressed file back and re-evaluate.
        print("测试加载压缩模型...")
        loaded_model = load_compressed_model(compressed_path, args.model, args.num_classes, device)
        loaded_miou = evaluate_segmentation_model(loaded_model, test_loader, device, args.model, args.num_classes)
        print(f"加载后模型mIoU: {loaded_miou:.4f}")

        # Assemble the full results record.
        results = {
            'model': args.model,
            'mode': args.mode,
            'task': 'satellite_segmentation',
            'num_classes': args.num_classes,
            'use_pretrained': args.use_pretrained,
            'training_params': {
                'epochs': args.epochs,
                'batch_size': args.batch_size,
                'learning_rate': args.learning_rate,
                'dataset_size': args.dataset_size,
                'img_size': args.img_size
            } if args.mode in ['train', 'both'] else None,
            'compression_params': {
                'bits': args.bits,
                'target_sparsity': args.sparsity,
                'actual_sparsity': float(actual_sparsity)
            },
            'model_sizes': {
                'original_memory_mb': float(original_size),
                'original_file_mb': float(original_file_size),
                'compressed_memory_mb': float(compressed_size),
                'compressed_file_mb': float(compressed_file_size),
                'file_compression_ratio': float(file_compression_ratio)
            },
            'performance_metrics': {
                'original_miou': float(original_miou),
                'compressed_miou': float(compressed_miou),
                'loaded_miou': float(loaded_miou),
                'miou_drop': float(original_miou - compressed_miou)
            },
            'training_history': training_history
        }

        # Persist the results.
        results_path = os.path.join(args.output_dir, "complete_results.json")
        with open(results_path, 'w') as f:
            json.dump(results, f, indent=2)

        # Final summary.
        print("\n" + "=" * 60)
        print("卫星图像分割最终结果统计")
        print("=" * 60)
        if training_history:
            print(f"训练最佳mIoU: {training_history['best_test_miou']:.4f}")
        print(f"原始模型mIoU: {original_miou:.4f}")
        print(f"压缩后模型mIoU: {compressed_miou:.4f}")
        print(f"mIoU下降: {(original_miou - compressed_miou) * 100:.2f}%")
        print(f"文件压缩率: {file_compression_ratio:.2f}倍")
        print(f"原始文件大小: {original_file_size:.2f} MB")
        print(f"压缩文件大小: {compressed_file_size:.2f} MB")
        print(f"量化位数: {args.bits}")
        print(f"实际稀疏度: {actual_sparsity:.4f}")
        print(f"结果已保存到: {results_path}")

    print("\n卫星图像分割模型训练和压缩完成!")


# Run the CLI entry point only when executed as a script (not on import).
if __name__ == "__main__":
    main()