#!/usr/bin/env python
"""
改进的遥感图像分类模型训练和压缩示例 - ResNet50版本
解决过拟合问题，优化压缩策略，提升模型性能
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import json
import gzip
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import OrderedDict
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib

matplotlib.use('Agg')  # non-interactive backend: figures are saved to disk, no display needed
plt.rcParams['font.sans-serif'] = ['DejaVu Sans', 'Arial', 'sans-serif']  # font fallback chain for plot text


# Improved remote-sensing image classification dataset class
class ImprovedRemoteSensingDataset(Dataset):
    """Procedurally generated remote-sensing classification dataset.

    Each sample is a (3, img_size, img_size) float tensor synthesized per
    class (farmland, forest, urban, water, bare soil, grassland, road, and
    generic "other" classes), with sensor/atmospheric noise, occasional
    salt-and-pepper noise and cloud shadows, plus ~8% deliberate label noise
    (secondary-class patches blended in while keeping the primary label).
    Samples are pre-generated in __init__; __getitem__ applies optional
    manual augmentation and ImageNet-style normalization.
    """

    def __init__(self, size=2000, num_classes=10, img_size=224, split='train', augment=True):
        """Pre-generate `size` synthetic samples.

        Args:
            size: number of samples to generate.
            num_classes: number of classes; labels cycle 0..num_classes-1.
            img_size: square image side length in pixels.
            split: 'train' or anything else (test); selects the RNG seed so
                the two splits produce different images.
            augment: request augmentation; only honored for the train split.
        """
        self.size = size
        self.num_classes = num_classes
        self.img_size = img_size
        self.split = split
        # Augmentation is only ever active for the training split.
        self.augment = augment and (split == 'train')

        # Use different random seeds to ensure train and test data differ.
        if split == 'train':
            np.random.seed(42)
            torch.manual_seed(42)
        else:
            np.random.seed(123)
            torch.manual_seed(123)

        # Define the data-augmentation pipeline.
        # NOTE(review): self.transform is built but never applied anywhere --
        # __getitem__ performs its own manual flips/rotation/brightness and
        # normalization instead. Confirm whether this pipeline is intentional
        # or dead code.
        if self.augment:
            self.transform = transforms.Compose([
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.RandomVerticalFlip(p=0.3),
                transforms.RandomRotation(degrees=15),
                transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
                transforms.RandomResizedCrop(img_size, scale=(0.8, 1.0)),
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])
        else:
            self.transform = transforms.Compose([
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        # Pre-generate all samples up front; labels cycle through the classes.
        print(f"生成 {size} 个{split}遥感图像分类样本...")
        self.data = []
        self.labels = []

        for i in tqdm(range(size)):
            img, label = self._generate_complex_remote_sensing_image(i % num_classes)
            self.data.append(img)
            self.labels.append(label)

        # Re-randomize the global RNGs so subsequent code is not deterministic.
        np.random.seed()
        torch.manual_seed(int(time.time()))

    def _generate_complex_remote_sensing_image(self, class_id):
        """Generate one complex, realistic remote-sensing image with variability and noise.

        Returns:
            (img, class_id): img is a (3, img_size, img_size) tensor clamped
            to [0, 1]; the label is always the requested class_id, even for
            the ~8% of images that get secondary-class patches blended in.
        """
        img = torch.zeros(3, self.img_size, self.img_size)

        # Base noise floor.
        base_noise = torch.randn(3, self.img_size, self.img_size) * 0.08
        img += base_noise

        # Season and weather variation factors.
        season_factor = np.random.choice([0.8, 1.0, 1.2, 0.9])  # simulate different seasons
        weather_factor = np.random.choice([0.7, 1.0, 1.3])  # simulate different weather

        # Generate different land-cover features depending on the class.
        if class_id == 0:  # farmland
            # More complex farmland patterns.
            base_color = torch.tensor([0.1 + np.random.random() * 0.2,
                                       0.4 + np.random.random() * 0.3,
                                       0.05 + np.random.random() * 0.2]).view(3, 1, 1)
            img += base_color * season_factor

            # Several field layouts.
            # NOTE(review): the 'circles' choice has no matching branch below,
            # so it falls through with no extra pattern -- confirm intent.
            field_patterns = np.random.choice(['strips', 'patches', 'circles', 'mixed'], p=[0.3, 0.3, 0.2, 0.2])

            if field_patterns == 'strips':
                strip_width = np.random.randint(10, 40)
                for i in range(0, self.img_size, strip_width * 2):
                    variation = torch.randn(3, 1, 1) * 0.15
                    img[:, i:i + strip_width, :] += variation
            elif field_patterns == 'patches':
                for _ in range(np.random.randint(4, 12)):
                    x, y = np.random.randint(0, self.img_size - 20), np.random.randint(0, self.img_size - 20)
                    w, h = np.random.randint(15, 50), np.random.randint(15, 50)
                    patch_color = torch.randn(3, 1, 1) * 0.2
                    end_x, end_y = min(x + w, self.img_size), min(y + h, self.img_size)
                    img[:, y:end_y, x:end_x] += patch_color
            elif field_patterns == 'mixed':
                # Mixed layout: a few random strips and/or patches.
                for _ in range(np.random.randint(2, 6)):
                    pattern_type = np.random.choice(['strip', 'patch'])
                    if pattern_type == 'strip':
                        if np.random.random() > 0.5:  # horizontal strip
                            y_start = np.random.randint(0, self.img_size - 20)
                            strip_height = np.random.randint(8, 25)
                            variation = torch.randn(3, 1, 1) * 0.12
                            img[:, y_start:y_start + strip_height, :] += variation
                        else:  # vertical strip
                            x_start = np.random.randint(0, self.img_size - 20)
                            strip_width = np.random.randint(8, 25)
                            variation = torch.randn(3, 1, 1) * 0.12
                            img[:, :, x_start:x_start + strip_width] += variation

            # Occasionally add an irrigation channel.
            # NOTE(review): the 'diagonal' choice has no branch and does nothing.
            if np.random.random() > 0.7:
                channel_type = np.random.choice(['horizontal', 'vertical', 'diagonal'])
                if channel_type == 'horizontal':
                    y = np.random.randint(20, self.img_size - 20)
                    img[:, y - 1:y + 2, :] = torch.tensor([0.2, 0.3, 0.5]).view(3, 1, 1)
                elif channel_type == 'vertical':
                    x = np.random.randint(20, self.img_size - 20)
                    img[:, :, x - 1:x + 2] = torch.tensor([0.2, 0.3, 0.5]).view(3, 1, 1)

        elif class_id == 1:  # forest
            # More realistic forest texture.
            canopy_density = 0.6 + np.random.random() * 0.4
            base_color = torch.tensor([0.03 + canopy_density * 0.12,
                                       0.25 + canopy_density * 0.25,
                                       0.03 + canopy_density * 0.12]).view(3, 1, 1)
            img += base_color * season_factor

            # Multi-scale forest texture (sinusoidal patterns at several scales).
            for scale in [3, 8, 15, 25]:
                y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
                forest_texture = 0.08 * torch.sin(2 * np.pi * x / scale + np.random.random() * 2 * np.pi) * \
                                 torch.cos(2 * np.pi * y / scale + np.random.random() * 2 * np.pi)
                img += forest_texture.unsqueeze(0) * (0.2 + np.random.random() * 0.3)

            # Canopy shadow patches (multiplicative darkening).
            for _ in range(np.random.randint(5, 15)):
                x, y = np.random.randint(0, self.img_size - 15), np.random.randint(0, self.img_size - 15)
                shadow_size = np.random.randint(8, 20)
                shadow_intensity = 0.7 + np.random.random() * 0.2
                end_x, end_y = min(x + shadow_size, self.img_size), min(y + shadow_size, self.img_size)
                img[:, y:end_y, x:end_x] *= shadow_intensity

            # Occasional forest road or trail.
            # NOTE(review): the 'curved' choice has no branch and does nothing.
            if np.random.random() > 0.6:
                path_type = np.random.choice(['straight', 'curved'])
                if path_type == 'straight':
                    if np.random.random() > 0.5:
                        y = np.random.randint(20, self.img_size - 20)
                        path_width = np.random.randint(3, 8)
                        img[:, y:y + path_width, :] = torch.tensor([0.4, 0.3, 0.2]).view(3, 1, 1)

        elif class_id == 2:  # urban
            # More complex urban structure.
            urban_density = 0.6 + np.random.random() * 0.4
            base_color = torch.tensor([0.35 + np.random.random() * 0.25,
                                       0.35 + np.random.random() * 0.25,
                                       0.4 + np.random.random() * 0.25]).view(3, 1, 1)
            img += base_color

            # Building density / size vary with urban_density.
            block_size = int(15 + urban_density * 25)
            building_materials = ['concrete', 'glass', 'brick', 'metal']

            for i in range(0, self.img_size, block_size):
                for j in range(0, self.img_size, block_size):
                    if np.random.random() < 0.5 + urban_density * 0.3:
                        # Place a building in this block.
                        building_height = np.random.randint(6, min(block_size - 1, 35))
                        building_width = np.random.randint(6, min(block_size - 1, 35))

                        # Different building materials give different colors.
                        material = np.random.choice(building_materials)
                        if material == 'concrete':
                            building_color = torch.tensor([0.6, 0.6, 0.6]).view(3, 1, 1)
                        elif material == 'glass':
                            building_color = torch.tensor([0.4, 0.5, 0.7]).view(3, 1, 1)
                        elif material == 'brick':
                            building_color = torch.tensor([0.7, 0.4, 0.3]).view(3, 1, 1)
                        else:  # metal
                            building_color = torch.tensor([0.5, 0.5, 0.6]).view(3, 1, 1)

                        # Random per-building color variation.
                        building_color += torch.randn(3, 1, 1) * 0.1

                        # Shadow / illumination effect.
                        shadow_factor = 0.7 + np.random.random() * 0.5
                        building_color *= shadow_factor

                        end_i = min(i + building_height, self.img_size)
                        end_j = min(j + building_width, self.img_size)
                        img[:, i:end_i, j:end_j] = building_color

            # More complex road network.
            # Main roads.
            num_main_roads = np.random.randint(2, 5)
            for _ in range(num_main_roads):
                road_type = np.random.choice(['horizontal', 'vertical'])
                road_width = np.random.randint(6, 12)
                if road_type == 'horizontal':
                    road_y = np.random.randint(road_width, self.img_size - road_width)
                    img[:, road_y:road_y + road_width, :] = torch.tensor([0.15, 0.15, 0.15]).view(3, 1, 1)
                else:
                    road_x = np.random.randint(road_width, self.img_size - road_width)
                    img[:, :, road_x:road_x + road_width] = torch.tensor([0.15, 0.15, 0.15]).view(3, 1, 1)

            # Side streets.
            num_small_roads = np.random.randint(3, 8)
            for _ in range(num_small_roads):
                road_type = np.random.choice(['horizontal', 'vertical'])
                road_width = np.random.randint(2, 5)
                if road_type == 'horizontal':
                    road_y = np.random.randint(0, self.img_size - road_width)
                    img[:, road_y:road_y + road_width, :] = torch.tensor([0.25, 0.25, 0.25]).view(3, 1, 1)
                else:
                    road_x = np.random.randint(0, self.img_size - road_width)
                    img[:, :, road_x:road_x + road_width] = torch.tensor([0.25, 0.25, 0.25]).view(3, 1, 1)

        elif class_id == 3:  # water
            # More realistic water-surface effects.
            water_type = np.random.choice(['lake', 'river', 'ocean'])
            water_clarity = 0.3 + np.random.random() * 0.7

            if water_type == 'lake':
                base_color = torch.tensor([0.05, 0.15 + water_clarity * 0.2, 0.5 + water_clarity * 0.4]).view(3, 1, 1)
            elif water_type == 'river':
                base_color = torch.tensor([0.1, 0.2 + water_clarity * 0.15, 0.4 + water_clarity * 0.3]).view(3, 1, 1)
            else:  # ocean
                base_color = torch.tensor([0.02, 0.1 + water_clarity * 0.1, 0.6 + water_clarity * 0.3]).view(3, 1, 1)

            img += base_color * weather_factor

            # Multi-layer wave and reflection effects.
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')

            # Large waves at three spatial frequencies.
            for freq in [0.03, 0.07, 0.12]:
                phase_x = np.random.random() * 2 * np.pi
                phase_y = np.random.random() * 2 * np.pi
                amplitude = 0.04 + np.random.random() * 0.08
                wave = amplitude * torch.sin(freq * x + phase_x) * torch.cos(freq * y + phase_y)
                img += wave.unsqueeze(0)

            # Small ripples: radially decaying sinusoids around random centers.
            for _ in range(np.random.randint(3, 8)):
                center_x, center_y = np.random.randint(0, self.img_size), np.random.randint(0, self.img_size)
                radius = np.random.randint(20, 60)
                ripple_strength = 0.02 + np.random.random() * 0.04
                distance = torch.sqrt((x - center_x) ** 2 + (y - center_y) ** 2)
                ripple = ripple_strength * torch.sin(distance * 0.3) * torch.exp(-distance / radius)
                img += ripple.unsqueeze(0)

            # Shoreline and vegetation.
            # NOTE(review): only the 'complex' + 'top' combination actually
            # draws a shore; 'simple' and the other sides are no-ops.
            if np.random.random() > 0.3:
                shore_complexity = np.random.choice(['simple', 'complex'])
                if shore_complexity == 'complex':
                    # Irregular shoreline.
                    shore_side = np.random.choice(['top', 'bottom', 'left', 'right'])
                    base_shore_width = np.random.randint(10, 30)

                    if shore_side == 'top':
                        for x_pos in range(0, self.img_size, 5):
                            shore_width = base_shore_width + np.random.randint(-8, 8)
                            shore_width = max(5, min(shore_width, 40))
                            shore_color = torch.tensor([0.4 + np.random.random() * 0.2,
                                                        0.3 + np.random.random() * 0.2,
                                                        0.15 + np.random.random() * 0.15])
                            end_x = min(x_pos + 5, self.img_size)
                            img[:, :shore_width, x_pos:end_x] = shore_color.view(3, 1, 1)

        elif class_id == 4:  # bare soil
            # Different soil types and conditions.
            soil_conditions = np.random.choice(['dry', 'moist', 'eroded', 'cultivated'])
            soil_type = np.random.choice(['clay', 'sand', 'rocky', 'loam'])

            if soil_conditions == 'dry':
                brightness_factor = 1.2
            elif soil_conditions == 'moist':
                brightness_factor = 0.8
            elif soil_conditions == 'eroded':
                brightness_factor = 1.0
            else:  # cultivated
                brightness_factor = 0.9

            if soil_type == 'clay':
                base_color = torch.tensor([0.5, 0.3, 0.2]).view(3, 1, 1)
                texture_scale = 0.15
            elif soil_type == 'sand':
                base_color = torch.tensor([0.7, 0.6, 0.4]).view(3, 1, 1)
                texture_scale = 0.25
            elif soil_type == 'rocky':
                base_color = torch.tensor([0.4, 0.35, 0.3]).view(3, 1, 1)
                texture_scale = 0.1
            else:  # loam
                base_color = torch.tensor([0.6, 0.4, 0.25]).view(3, 1, 1)
                texture_scale = 0.2

            img += base_color * brightness_factor

            # Multi-scale soil texture (finer scales get stronger noise).
            for scale in [2, 6, 12, 20]:
                texture = torch.randn(3, self.img_size, self.img_size) * texture_scale / scale
                img += texture

            # Condition-specific soil features.
            if soil_conditions == 'eroded':
                # Erosion gullies.
                y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
                erosion_pattern = 0.15 * torch.sin(0.02 * x + np.random.random()) * (y / self.img_size)
                img += erosion_pattern.unsqueeze(0)
            elif soil_conditions == 'cultivated':
                # Plough furrows.
                furrow_spacing = np.random.randint(8, 15)
                for i in range(0, self.img_size, furrow_spacing):
                    furrow_variation = torch.randn(3, 1, self.img_size) * 0.1
                    if i + 2 < self.img_size:
                        img[:, i:i + 2, :] += furrow_variation

        # Basic implementations for the remaining classes.
        elif class_id == 5:  # grassland
            grass_health = 0.5 + np.random.random() * 0.5
            base_color = torch.tensor([0.1, 0.3 + grass_health * 0.3, 0.1]).view(3, 1, 1)
            img += base_color * season_factor

            # Grass texture at several scales.
            for scale in [3, 8, 15]:
                texture = torch.randn(3, self.img_size, self.img_size) * 0.1 / scale
                img += texture

        elif class_id == 6:  # road
            road_type = np.random.choice(['highway', 'rural', 'urban'])
            if road_type == 'highway':
                base_color = torch.tensor([0.2, 0.2, 0.2]).view(3, 1, 1)
            elif road_type == 'rural':
                base_color = torch.tensor([0.4, 0.35, 0.3]).view(3, 1, 1)
            else:  # urban
                base_color = torch.tensor([0.15, 0.15, 0.15]).view(3, 1, 1)

            img += base_color

            # Lane marking across the middle.
            if np.random.random() > 0.5:
                line_y = self.img_size // 2
                img[:, line_y - 1:line_y + 1, :] = torch.tensor([0.9, 0.9, 0.9]).view(3, 1, 1)

        else:  # other classes (7, 8, 9)
            # Generate varied generic land-cover types.
            base_color = torch.rand(3, 1, 1) * 0.6 + 0.2
            img += base_color

            # Add complex texture at several scales.
            for scale in [1, 4, 12]:
                noise = torch.randn(3, self.img_size, self.img_size) * (0.15 / scale)
                img += noise

        # Global atmospheric and sensor effects.
        # Atmospheric scattering.
        atmospheric_effect = torch.randn(3, self.img_size, self.img_size) * 0.04
        img += atmospheric_effect

        # Sensor noise.
        sensor_noise = torch.randn(3, self.img_size, self.img_size) * 0.02
        img += sensor_noise

        # Salt-and-pepper noise on ~15% of images.
        if np.random.random() > 0.85:
            salt_pepper = torch.rand(3, self.img_size, self.img_size)
            img[salt_pepper < 0.005] = 1.0  # salt noise
            img[salt_pepper > 0.995] = 0.0  # pepper noise

        # Simulated cloud shadow on ~20% of images.
        if np.random.random() > 0.8:
            shadow_intensity = 0.7 + np.random.random() * 0.2
            shadow_x = np.random.randint(0, self.img_size - 50)
            shadow_y = np.random.randint(0, self.img_size - 50)
            shadow_w = np.random.randint(30, 80)
            shadow_h = np.random.randint(30, 80)
            end_x = min(shadow_x + shadow_w, self.img_size)
            end_y = min(shadow_y + shadow_h, self.img_size)
            img[:, shadow_y:end_y, shadow_x:end_x] *= shadow_intensity

        # Clamp pixel values to [0, 1].
        img = torch.clamp(img, 0, 1)

        # Deliberate label noise to raise classification difficulty:
        # blend in secondary-class patches but keep the primary label.
        if np.random.random() > 0.92:  # ~8% of images get mixed-class content
            secondary_class = (class_id + np.random.randint(1, self.num_classes)) % self.num_classes
            if secondary_class != class_id:
                blend_ratio = 0.2 + np.random.random() * 0.3
                # Paint a few patches with the secondary class's color.
                for _ in range(np.random.randint(2, 4)):
                    x = np.random.randint(0, self.img_size - 25)
                    y = np.random.randint(0, self.img_size - 25)
                    w = np.random.randint(15, 35)
                    h = np.random.randint(15, 35)
                    end_x = min(x + w, self.img_size)
                    end_y = min(y + h, self.img_size)

                    # Simple representative colors for secondary classes 0-4.
                    secondary_colors = [
                        [0.2, 0.6, 0.1],  # green  - farmland/grassland
                        [0.1, 0.3, 0.1],  # dark green - forest
                        [0.5, 0.5, 0.5],  # gray   - urban
                        [0.1, 0.2, 0.6],  # blue   - water
                        [0.6, 0.4, 0.3],  # brown  - bare soil
                    ]

                    # Classes >= 5 have no blend color and are skipped.
                    if secondary_class < len(secondary_colors):
                        secondary_color = torch.tensor(secondary_colors[secondary_class]).view(3, 1, 1)
                        img[:, y:end_y, x:end_x] = img[:, y:end_y, x:end_x] * (1 - blend_ratio) + \
                                                   secondary_color * blend_ratio

        return img, class_id

    def __getitem__(self, idx):
        """Return (normalized image tensor, int label) for sample `idx`.

        Training split: applies random flips, coarse 90-degree rotation and
        brightness jitter before ImageNet-mean/std normalization.
        """
        img, label = self.data[idx], self.labels[idx]

        # Apply data augmentation (training split only).
        if self.augment:
            # Random geometric transforms.
            if np.random.random() > 0.5:
                img = torch.flip(img, [2])  # horizontal flip
            if np.random.random() > 0.7:
                img = torch.flip(img, [1])  # vertical flip
            if np.random.random() > 0.6:
                # Random rotation, approximated by 90-degree steps:
                # sampled angles beyond +-7 degrees snap to +-90.
                angle = np.random.randint(-15, 15)
                if angle > 7:
                    img = torch.rot90(img, 1, [1, 2])
                elif angle < -7:
                    img = torch.rot90(img, -1, [1, 2])

            # Brightness jitter.
            if np.random.random() > 0.5:
                brightness = 0.8 + np.random.random() * 0.4
                img = img * brightness
                img = torch.clamp(img, 0, 1)

        # ImageNet-style normalization (applied to both splits).
        img = (img - torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)) / torch.tensor([0.229, 0.224, 0.225]).view(3, 1,
                                                                                                                   1)

        return img, label

    def __len__(self):
        """Number of pre-generated samples."""
        return self.size


def get_args():
    """Parse command-line arguments for training and compression.

    Returns:
        argparse.Namespace with model/data/training/compression settings.

    Note:
        The original definition used ``action="store_true", default=True``
        for ``--use-pretrained``, which made the flag impossible to disable
        from the command line. ``BooleanOptionalAction`` keeps
        ``--use-pretrained`` working and adds ``--no-use-pretrained``.
    """
    parser = argparse.ArgumentParser(description="改进的ResNet50遥感图像分类模型训练和压缩")

    # Model selection
    parser.add_argument("--model", "-m", default="resnet50",
                        choices=["resnet18", "resnet50", "resnet101", "efficientnet_b0"],
                        help="要使用的模型架构")

    # Data and output parameters
    parser.add_argument("--data-dir", "-d", default="./data",
                        help="遥感数据集目录路径")
    parser.add_argument("--output-dir", "-o", default="./output",
                        help="模型和结果的输出目录")
    parser.add_argument("--num-classes", type=int, default=10,
                        help="分类类别数量")

    # Training parameters
    parser.add_argument("--epochs", "-e", type=int, default=25,
                        help="训练轮数")
    parser.add_argument("--batch-size", "-b", type=int, default=16,
                        help="批次大小")
    parser.add_argument("--learning-rate", "-lr", type=float, default=0.001,
                        help="学习率")
    parser.add_argument("--weight-decay", type=float, default=5e-4,
                        help="权重衰减")
    parser.add_argument("--dataset-size", type=int, default=5000,
                        help="数据集大小")
    parser.add_argument("--img-size", type=int, default=224,
                        help="输入图像大小")

    # Compression parameters - defaults to the conservative preset
    parser.add_argument("--compression-level", choices=['conservative', 'moderate', 'aggressive'],
                        default='conservative',
                        help="压缩级别")
    parser.add_argument("--bits", type=int, default=None,
                        help="量化位数")
    parser.add_argument("--sparsity", type=float, default=None,
                        help="剪枝稀疏度")
    parser.add_argument("--finetune-epochs", type=int, default=10,
                        help="压缩后微调轮数")

    # Run-mode selection
    parser.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                        help="运行模式")
    parser.add_argument("--pretrained-path", type=str, default=None,
                        help="预训练模型路径")
    # BooleanOptionalAction provides both --use-pretrained and
    # --no-use-pretrained; default remains True for backward compatibility.
    parser.add_argument("--use-pretrained", action=argparse.BooleanOptionalAction, default=True,
                        help="使用ImageNet预训练权重")

    return parser.parse_args()


def get_compression_params(level):
    """Map a named compression level to quantization/pruning settings.

    Args:
        level: one of 'conservative', 'moderate', 'aggressive'.

    Returns:
        dict with 'bits' (quantization bit width) and 'sparsity'
        (pruning ratio); deliberately conservative, graduated values.

    Raises:
        KeyError: if `level` is not a known preset name.
    """
    if level == 'conservative':
        return {'bits': 8, 'sparsity': 0.2}   # lowered to 20%
    if level == 'moderate':
        return {'bits': 6, 'sparsity': 0.35}  # lowered to 35%
    if level == 'aggressive':
        return {'bits': 4, 'sparsity': 0.5}   # lowered to 50%
    # Mirror the KeyError a dict lookup would raise for unknown presets.
    raise KeyError(level)


def load_resnet_model(model_name, num_classes=10, use_pretrained=True):
    """Load a torchvision backbone and attach a Dropout-regularized classifier.

    Args:
        model_name: one of 'resnet18', 'resnet50', 'resnet101',
            'efficientnet_b0'.
        num_classes: output dimension of the replacement classifier head.
        use_pretrained: load ImageNet weights (ResNet18/EfficientNet use the
            V1 weight set, ResNet50/101 use V2); otherwise random init.

    Returns:
        nn.Module with its final classifier replaced by
        Dropout(0.5) + Linear(in_features, num_classes).

    Raises:
        ValueError: for an unsupported model name.
    """
    print(f"加载 {model_name} 模型...")

    def _head(in_features):
        # Shared classifier head: Dropout(0.5) to curb overfitting, then a
        # linear projection to the task's class count.
        return nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(in_features, num_classes)
        )

    if model_name == "resnet18":
        from torchvision.models import resnet18, ResNet18_Weights
        if use_pretrained:
            model = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)
            print("使用ImageNet预训练的ResNet18")
        else:
            model = resnet18(weights=None)
            print("使用随机初始化的ResNet18")
        model.fc = _head(model.fc.in_features)

    elif model_name == "resnet50":
        from torchvision.models import resnet50, ResNet50_Weights
        if use_pretrained:
            model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
            print("使用ImageNet预训练的ResNet50")
        else:
            model = resnet50(weights=None)
            print("使用随机初始化的ResNet50")
        model.fc = _head(model.fc.in_features)

    elif model_name == "resnet101":
        from torchvision.models import resnet101, ResNet101_Weights
        if use_pretrained:
            model = resnet101(weights=ResNet101_Weights.IMAGENET1K_V2)
            print("使用ImageNet预训练的ResNet101")
        else:
            model = resnet101(weights=None)
            print("使用随机初始化的ResNet101")
        model.fc = _head(model.fc.in_features)

    elif model_name == "efficientnet_b0":
        from torchvision.models import efficientnet_b0, EfficientNet_B0_Weights
        if use_pretrained:
            model = efficientnet_b0(weights=EfficientNet_B0_Weights.IMAGENET1K_V1)
            print("使用ImageNet预训练的EfficientNet-B0")
        else:
            model = efficientnet_b0(weights=None)
            print("使用随机初始化的EfficientNet-B0")
        # EfficientNet exposes its head as `classifier`, not `fc`.
        model.classifier = _head(model.classifier[1].in_features)

    else:
        raise ValueError(f"不支持的模型: {model_name}")

    return model


def create_dataloaders(data_dir, batch_size=16, dataset_size=5000, img_size=224, num_classes=10):
    """Build train/test DataLoaders over the synthetic remote-sensing data.

    The dataset is split 80/20 into train/test. `data_dir` is accepted for
    interface compatibility but unused here (samples are generated
    procedurally by ImprovedRemoteSensingDataset).

    Returns:
        (train_loader, test_loader) tuple of DataLoader objects.
    """
    print("准备改进的遥感图像分类数据集...")

    # 80/20 train/test split of the requested sample count.
    train_count = int(0.8 * dataset_size)
    test_count = dataset_size - train_count

    shared_kwargs = dict(num_classes=num_classes, img_size=img_size)
    train_dataset = ImprovedRemoteSensingDataset(
        size=train_count, split='train', augment=True, **shared_kwargs)
    test_dataset = ImprovedRemoteSensingDataset(
        size=test_count, split='test', augment=False, **shared_kwargs)

    # Training loader shuffles and drops the last partial batch; the test
    # loader keeps a fixed order and uses every sample.
    train_loader = DataLoader(
        train_dataset, batch_size=batch_size, shuffle=True,
        num_workers=4, pin_memory=True, drop_last=True)
    test_loader = DataLoader(
        test_dataset, batch_size=batch_size, shuffle=False,
        num_workers=4, pin_memory=True)

    print(f"训练集大小: {len(train_dataset)}")
    print(f"测试集大小: {len(test_dataset)}")

    return train_loader, test_loader


def train_model(model, train_loader, test_loader, device, args):
    """Train the model with anti-overfitting measures and early stopping.

    Uses label smoothing, AdamW with layered learning rates (pretrained
    backbone at 0.1x), OneCycleLR, gradient clipping, early stopping with
    patience 8, and restores the best checkpoint by test accuracy.

    Args:
        model: classification model to train (modified in place).
        train_loader, test_loader: DataLoader pairs of (images, labels).
        device: torch device for tensors.
        args: namespace with epochs, learning_rate, weight_decay,
            use_pretrained, output_dir.

    Returns:
        (model, best_test_acc, history_dict) where history_dict carries
        per-epoch losses/accuracies and the best test accuracy.
    """
    print("开始训练ResNet模型...")

    # Loss with label smoothing to soften overconfident targets.
    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    # Layered learning rates for pretrained backbones.
    if args.use_pretrained:
        # Backbone params train at 0.1x the classifier LR.
        # NOTE(review): the split keys on 'fc' appearing in the parameter
        # name, which matches ResNet heads but NOT EfficientNet's
        # 'classifier' head -- for efficientnet_b0 every parameter lands in
        # the backbone group. Confirm intended.
        backbone_params = []
        classifier_params = []

        for name, param in model.named_parameters():
            if 'fc' in name:
                classifier_params.append(param)
            else:
                backbone_params.append(param)

        optimizer = optim.AdamW([
            {'params': backbone_params, 'lr': args.learning_rate * 0.1},
            {'params': classifier_params, 'lr': args.learning_rate}
        ], weight_decay=args.weight_decay)
    else:
        optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    # One-cycle LR schedule, stepped per batch.
    # NOTE(review): passing a scalar max_lr applies the same peak LR to both
    # param groups, which likely overrides the 0.1x backbone LR set above --
    # a per-group max_lr list may be intended; confirm.
    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=args.learning_rate,
        epochs=args.epochs,
        steps_per_epoch=len(train_loader),
        pct_start=0.3
    )

    # Training history accumulators.
    train_losses = []
    train_accuracies = []
    test_accuracies = []

    best_test_acc = 0.0
    best_model_state = None
    patience_counter = 0
    max_patience = 8  # early-stop after 8 epochs without test-acc improvement

    # Training loop
    for epoch in range(args.epochs):
        # --- Training phase ---
        model.train()
        running_loss = 0.0
        correct_train = 0
        total_train = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, labels) in enumerate(pbar):
            images, labels = images.to(device), labels.to(device)

            # Forward pass
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)

            # Backward pass
            loss.backward()

            # Gradient clipping to stabilize training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()
            scheduler.step()

            # Running statistics
            running_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            total_train += labels.size(0)
            correct_train += (predicted == labels).sum().item()

            # Progress-bar readout
            current_lr = scheduler.get_last_lr()[0]
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'Acc': f'{100. * correct_train / total_train:.2f}%',
                'LR': f'{current_lr:.6f}'
            })

        # Epoch-average training loss and accuracy.
        avg_train_loss = running_loss / len(train_loader)
        train_acc = correct_train / total_train

        # --- Evaluation phase ---
        test_acc = evaluate_model(model, test_loader, device)

        # Record history
        train_losses.append(avg_train_loss)
        train_accuracies.append(train_acc)
        test_accuracies.append(test_acc)

        # Checkpoint the best model by test accuracy.
        if test_acc > best_test_acc:
            best_test_acc = test_acc
            best_model_state = copy.deepcopy(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train Acc: {train_acc:.4f}, '
              f'Test Acc: {test_acc:.4f}, '
              f'Best Test Acc: {best_test_acc:.4f}')

        # Early stopping
        if patience_counter >= max_patience:
            print(f"Early stopping triggered after {epoch + 1} epochs")
            break

    # Restore the best checkpoint before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试准确率: {best_test_acc:.4f}')

    # Plot the training curves.
    plot_training_curves(train_losses, train_accuracies, test_accuracies, args.output_dir)

    return model, best_test_acc, {
        'train_losses': train_losses,
        'train_accuracies': train_accuracies,
        'test_accuracies': test_accuracies,
        'best_test_acc': best_test_acc
    }


def evaluate_model(model, dataloader, device):
    """Compute top-1 classification accuracy of `model` over `dataloader`.

    Args:
        model: classifier producing per-class logits.
        dataloader: iterable of (images, labels) batches.
        device: torch device the batches are moved to.

    Returns:
        Fraction of correctly classified samples (0 if the loader is empty).
    """
    model.eval()
    num_correct = 0
    num_seen = 0

    with torch.no_grad():
        for batch_images, batch_labels in dataloader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            predictions = model(batch_images).argmax(dim=1)
            num_seen += batch_labels.size(0)
            num_correct += (predictions == batch_labels).sum().item()

    # Guard against an empty dataloader.
    return num_correct / num_seen if num_seen > 0 else 0


def comprehensive_evaluation(model, dataloader, device, class_names):
    """Evaluate the model and return a dict of aggregate metrics.

    Args:
        model: classifier producing per-class logits.
        dataloader: iterable of (images, labels) batches.
        device: torch device the batches are moved to.
        class_names: accepted for interface compatibility; labels are
            reported as integer indices.

    Returns:
        dict with accuracy, macro precision/recall/F1, the confusion matrix
        (as nested lists), and the raw true/predicted label lists.
    """
    from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix

    model.eval()
    all_true = []
    all_pred = []
    all_probs = []

    with torch.no_grad():
        for batch_images, batch_labels in dataloader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            logits = model(batch_images)
            all_probs.extend(F.softmax(logits, dim=1).cpu().numpy())
            all_pred.extend(logits.argmax(dim=1).cpu().numpy())
            all_true.extend(batch_labels.cpu().numpy())

    # Aggregate metrics (macro-averaged across classes).
    accuracy = accuracy_score(all_true, all_pred)
    precision, recall, f1, _ = precision_recall_fscore_support(all_true, all_pred, average='macro')
    cm = confusion_matrix(all_true, all_pred)

    return {
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1_score': f1,
        'confusion_matrix': cm.tolist(),
        'y_true': all_true,
        'y_pred': all_pred
    }


def plot_training_curves(train_losses, train_accuracies, test_accuracies, output_dir):
    """Save a 3-panel training figure (loss, accuracy, overfitting gap) as PNG."""
    epochs = range(1, len(train_losses) + 1)

    plt.figure(figsize=(15, 5))

    # Panel 1: training loss over epochs.
    plt.subplot(1, 3, 1)
    plt.plot(epochs, train_losses, 'b-', label='Training Loss', linewidth=2)
    plt.title('Training Loss', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Loss', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 2: train vs. test accuracy on the same axes.
    plt.subplot(1, 3, 2)
    plt.plot(epochs, train_accuracies, 'b-', label='Training Accuracy', linewidth=2)
    plt.plot(epochs, test_accuracies, 'r-', label='Test Accuracy', linewidth=2)
    plt.title('Training and Test Accuracy', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Accuracy', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 3: the train-minus-test accuracy gap as an overfitting indicator.
    plt.subplot(1, 3, 3)
    gap = [train_acc - test_acc for train_acc, test_acc in zip(train_accuracies, test_accuracies)]
    plt.plot(epochs, gap, 'g-', label='Overfitting Gap', linewidth=2)
    plt.title('Overfitting Analysis', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Train Acc - Test Acc', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.axhline(y=0, color='k', linestyle='--', alpha=0.5)

    out_path = os.path.join(output_dir, 'training_curves.png')
    plt.tight_layout()
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {out_path}")


def visualize_samples(dataloader, output_dir, num_samples=16):
    """Save a 4x4 grid of de-normalized sample images with their class labels."""
    class_names = ['农田', '森林', '城市', '水体', '裸土', '草地', '道路', '工业区', '山地', '其他']

    fig, axes = plt.subplots(4, 4, figsize=(16, 16))
    fig.suptitle('改进的遥感图像分类数据样本', fontsize=16)

    images, labels = next(iter(dataloader))

    # Inverse of the ImageNet normalization applied by the dataset transforms
    # (hoisted out of the loop — it is the same for every sample).
    mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)

    for idx in range(min(num_samples, len(images))):
        ax = axes[idx // 4, idx % 4]

        # De-normalize, clamp to displayable range, and convert to HWC numpy.
        sample = images[idx].clone() * std + mean
        sample = torch.clamp(sample, 0, 1).permute(1, 2, 0).numpy()

        ax.imshow(sample)
        ax.set_title(f'{class_names[labels[idx]]} (Class {labels[idx]})', fontsize=12)
        ax.axis('off')

    out_path = os.path.join(output_dir, 'data_samples.png')
    plt.tight_layout()
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据样本已保存到: {out_path}")


def get_model_size(model):
    """Return the in-memory footprint of *model* (parameters + buffers) in MB."""
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    total_bytes += sum(b.nelement() * b.element_size() for b in model.buffers())
    return total_bytes / (1024 * 1024)


def quantize_weights_improved(weight, bits=8):
    """改进的权重量化函数"""
    if bits >= 16:
        return weight.clone()

    # 处理全零权重
    if weight.abs().max() < 1e-8:
        return weight.clone()

    if bits == 8:
        # 8位对称量化
        max_val = weight.abs().max()
        scale = max_val / 127.0
        q_weight = torch.round(weight / scale)
        q_weight = torch.clamp(q_weight, -127, 127)
        dq_weight = q_weight * scale
        return dq_weight

    elif bits == 6:
        # 6位对称量化
        max_val = weight.abs().max()
        scale = max_val / 31.0
        q_weight = torch.round(weight / scale)
        q_weight = torch.clamp(q_weight, -31, 31)
        dq_weight = q_weight * scale
        return dq_weight

    else:  # 4位及以下
        # 4位对称量化
        max_val = weight.abs().max()
        scale = max_val / 7.0
        q_weight = torch.round(weight / scale)
        q_weight = torch.clamp(q_weight, -7, 7)
        dq_weight = q_weight * scale
        return dq_weight


def prune_weights_improved(weight, sparsity=0.2, layer_name=""):
    """Magnitude-prune *weight* with a layer-dependent, conservative sparsity.

    The requested *sparsity* is scaled down for earlier ResNet stages (which
    are more sensitive to pruning) and hard-capped at 0.8.  Returns the tuple
    (pruned_weight, keep_mask) where keep_mask is a boolean tensor marking the
    surviving weights.
    """
    if sparsity <= 0:
        return weight.clone(), torch.ones_like(weight).bool()

    # Per-layer scaling of the requested sparsity; first matching entry wins.
    scale_table = [
        (('conv1', 'bn1'), 0.5),   # stem: very conservative
        (('layer1',), 0.7),
        (('layer2',), 0.85),
        (('layer3',), 0.95),
        (('layer4',), 1.0),
        (('fc',), 0.8),            # classifier head: somewhat conservative
    ]
    effective_sparsity = sparsity
    for keys, factor in scale_table:
        if any(key in layer_name for key in keys):
            effective_sparsity = sparsity * factor
            break

    # Hard cap so a layer is never almost entirely zeroed out.
    effective_sparsity = min(effective_sparsity, 0.8)

    magnitudes = weight.abs().flatten()
    if magnitudes.numel() == 0:
        return weight.clone(), torch.ones_like(weight).bool()

    num_pruned = int(magnitudes.numel() * effective_sparsity)
    # Always leave at least one element un-pruned.
    num_pruned = min(num_pruned, magnitudes.numel() - 1)
    if num_pruned <= 0:
        return weight.clone(), torch.ones_like(weight).bool()

    try:
        # Threshold is the (num_pruned + 1)-th smallest magnitude; everything
        # at or above it survives.
        cutoff = torch.kthvalue(magnitudes, num_pruned + 1).values
        keep_mask = (weight.abs() >= cutoff)

        # Degenerate safety net: never return an all-zero mask.
        if keep_mask.sum().item() == 0:
            keep_mask = torch.zeros_like(weight, dtype=torch.bool)
            keep_mask.view(-1)[torch.argmax(magnitudes)] = True

    except Exception as e:
        print(f"剪枝失败: {layer_name}, 错误: {e}")
        # Fallback: quantile-based threshold instead of kthvalue.
        cutoff = torch.quantile(magnitudes, effective_sparsity)
        keep_mask = (weight.abs() >= cutoff)

    return weight * keep_mask.float(), keep_mask


def progressive_compress_model(model, target_bits=8, target_sparsity=0.2, steps=3):
    """Progressively prune (and finally quantize) a deep copy of *model*.

    The pruning budget is ramped up linearly over *steps* rounds so the
    network never takes the full sparsity hit at once; quantization to
    *target_bits* is applied only in the final round.  Weight tensors with 10
    or fewer elements are skipped.

    Returns (compressed_model, final_sparsity), where final_sparsity is the
    measured fraction of zero elements after the last round.  The input model
    itself is left unmodified.
    """
    # Guard against a non-positive step count: previously the loop would not
    # run and the final-sparsity computation crashed on unbound locals.
    steps = max(1, int(steps))

    print(f"🚀 渐进式压缩模型（{steps}步，目标: {target_bits}位, {target_sparsity}稀疏度）...")

    compressed_model = copy.deepcopy(model)

    # Per-round sparsity increment.
    sparsity_step = target_sparsity / steps
    current_sparsity = 0.0
    # Initialized here so the statistics are always defined after the loop.
    total_elements = 0
    zero_elements = 0

    for step in range(steps):
        current_sparsity += sparsity_step
        print(f"第 {step + 1}/{steps} 步压缩，当前稀疏度目标: {current_sparsity:.3f}")

        # Reset per-round statistics.
        total_elements = 0
        zero_elements = 0

        # Compress every sizable weight tensor in place.
        for name, param in compressed_model.named_parameters():
            if 'weight' in name and param.numel() > 10:
                with torch.no_grad():
                    original_param = param.data.clone()

                    # Quantize only on the final round.
                    if step == steps - 1:
                        quantized_weight = quantize_weights_improved(original_param, target_bits)
                    else:
                        quantized_weight = original_param

                    # Prune up to the current round's sparsity target.
                    pruned_weight, mask = prune_weights_improved(quantized_weight, current_sparsity, name)

                    param.copy_(pruned_weight)

                    # Accumulate sparsity statistics.
                    total_elements += param.numel()
                    zero_elements += (param.data == 0).sum().item()

        overall_sparsity = zero_elements / total_elements if total_elements > 0 else 0
        print(f"  步骤 {step + 1} 完成，实际稀疏度: {overall_sparsity:.4f}")

    final_sparsity = zero_elements / total_elements if total_elements > 0 else 0
    print(f"✅ 渐进式压缩完成，最终稀疏度: {final_sparsity:.4f}")

    return compressed_model, final_sparsity


def finetune_model_improved(model, train_loader, test_loader, device, epochs=10, lr=0.0001):
    """Constrained, layer-wise fine-tuning of a compressed model.

    Fine-tunes in phases (classifier head first, then progressively earlier
    ResNet stages), re-applying the recorded pruning masks after every
    optimizer step so the sparsity achieved by compression is never lost.
    Each phase gets a halved learning rate and a capped number of batches.
    Returns (model loaded with its best-performing state, best test accuracy).
    """
    print(f"🎯 开始改进的约束微调 ({epochs} 轮)...")

    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    # Record the initial sparsity pattern of every sizable weight tensor so it
    # can be enforced as a hard constraint throughout training
    initial_sparsity_masks = {}
    for name, param in model.named_parameters():
        if 'weight' in name and param.numel() > 100:
            initial_sparsity_masks[name] = (param.data != 0)

    print(f"🔒 记录了 {len(initial_sparsity_masks)} 层的压缩约束")

    # Layer-wise fine-tuning schedule: later (less sensitive) layers first
    layer_groups = [
        ['fc'],  # classifier head first
        ['layer4'],  # then the last residual stage
        ['layer3'],  # then layer3
        ['layer2', 'layer1']  # earlier stages last
    ]

    initial_acc = evaluate_model(model, test_loader, device)
    print(f"🎯 微调前准确率: {initial_acc:.4f}")

    best_acc = initial_acc
    best_state = copy.deepcopy(model.state_dict())

    for phase, layer_names in enumerate(layer_groups):
        print(f"  阶段 {phase + 1}/{len(layer_groups)}: 微调 {layer_names}")

        # Freeze every parameter...
        for param in model.parameters():
            param.requires_grad = False

        # ...then unfreeze only the parameters belonging to this phase's layers
        for name, param in model.named_parameters():
            if any(layer_name in name for layer_name in layer_names):
                param.requires_grad = True

        # Halve the learning rate for each successive phase
        phase_lr = lr * (0.5 ** phase)
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                               lr=phase_lr, weight_decay=1e-6)

        phase_epochs = max(1, epochs // len(layer_groups))

        for epoch in range(phase_epochs):
            model.train()
            running_loss = 0.0

            for batch_idx, (images, labels) in enumerate(train_loader):
                if batch_idx >= 50:  # cap the number of training steps per phase
                    break

                images, labels = images.to(device), labels.to(device)

                optimizer.zero_grad()
                outputs = model(images)
                loss = criterion(outputs, labels)
                loss.backward()

                # Very small clipping norm keeps the updates conservative
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=0.1)

                optimizer.step()

                # Re-apply the recorded masks so pruned weights stay exactly zero
                with torch.no_grad():
                    for name, param in model.named_parameters():
                        if name in initial_sparsity_masks:
                            param.data = param.data * initial_sparsity_masks[name].float()

                running_loss += loss.item()

            # Evaluate after each phase epoch and track the best checkpoint
            test_acc = evaluate_model(model, test_loader, device)
            print(f"    阶段 {phase + 1} Epoch {epoch + 1}: 测试准确率 {test_acc:.4f}")

            if test_acc > best_acc:
                best_acc = test_acc
                best_state = copy.deepcopy(model.state_dict())

    # Restore gradient tracking on all parameters
    for param in model.parameters():
        param.requires_grad = True

    # Reload the best checkpoint seen during fine-tuning and report
    model.load_state_dict(best_state)
    final_acc = evaluate_model(model, test_loader, device)

    print(f"✅ 改进微调完成! 最终准确率: {final_acc:.4f} (提升: {final_acc - initial_acc:.4f})")

    return model, final_acc


def save_compressed_model_improved(model, model_info, path):
    """Save a compressed model checkpoint, gzip-compressing it when worthwhile.

    The checkpoint (state dict + metadata) is written with torch.save; if gzip
    shrinks the file by more than 20%, the gzipped bytes replace the original
    file in place.  Returns the final on-disk size in bytes.

    NOTE(review): when the gzip branch wins, *path* holds gzip data despite
    its .pth extension, so a plain torch.load(path) would fail — callers must
    decompress first.  This matches the original behavior; confirm downstream
    loaders account for it.
    """
    print("🔄 保存压缩模型...")

    # Serialize state dict plus metadata in one checkpoint
    torch.save({
        'state_dict': model.state_dict(),
        'model_info': model_info,
        'compression_applied': True
    }, path)

    file_size = os.path.getsize(path)

    # Best-effort gzip pass on top of torch's serialization
    try:
        import shutil  # local import: only needed for this optional step

        with open(path, 'rb') as f_in:
            with gzip.open(path + '.gz', 'wb', compresslevel=9) as f_out:
                # Stream in chunks instead of loading the whole checkpoint
                # into memory at once (the original used f_in.read()).
                shutil.copyfileobj(f_in, f_out)

        gz_size = os.path.getsize(path + '.gz')

        # Keep the gzipped file only if it saves more than 20%
        if gz_size < file_size * 0.8:
            os.rename(path + '.gz', path)
            file_size = gz_size
            print(f"🗜️ Gzip压缩后大小: {file_size / (1024 * 1024):.2f} MB")
        else:
            os.remove(path + '.gz')

    except Exception as e:
        print(f"⚠️ Gzip压缩失败: {e}")

    print(f"💾 最终文件大小: {file_size / (1024 * 1024):.2f} MB")
    return file_size


def analyze_resnet_structure(model):
    """Print a per-parameter breakdown of the model plus total count and size."""
    print("ResNet模型结构分析:")
    grand_total = 0
    for param_name, param in model.named_parameters():
        count = param.numel()
        grand_total += count
        print(f"{param_name:40s} {str(list(param.shape)):25s} {count:>10,}")

    print(f"\n总参数量: {grand_total:,}")
    print(f"模型大小: {get_model_size(model):.2f} MB")


def main():
    """Entry point: train and/or compress the ResNet remote-sensing classifier.

    Driven by CLI flags (see get_args): mode 'train', 'compress', or 'both'.
    Training produces the model weights plus training-curve plots; compression
    runs progressive prune+quantize, optional constrained fine-tuning, and
    writes a detailed JSON report with sizes, metrics, and confusion matrices.
    """
    args = get_args()

    # Make sure the output directory exists
    os.makedirs(args.output_dir, exist_ok=True)

    # Select the compute device (GPU if available)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Resolve compression parameters: explicit --bits/--sparsity override the
    # named compression level
    if args.bits is not None and args.sparsity is not None:
        compression_params = {'bits': args.bits, 'sparsity': args.sparsity}
    else:
        compression_params = get_compression_params(args.compression_level)

    print(f"压缩级别: {args.compression_level}")
    print(f"压缩参数: {compression_params['bits']}位量化, {compression_params['sparsity']:.1%}稀疏度")

    # Build the model and move it to the device
    model = load_resnet_model(args.model, args.num_classes, args.use_pretrained)
    model = model.to(device)

    # Print a per-layer structure breakdown
    analyze_resnet_structure(model)

    trained_model_path = os.path.join(args.output_dir, f"{args.model}_trained.pth")
    training_history = None

    # ---- Training phase ----
    if args.mode in ['train', 'both']:
        print("=" * 50)
        print("开始训练阶段")
        print("=" * 50)

        # Build the train/test data loaders
        train_loader, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.dataset_size,
            args.img_size, args.num_classes
        )

        # Save a grid of example inputs for inspection
        visualize_samples(train_loader, args.output_dir)

        # Train the model
        model, best_acc, training_history = train_model(
            model, train_loader, test_loader, device, args
        )

        # Persist the trained weights
        torch.save(model.state_dict(), trained_model_path)
        print(f"训练好的模型已保存到: {trained_model_path}")

    # ---- Compression phase ----
    if args.mode in ['compress', 'both']:
        print("=" * 50)
        print("开始改进的压缩阶段")
        print("=" * 50)

        # In compress-only mode, load previously trained weights
        # (an explicit --pretrained_path wins over the default trained file)
        if args.mode == 'compress':
            if args.pretrained_path and os.path.exists(args.pretrained_path):
                model.load_state_dict(torch.load(args.pretrained_path, map_location=device))
                print(f"已加载预训练模型: {args.pretrained_path}")
            elif os.path.exists(trained_model_path):
                model.load_state_dict(torch.load(trained_model_path, map_location=device))
                print(f"已加载训练好的模型: {trained_model_path}")

        # Build a test loader for evaluation
        _, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.dataset_size,
            args.img_size, args.num_classes
        )

        # Baseline: evaluate the uncompressed model
        print("评估原始模型...")
        original_accuracy = evaluate_model(model, test_loader, device)
        original_size = get_model_size(model)

        # Save the uncompressed weights to measure on-disk size
        original_path = os.path.join(args.output_dir, f"{args.model}_original.pth")
        torch.save(model.state_dict(), original_path)
        original_file_size = os.path.getsize(original_path) / (1024 * 1024)

        print(f"📊 原始模型准确率: {original_accuracy:.4f}")
        print(f"💾 原始模型内存大小: {original_size:.2f} MB")
        print(f"📁 原始模型文件大小: {original_file_size:.2f} MB")

        # Full metric report (precision/recall/F1/confusion matrix) for the baseline
        class_names = ['农田', '森林', '城市', '水体', '裸土', '草地', '道路', '工业区', '山地', '其他']
        original_eval = comprehensive_evaluation(model, test_loader, device, class_names)

        print(f"📈 原始模型详细评估:")
        print(f"  准确率: {original_eval['accuracy']:.4f}")
        print(f"  精确率: {original_eval['precision']:.4f}")
        print(f"  召回率: {original_eval['recall']:.4f}")
        print(f"  F1分数: {original_eval['f1_score']:.4f}")

        # Run progressive prune+quantize compression
        start_time = time.time()
        compressed_model, actual_sparsity = progressive_compress_model(
            model,
            compression_params['bits'],
            compression_params['sparsity'],
            steps=3
        )
        compression_time = time.time() - start_time
        print(f"⏱️ 渐进式压缩完成，耗时: {compression_time:.2f} 秒")

        # Evaluate the compressed model before any fine-tuning
        print("评估压缩后的模型...")
        compressed_model = compressed_model.to(device)
        compressed_accuracy = evaluate_model(compressed_model, test_loader, device)

        accuracy_drop = (original_accuracy - compressed_accuracy) * 100
        print(f"📉 压缩后模型准确率: {compressed_accuracy:.4f}")
        print(f"📊 准确率下降: {accuracy_drop:.2f}%")

        # Optional constrained fine-tuning to recover accuracy
        if args.finetune_epochs > 0:
            print("开始改进的约束微调...")
            train_loader, _ = create_dataloaders(
                args.data_dir, args.batch_size, args.dataset_size,
                args.img_size, args.num_classes
            )
            compressed_model, finetuned_accuracy = finetune_model_improved(
                compressed_model, train_loader, test_loader, device, args.finetune_epochs
            )
        else:
            finetuned_accuracy = compressed_accuracy

        # Full metric report for the (possibly fine-tuned) compressed model
        finetuned_eval = comprehensive_evaluation(compressed_model, test_loader, device, class_names)

        print(f"📈 微调后模型详细评估:")
        print(f"  准确率: {finetuned_eval['accuracy']:.4f}")
        print(f"  精确率: {finetuned_eval['precision']:.4f}")
        print(f"  召回率: {finetuned_eval['recall']:.4f}")
        print(f"  F1分数: {finetuned_eval['f1_score']:.4f}")

        # In-memory size of the compressed model
        compressed_size = get_model_size(compressed_model)

        # Persist the compressed model together with its compression metadata
        compressed_path = os.path.join(args.output_dir, f"{args.model}_improved_compressed.pth")
        model_info = {
            'model_type': args.model,
            'bits': compression_params['bits'],
            'sparsity': compression_params['sparsity'],
            'actual_sparsity': actual_sparsity,
            'compression_method': 'progressive'
        }
        compressed_file_size_bytes = save_compressed_model_improved(compressed_model, model_info, compressed_path)
        compressed_file_size = compressed_file_size_bytes / (1024 * 1024)

        # Compression ratios (guard against divide-by-zero)
        memory_compression_ratio = original_size / compressed_size if compressed_size > 0 else 1.0
        file_compression_ratio = original_file_size / compressed_file_size if compressed_file_size > 0 else 1.0

        print(f"💾 压缩后模型内存大小: {compressed_size:.2f} MB")
        print(f"📁 压缩后模型文件大小: {compressed_file_size:.2f} MB")
        print(f"🗜️ 内存压缩率: {memory_compression_ratio:.2f}倍")
        print(f"🗜️ 文件压缩率: {file_compression_ratio:.2f}倍")

        # Side-by-side confusion matrices (original vs. compressed)
        plot_confusion_matrices(original_eval, finetuned_eval, class_names, args.output_dir)

        # Assemble and save the full JSON results report
        results = {
            'model': args.model,
            'task': 'improved_remote_sensing_classification',
            'num_classes': args.num_classes,
            'dataset_size': args.dataset_size,
            'compression_level': args.compression_level,
            'compression_params': {
                'bits': compression_params['bits'],
                'target_sparsity': compression_params['sparsity'],
                'actual_sparsity': float(actual_sparsity),
                'method': 'progressive'
            },
            'model_sizes': {
                'original_memory_mb': float(original_size),
                'original_file_mb': float(original_file_size),
                'compressed_memory_mb': float(compressed_size),
                'compressed_file_mb': float(compressed_file_size),
                'memory_compression_ratio': float(memory_compression_ratio),
                'file_compression_ratio': float(file_compression_ratio)
            },
            'performance_metrics': {
                'original': {
                    'accuracy': float(original_eval['accuracy']),
                    'precision': float(original_eval['precision']),
                    'recall': float(original_eval['recall']),
                    'f1_score': float(original_eval['f1_score'])
                },
                'compressed': {
                    'accuracy': float(compressed_accuracy),
                },
                'finetuned': {
                    'accuracy': float(finetuned_eval['accuracy']),
                    'precision': float(finetuned_eval['precision']),
                    'recall': float(finetuned_eval['recall']),
                    'f1_score': float(finetuned_eval['f1_score'])
                },
                'accuracy_drop_final': float((original_eval['accuracy'] - finetuned_eval['accuracy']) * 100)
            },
            'training_history': training_history,
            'confusion_matrices': {
                'original': original_eval['confusion_matrix'],
                'finetuned': finetuned_eval['confusion_matrix']
            }
        }

        results_path = os.path.join(args.output_dir, f"{args.model}_improved_results.json")
        with open(results_path, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2, ensure_ascii=False)

        # Final summary statistics
        print("\n" + "=" * 70)
        print("🏆 改进的ResNet50压缩结果统计")
        print("=" * 70)

        if training_history:
            print(f"🎯 训练最佳准确率: {training_history['best_test_acc']:.4f}")

        print(f"📊 原始模型准确率: {original_eval['accuracy']:.4f}")
        print(f"📉 压缩后模型准确率: {compressed_accuracy:.4f}")
        print(f"🎯 微调后模型准确率: {finetuned_eval['accuracy']:.4f}")
        print(f"📊 最终准确率下降: {(original_eval['accuracy'] - finetuned_eval['accuracy']) * 100:.2f}%")
        print(f"🗜️ 内存压缩率: {memory_compression_ratio:.2f}倍")
        print(f"🗜️ 文件压缩率: {file_compression_ratio:.2f}倍")
        print(f"🕳️ 实际稀疏度: {actual_sparsity:.4f}")

        # Grade the compression outcome by accuracy drop and achieved ratio
        accuracy_drop_percent = (original_eval['accuracy'] - finetuned_eval['accuracy']) * 100
        compression_success = file_compression_ratio > 1.5

        if accuracy_drop_percent < 3 and compression_success:
            compression_grade = "🏆 优秀"
        elif accuracy_drop_percent < 6 and compression_success:
            compression_grade = "✅ 良好"
        elif accuracy_drop_percent < 10 and compression_success:
            compression_grade = "⚠️ 可接受"
        else:
            compression_grade = "❌ 需要调整"

        print(f"🏆 压缩效果评级: {compression_grade}")

        # Actionable suggestions based on the outcome
        print("\n📋 改进建议:")
        if accuracy_drop_percent > 5:
            print("  • 考虑降低压缩级别或增加微调轮数")
        if file_compression_ratio < 2:
            print("  • 可以尝试更高的压缩级别")
        if original_eval['accuracy'] < 0.8:
            print("  • 建议先提高原始模型性能再进行压缩")

        print(f"💾 详细结果已保存到: {results_path}")

    print(f"\n🎉 改进的ResNet50遥感图像分类模型训练和压缩完成!")


def plot_confusion_matrices(original_eval, finetuned_eval, class_names, output_dir):
    """Save side-by-side annotated confusion matrices (original vs. compressed)."""
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 8))

    panels = [
        (ax1, original_eval, f"Original Model\nAccuracy: {original_eval['accuracy']:.3f}"),
        (ax2, finetuned_eval, f"Compressed Model\nAccuracy: {finetuned_eval['accuracy']:.3f}"),
    ]

    n = len(class_names)
    for ax, eval_result, title in panels:
        cm = eval_result['confusion_matrix']
        image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
        ax.figure.colorbar(image, ax=ax)
        ax.set(xticks=np.arange(n),
               yticks=np.arange(n),
               xticklabels=class_names, yticklabels=class_names,
               title=title,
               ylabel='True label',
               xlabel='Predicted label')

        # Annotate each cell, switching to white text on dark backgrounds.
        cutoff = np.array(cm).max() / 2.
        for row in range(n):
            for col in range(n):
                ax.text(col, row, format(cm[row][col], 'd'),
                        ha="center", va="center",
                        color="white" if cm[row][col] > cutoff else "black")

        plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'confusion_matrices_comparison.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"混淆矩阵对比已保存到: {os.path.join(output_dir, 'confusion_matrices_comparison.png')}")


# Script entry point: run the full train/compress pipeline from the CLI.
if __name__ == "__main__":
    main()