#!/usr/bin/env python
"""
遥感图像分类模型训练和压缩示例 - 修复版
针对分类任务优化压缩策略，添加微调机制
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import json
import gzip
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm


# 创建一个改进的遥感图像分类数据集类
class RemoteSensingDataset(Dataset):
    """Synthetic remote-sensing classification dataset.

    Generates ``size`` RGB images of shape (3, img_size, img_size). Each image
    gets a procedurally drawn land-cover texture selected by its class id
    (0 farmland, 1 forest, 2 urban, 3 water, 4 bare soil, 5 grassland,
    6 road, 7 industrial, 8 mountain, else "other"). Labels cycle through the
    classes as ``i % num_classes``, so classes are balanced.
    All samples are generated eagerly in ``__init__`` and held in memory.
    """

    def __init__(self, size=2000, num_classes=10, img_size=224):
        # size: total number of samples to pre-generate.
        # num_classes: number of distinct land-cover classes.
        # img_size: side length of the square images in pixels.
        self.size = size
        self.num_classes = num_classes
        self.img_size = img_size

        # Pre-generate every sample up front (trades memory for a fast
        # __getitem__; generation uses random numbers so it is not repeatable
        # unless the torch/numpy seeds are fixed by the caller).
        print(f"生成 {size} 个遥感图像分类样本...")
        self.data = []
        self.labels = []

        for i in tqdm(range(size)):
            img, label = self._generate_remote_sensing_image(i % num_classes)
            self.data.append(img)
            self.labels.append(label)

    def _generate_remote_sensing_image(self, class_id):
        """Draw one synthetic image with the texture of the given class.

        Returns (img, class_id) where img is a float tensor of shape
        (3, img_size, img_size) clamped to [0, 1].

        NOTE(review): the road branch assumes img_size > 100 (randint(50,
        img_size - 50)) and the industrial branch assumes img_size > 50 —
        smaller sizes would raise; confirm intended minimum image size.
        """
        img = torch.zeros(3, self.img_size, self.img_size)

        # Each class gets a characteristic base color plus a texture pattern.
        if class_id == 0:  # farmland
            # Green base + regular geometric pattern.
            base_color = torch.tensor([0.2, 0.6, 0.2]).view(3, 1, 1)
            img += base_color
            # Horizontal stripes every 20 px mimic cultivated rows.
            for i in range(0, self.img_size, 20):
                img[:, i:i + 10, :] += torch.rand(3, min(10, self.img_size - i), self.img_size) * 0.1

        elif class_id == 1:  # forest
            # Dark-green base + random texture.
            base_color = torch.tensor([0.1, 0.4, 0.1]).view(3, 1, 1)
            img += base_color
            # Gaussian noise simulates canopy texture.
            noise = torch.randn(3, self.img_size, self.img_size) * 0.15
            img += noise

        elif class_id == 2:  # urban
            # Gray base + geometric structures.
            base_color = torch.tensor([0.5, 0.5, 0.5]).view(3, 1, 1)
            img += base_color
            # Randomly placed rectangular "buildings" on a 30 px grid.
            for i in range(0, self.img_size, 30):
                for j in range(0, self.img_size, 30):
                    if np.random.random() > 0.3:
                        h, w = np.random.randint(10, 25), np.random.randint(10, 25)
                        img[:, i:i + h, j:j + w] += torch.rand(3, 1, 1) * 0.3

        elif class_id == 3:  # water
            # Blue base + smooth texture.
            base_color = torch.tensor([0.1, 0.3, 0.8]).view(3, 1, 1)
            img += base_color
            # Low-amplitude 2-D sine pattern as ripples.
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
            wave = 0.05 * torch.sin(0.1 * x) * torch.sin(0.1 * y)
            img += wave.unsqueeze(0)

        elif class_id == 4:  # bare soil
            # Brown base + random texture.
            base_color = torch.tensor([0.6, 0.4, 0.2]).view(3, 1, 1)
            img += base_color
            # Gaussian noise as soil texture.
            texture = torch.randn(3, self.img_size, self.img_size) * 0.2
            img += texture

        elif class_id == 5:  # grassland
            # Light-green base + uniform texture.
            base_color = torch.tensor([0.3, 0.7, 0.3]).view(3, 1, 1)
            img += base_color
            # Uniform noise as grass texture.
            grass_texture = torch.rand(3, self.img_size, self.img_size) * 0.15
            img += grass_texture

        elif class_id == 6:  # road
            # Dark-gray base + one linear feature.
            base_color = torch.tensor([0.3, 0.3, 0.3]).view(3, 1, 1)
            img += base_color
            # Draw either a horizontal or a vertical 10 px-wide road.
            if np.random.random() > 0.5:  # horizontal road
                road_y = np.random.randint(50, self.img_size - 50)
                img[:, road_y - 5:road_y + 5, :] = torch.tensor([0.2, 0.2, 0.2]).view(3, 1, 1)
            else:  # vertical road
                road_x = np.random.randint(50, self.img_size - 50)
                img[:, :, road_x - 5:road_x + 5] = torch.tensor([0.2, 0.2, 0.2]).view(3, 1, 1)

        elif class_id == 7:  # industrial area
            # Mixed tones + irregular structures.
            base_color = torch.tensor([0.4, 0.4, 0.5]).view(3, 1, 1)
            img += base_color
            # 3-7 randomly colored/sized industrial buildings.
            for _ in range(np.random.randint(3, 8)):
                x, y = np.random.randint(0, self.img_size - 50), np.random.randint(0, self.img_size - 50)
                w, h = np.random.randint(20, 50), np.random.randint(20, 50)
                color = torch.rand(3, 1, 1) * 0.4 + 0.2
                img[:, y:y + h, x:x + w] = color

        elif class_id == 8:  # mountain
            # Brown-green mix + elevation-like texture.
            base_color = torch.tensor([0.4, 0.5, 0.3]).view(3, 1, 1)
            img += base_color
            # Large-wavelength sine/cosine product as terrain relief.
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
            elevation = 0.2 * torch.sin(0.03 * x) * torch.cos(0.03 * y)
            img += elevation.unsqueeze(0)

        else:  # any remaining class
            # Random color and texture.
            base_color = torch.rand(3, 1, 1) * 0.6 + 0.2
            img += base_color
            noise = torch.randn(3, self.img_size, self.img_size) * 0.1
            img += noise

        # Clamp back into the displayable [0, 1] range.
        img = torch.clamp(img, 0, 1)

        return img, class_id

    def __len__(self):
        """Return the number of pre-generated samples."""
        return self.size

    def __getitem__(self, idx):
        """Return the (image_tensor, int_label) pair at ``idx``."""
        return self.data[idx], self.labels[idx]


# 创建自定义的遥感分类网络
class RemoteSensingNet(nn.Module):
    """CNN for remote-sensing image classification.

    Four convolutional stages (64 -> 128 -> 256 -> 512 channels) with
    BatchNorm/ReLU, followed by global average pooling and a two-layer,
    dropout-regularized classifier head.
    """

    def __init__(self, num_classes=10):
        super(RemoteSensingNet, self).__init__()

        def conv_bn_relu(c_in, c_out):
            # A 3x3 same-padding conv followed by BN + in-place ReLU.
            return [
                nn.Conv2d(c_in, c_out, 3, padding=1),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            ]

        # Stage 1: aggressive downsampling (stride-2 7x7 conv + stride-2 pool).
        layers = [
            nn.Conv2d(3, 64, 7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, stride=2, padding=1),
        ]
        # Stage 2: two conv blocks, then halve the spatial resolution.
        layers += conv_bn_relu(64, 128) + conv_bn_relu(128, 128) + [nn.MaxPool2d(2)]
        # Stage 3: same pattern at 256 channels.
        layers += conv_bn_relu(128, 256) + conv_bn_relu(256, 256) + [nn.MaxPool2d(2)]
        # Stage 4: final pair of conv blocks at 512 channels (no pooling).
        layers += conv_bn_relu(256, 512) + conv_bn_relu(512, 512)

        # NOTE: layer order/count matches the original Sequential exactly,
        # so state_dict keys (features.0.weight, ...) are unchanged.
        self.features = nn.Sequential(*layers)

        # Collapse whatever spatial size remains to 1x1.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        # Classifier head: 512 -> 256 -> num_classes with dropout.
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(512, 256),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):
        """Map an image batch (N, 3, H, W) to class logits (N, num_classes)."""
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flat = torch.flatten(pooled, 1)
        return self.classifier(flat)


def get_args():
    """Build and parse the command-line interface for this script.

    Returns an argparse.Namespace covering model choice, data/output paths,
    training hyper-parameters, compression settings and the run mode.
    """
    p = argparse.ArgumentParser(description="遥感图像分类模型训练和压缩示例")
    add = p.add_argument

    # Model architecture.
    add("--model", "-m", default="custom",
        choices=["custom", "resnet18", "resnet50", "efficientnet_b0", "vit_small"],
        help="要使用的模型架构")

    # Data and output locations.
    add("--data-dir", "-d", default="./data", help="遥感数据集目录路径")
    add("--output-dir", "-o", default="./output", help="模型和结果的输出目录")
    add("--num-classes", type=int, default=10, help="分类类别数量 (默认: 10)")

    # Training hyper-parameters.
    add("--epochs", "-e", type=int, default=30, help="训练轮数 (默认: 30)")
    add("--batch-size", "-b", type=int, default=32, help="批次大小 (默认: 32)")
    add("--learning-rate", "-lr", type=float, default=0.001, help="学习率 (默认: 0.001)")
    add("--weight-decay", type=float, default=1e-4, help="权重衰减 (默认: 1e-4)")
    add("--dataset-size", type=int, default=2000, help="数据集大小 (默认: 2000)")
    add("--img-size", type=int, default=224, help="输入图像大小 (默认: 224)")

    # Compression settings (presets can be overridden by --bits/--sparsity).
    add("--compression-level", choices=['conservative', 'moderate', 'aggressive'],
        default='conservative',
        help="压缩级别: conservative(保守), moderate(中等), aggressive(激进)")
    add("--bits", type=int, default=None, help="量化位数 (覆盖compression-level设置)")
    add("--sparsity", type=float, default=None, help="剪枝稀疏度 (覆盖compression-level设置)")
    add("--finetune-epochs", type=int, default=10, help="压缩后微调轮数 (默认: 10)")

    # Run mode.
    add("--mode", choices=['train', 'compress', 'both'], default='both',
        help="运行模式: train(仅训练), compress(仅压缩), both(训练+压缩)")
    add("--pretrained-path", type=str, default=None, help="预训练模型路径 (用于仅压缩模式)")
    add("--use-pretrained", action="store_true", help="使用预训练权重初始化（仅适用于标准模型）")

    return p.parse_args()


def get_compression_params(level):
    """Map a compression-level name to quantization/pruning settings.

    Classification accuracy is sensitive to weight perturbation, so even the
    'aggressive' preset stays relatively mild.

    Args:
        level: 'conservative', 'moderate' or 'aggressive'.

    Returns:
        dict with 'bits' (quantization width) and 'sparsity' (pruning ratio).
    """
    bits, sparsity = {
        'conservative': (8, 0.2),
        'moderate': (6, 0.4),
        'aggressive': (4, 0.6),
    }[level]
    return {'bits': bits, 'sparsity': sparsity}


def load_classification_model(model_name, num_classes=10, use_pretrained=False):
    """Instantiate a classification network by name.

    Supported names: 'custom' (RemoteSensingNet), 'resnet18', 'resnet50',
    'efficientnet_b0', and 'vit_small' (backed by torchvision's vit_b_16).
    With ``use_pretrained`` set for torchvision models, ImageNet weights are
    loaded and the classification head is replaced to match ``num_classes``.

    Raises:
        ValueError: if ``model_name`` is not one of the supported names.
    """
    print(f"加载 {model_name} 模型...")

    if model_name == "custom":
        model = RemoteSensingNet(num_classes=num_classes)
        print("使用自定义遥感分类网络")
        return model

    if model_name == "resnet18":
        from torchvision.models import resnet18, ResNet18_Weights
        if not use_pretrained:
            return resnet18(num_classes=num_classes)
        model = resnet18(weights=ResNet18_Weights.DEFAULT)
        # Replace the final FC layer for the requested class count.
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        return model

    if model_name == "resnet50":
        from torchvision.models import resnet50, ResNet50_Weights
        if not use_pretrained:
            return resnet50(num_classes=num_classes)
        model = resnet50(weights=ResNet50_Weights.DEFAULT)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        return model

    if model_name == "efficientnet_b0":
        from torchvision.models import efficientnet_b0, EfficientNet_B0_Weights
        if not use_pretrained:
            return efficientnet_b0(num_classes=num_classes)
        model = efficientnet_b0(weights=EfficientNet_B0_Weights.DEFAULT)
        # EfficientNet's head is classifier[1].
        model.classifier[1] = nn.Linear(model.classifier[1].in_features, num_classes)
        return model

    if model_name == "vit_small":
        from torchvision.models import vit_b_16, ViT_B_16_Weights
        if not use_pretrained:
            return vit_b_16(num_classes=num_classes)
        model = vit_b_16(weights=ViT_B_16_Weights.DEFAULT)
        model.heads.head = nn.Linear(model.heads.head.in_features, num_classes)
        return model

    raise ValueError(f"不支持的模型: {model_name}")


def create_dataloaders(data_dir, batch_size=32, dataset_size=2000, img_size=224, num_classes=10):
    """Create train/test DataLoaders over the synthetic dataset.

    ``data_dir`` is accepted for interface compatibility but unused — the
    dataset is generated in memory, not read from disk.

    Returns:
        (train_loader, test_loader) with an 80/20 random split.
    """
    print("准备遥感图像分类数据集...")

    # Generate the full synthetic dataset.
    full_dataset = RemoteSensingDataset(
        size=dataset_size,
        num_classes=num_classes,
        img_size=img_size,
    )

    # 80/20 train/test split.
    n_train = int(0.8 * len(full_dataset))
    n_test = len(full_dataset) - n_train
    train_dataset, test_dataset = random_split(full_dataset, [n_train, n_test])

    # Shared loader settings; only shuffling differs between splits.
    loader_kwargs = {'batch_size': batch_size, 'num_workers': 2, 'pin_memory': True}
    train_loader = DataLoader(train_dataset, shuffle=True, **loader_kwargs)
    test_loader = DataLoader(test_dataset, shuffle=False, **loader_kwargs)

    print(f"训练集大小: {len(train_dataset)}")
    print(f"测试集大小: {len(test_dataset)}")

    return train_loader, test_loader


def train_model(model, train_loader, test_loader, device, args):
    """Train the classification model with Adam + step LR decay.

    Tracks per-epoch train loss/accuracy and test accuracy, keeps a copy of
    the weights from the best test-accuracy epoch, restores them at the end,
    and saves loss/accuracy curves to ``args.output_dir``.

    Args:
        model: nn.Module already moved to ``device``.
        train_loader / test_loader: DataLoaders yielding (images, labels).
        device: torch.device used for all tensors.
        args: parsed CLI namespace (uses epochs, learning_rate, weight_decay,
            output_dir).

    Returns:
        (model, best_test_acc, history_dict) where history_dict holds the
        per-epoch curves and the best test accuracy.
    """
    print("开始训练遥感图像分类模型...")

    # Loss, optimizer, and a schedule that decays LR 10x every 15 epochs.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.1)

    # Per-epoch history used for plotting and the returned summary.
    train_losses = []
    train_accuracies = []
    test_accuracies = []

    best_test_acc = 0.0
    best_model_state = None

    # Main training loop.
    for epoch in range(args.epochs):
        # Training phase.
        model.train()
        running_loss = 0.0
        correct_train = 0
        total_train = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, labels) in enumerate(pbar):
            images, labels = images.to(device), labels.to(device)

            # Forward pass.
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)

            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()

            # Accumulate loss and top-1 accuracy statistics.
            running_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            total_train += labels.size(0)
            correct_train += (predicted == labels).sum().item()

            # Live metrics on the progress bar.
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'Acc': f'{100. * correct_train / total_train:.2f}%'
            })

        # Epoch averages.
        avg_train_loss = running_loss / len(train_loader)
        train_acc = correct_train / total_train

        # Evaluation on the held-out split.
        test_acc = evaluate_model(model, test_loader, device)

        # Record history.
        train_losses.append(avg_train_loss)
        train_accuracies.append(train_acc)
        test_accuracies.append(test_acc)

        # Keep a snapshot of the best-performing weights.
        if test_acc > best_test_acc:
            best_test_acc = test_acc
            best_model_state = copy.deepcopy(model.state_dict())

        # Step the LR schedule once per epoch.
        scheduler.step()

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train Acc: {train_acc:.4f}, '
              f'Test Acc: {test_acc:.4f}')

    # Restore the best checkpoint seen during training.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试准确率: {best_test_acc:.4f}')

    # Save loss/accuracy curves alongside the model.
    plot_training_curves(train_losses, train_accuracies, test_accuracies, args.output_dir)

    return model, best_test_acc, {
        'train_losses': train_losses,
        'train_accuracies': train_accuracies,
        'test_accuracies': test_accuracies,
        'best_test_acc': best_test_acc
    }


def finetune_model(model, train_loader, test_loader, device, epochs=10, lr=0.0001):
    """Fine-tune a compressed model to recover lost accuracy.

    Trains with Adam at a deliberately small learning rate, tracks the best
    test accuracy across epochs, and restores those weights before returning.

    Returns:
        (model, final_acc) with the best-performing weights loaded.
    """
    print(f"开始微调模型 ({epochs} 轮)...")

    criterion = nn.CrossEntropyLoss()
    # Small LR + light weight decay: nudge weights without undoing compression.
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)

    initial_acc = evaluate_model(model, test_loader, device)
    print(f"微调前准确率: {initial_acc:.4f}")

    best_acc = initial_acc
    best_state = copy.deepcopy(model.state_dict())

    for epoch in range(epochs):
        model.train()
        epoch_loss = 0.0
        n_correct = 0
        n_seen = 0

        progress = tqdm(train_loader, desc=f'Finetune {epoch + 1}/{epochs}')
        for images, labels in progress:
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            logits = model(images)
            batch_loss = criterion(logits, labels)
            batch_loss.backward()
            optimizer.step()

            epoch_loss += batch_loss.item()
            preds = logits.argmax(dim=1)
            n_seen += labels.size(0)
            n_correct += (preds == labels).sum().item()

            progress.set_postfix({
                'Loss': f'{batch_loss.item():.4f}',
                'Acc': f'{100. * n_correct / n_seen:.2f}%'
            })

        # Per-epoch evaluation and reporting.
        test_acc = evaluate_model(model, test_loader, device)
        avg_loss = epoch_loss / len(train_loader)
        train_acc = n_correct / n_seen

        print(
            f'Finetune Epoch [{epoch + 1}/{epochs}] - Loss: {avg_loss:.4f}, Train Acc: {train_acc:.4f}, Test Acc: {test_acc:.4f}')

        # Remember the best weights seen so far.
        if test_acc > best_acc:
            best_acc = test_acc
            best_state = copy.deepcopy(model.state_dict())

    # Roll back to the best checkpoint and report the net improvement.
    model.load_state_dict(best_state)
    final_acc = evaluate_model(model, test_loader, device)

    print(f"微调完成! 最终准确率: {final_acc:.4f} (提升: {final_acc - initial_acc:.4f})")

    return model, final_acc


def evaluate_model(model, dataloader, device):
    """Compute top-1 classification accuracy of ``model`` over ``dataloader``.

    Runs in eval mode with gradients disabled. Returns accuracy in [0, 1],
    or 0 when the loader yields no samples.
    """
    model.eval()
    n_correct = 0
    n_total = 0

    with torch.no_grad():
        for images, labels in dataloader:
            images = images.to(device)
            labels = labels.to(device)
            preds = model(images).argmax(dim=1)
            n_total += labels.size(0)
            n_correct += (preds == labels).sum().item()

    return n_correct / n_total if n_total > 0 else 0


def plot_training_curves(train_losses, train_accuracies, test_accuracies, output_dir):
    """Save a two-panel figure (loss, accuracy) of the training run.

    Writes ``training_curves.png`` into ``output_dir`` at 300 dpi.
    """
    xs = range(1, len(train_losses) + 1)

    fig = plt.figure(figsize=(12, 4))

    # Left panel: training loss per epoch.
    ax_loss = fig.add_subplot(1, 2, 1)
    ax_loss.plot(xs, train_losses, 'b-', label='Training Loss')
    ax_loss.set_title('Training Loss')
    ax_loss.set_xlabel('Epoch')
    ax_loss.set_ylabel('Loss')
    ax_loss.legend()
    ax_loss.grid(True)

    # Right panel: train vs. test accuracy per epoch.
    ax_acc = fig.add_subplot(1, 2, 2)
    ax_acc.plot(xs, train_accuracies, 'b-', label='Training Accuracy')
    ax_acc.plot(xs, test_accuracies, 'r-', label='Test Accuracy')
    ax_acc.set_title('Training and Test Accuracy')
    ax_acc.set_xlabel('Epoch')
    ax_acc.set_ylabel('Accuracy')
    ax_acc.legend()
    ax_acc.grid(True)

    fig.tight_layout()
    out_path = os.path.join(output_dir, 'training_curves.png')
    fig.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close(fig)
    print(f"训练曲线已保存到: {out_path}")


def visualize_samples(dataloader, output_dir, num_samples=16):
    """Render a 4x4 grid of sample images with class labels to a PNG.

    Pulls a single batch from ``dataloader`` and writes ``data_samples.png``
    into ``output_dir`` at 300 dpi.
    """
    class_names = ['农田', '森林', '城市', '水体', '裸土', '草地', '道路', '工业区', '山地', '其他']

    fig, axes = plt.subplots(4, 4, figsize=(12, 12))
    fig.suptitle('遥感图像分类数据样本', fontsize=16)

    images, labels = next(iter(dataloader))

    for idx in range(min(num_samples, len(images))):
        ax = axes[idx // 4][idx % 4]
        # CHW tensor -> HWC array for imshow, clipped to the displayable range.
        pixels = np.clip(images[idx].permute(1, 2, 0).numpy(), 0, 1)
        ax.imshow(pixels)
        ax.set_title(f'{class_names[labels[idx]]} (Class {labels[idx]})')
        ax.axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'data_samples.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据样本已保存到: {os.path.join(output_dir, 'data_samples.png')}")


def get_model_size(model):
    """Return the in-memory size of ``model``'s parameters in megabytes.

    Counts only parameters (not buffers such as BatchNorm running stats).
    """
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    return total_bytes / (1024 * 1024)


def quantize_weights(weight, bits=8):
    """Simulate fixed-point quantization of a weight tensor.

    bits >= 16 is treated as full precision; bits == 8 uses symmetric
    quantization over [-127, 127]; lower bit widths use asymmetric (affine)
    quantization over the tensor's min/max range.

    Returns:
        (dequantized_weight, scale, zero_point, quantized_weight). The last
        three are None when no quantization was applied (bits >= 16, or a
        constant tensor in the low-bit path).
    """
    if bits >= 16:
        # Full precision: pass the weights through untouched.
        return weight.clone(), None, None, None

    if bits == 8:
        # Symmetric scheme: zero maps exactly to zero.
        peak = max(weight.abs().max(), 1e-8)  # guard against all-zero tensors
        scale = peak / 127.0
        q = torch.clamp(torch.round(weight / scale), -127, 127)
        return q * scale, scale, torch.tensor(0.0), q

    # Asymmetric scheme for lower bit widths: map [min, max] onto the grid.
    qmin = -(2 ** (bits - 1))
    qmax = 2 ** (bits - 1) - 1
    lo, hi = weight.min(), weight.max()

    if abs(hi - lo) < 1e-8:
        # Constant tensor: quantization would divide by ~zero; skip it.
        return weight.clone(), None, None, None

    scale = (hi - lo) / (qmax - qmin)
    zero_point = qmin - lo / scale

    q = torch.clamp(torch.round(weight / scale + zero_point), qmin, qmax)
    return (q - zero_point) * scale, scale, zero_point, q


def prune_weights(weight, sparsity=0.2):
    """Magnitude-based pruning, deliberately conservative for classification.

    Conv layers (4-D) keep the highest-importance filters and lightly prune
    inside them; fully-connected layers (2-D) are pruned at half the requested
    rate because classifier weights are accuracy-critical; all other shapes
    use the plain magnitude criterion.

    Args:
        weight: parameter tensor to prune.
        sparsity: requested fraction of weights to remove. Effective per-layer
            rates are scaled down (0.3x for conv, 0.5x for linear).

    Returns:
        (pruned_weight, mask) where mask marks the kept positions; mask is
        None when sparsity <= 0 and the weight is returned unchanged.
    """
    if sparsity <= 0:
        return weight.clone(), None

    weight_abs = weight.abs()

    if len(weight.shape) == 4:  # convolutional layer
        # Rank whole filters by total magnitude and keep the top share.
        filter_importance = weight_abs.sum(dim=(1, 2, 3))
        num_filters_to_keep = max(1, int(weight.shape[0] * (1 - sparsity * 0.3)))
        _, important_filters = torch.topk(filter_importance, num_filters_to_keep)

        # Start from a mask that keeps the surviving filters entirely.
        mask = torch.zeros_like(weight).bool()
        mask[important_filters] = True

        # Light magnitude pruning inside the surviving filters.
        remaining_weight = weight[important_filters]
        remaining_abs = remaining_weight.abs().flatten()
        k = int(remaining_abs.numel() * sparsity * 0.3)
        if 0 < k < remaining_abs.numel():
            threshold = torch.kthvalue(remaining_abs, k).values
            mask[important_filters] = remaining_weight.abs() >= threshold
    elif len(weight.shape) == 2:  # fully-connected layer: half-strength pruning
        weight_flat = weight_abs.flatten()
        k = int(weight_flat.numel() * sparsity * 0.5)
        if k >= weight_flat.numel():
            return torch.zeros_like(weight), torch.zeros_like(weight).bool()
        if k < 1:
            # Fix: torch.kthvalue requires k >= 1; for tiny tensors or low
            # sparsity k rounds to 0 — keep everything instead of crashing.
            mask = torch.ones_like(weight).bool()
        else:
            threshold = torch.kthvalue(weight_flat, k).values
            mask = weight_abs >= threshold
    else:
        # Any other shape (e.g. 1-D norm weights): standard magnitude pruning.
        weight_flat = weight_abs.flatten()
        k = int(weight_flat.numel() * sparsity)
        if k >= weight_flat.numel():
            return torch.zeros_like(weight), torch.zeros_like(weight).bool()
        if k < 1:
            # Fix: same k == 0 guard as above (kthvalue would raise).
            mask = torch.ones_like(weight).bool()
        else:
            threshold = torch.kthvalue(weight_flat, k).values
            mask = weight_abs >= threshold

    pruned = weight * mask.float()
    return pruned, mask


def compress_model(model, bits=8, sparsity=0.2):
    """Prune then fake-quantize model weights, collecting sparse storage info.

    Works on a deep copy of ``model``; the original is untouched. Classifier
    layers (parameter names containing 'classifier'/'fc'/'head') are treated
    more gently: half the requested sparsity and at least 8 quantization bits.
    Bias tensors are recorded uncompressed.

    Args:
        model: trained nn.Module to compress.
        bits: quantization bit width for non-classifier weight tensors.
        sparsity: target pruning ratio for non-classifier weight tensors.

    Returns:
        (compressed_model, model_info, overall_sparsity) where model_info
        holds per-layer sparse indices/values plus quantization metadata, and
        overall_sparsity is the achieved fraction of zeroed weight elements.
    """
    print(f"压缩模型（量化位数={bits}, 剪枝稀疏度={sparsity}）...")

    compressed_model = copy.deepcopy(model)
    compression_info = {}
    # model_info aliases compression_info under 'compressed_layers', so the
    # per-layer records added below are visible through both names.
    model_info = {
        'model_type': str(type(model).__name__),
        'bits': bits,
        'sparsity': sparsity,
        'compressed_layers': compression_info
    }

    total_elements = 0
    zero_elements = 0

    for name, param in compressed_model.named_parameters():
        if 'weight' in name and param.numel() > 1:  # only compress weight tensors; skip scalars
            with torch.no_grad():
                original_param = param.data.clone()
                total_elements += param.numel()

                # Gentler settings for classification-head layers.
                is_classifier = 'classifier' in name or 'fc' in name or 'head' in name
                current_sparsity = sparsity * 0.5 if is_classifier else sparsity
                current_bits = max(8, bits) if is_classifier else bits

                # Prune first...
                if current_sparsity > 0:
                    pruned_weight, mask = prune_weights(original_param, current_sparsity)
                    zero_elements += param.numel() - torch.count_nonzero(pruned_weight).item()
                else:
                    pruned_weight = original_param
                    mask = torch.ones_like(original_param).bool()

                # ...then quantize the surviving weights in place.
                if current_bits < 16:
                    quantized_weight, scale, zero_point, q_weight = quantize_weights(pruned_weight, current_bits)
                    if quantized_weight is not None:
                        param.copy_(quantized_weight)
                    else:
                        param.copy_(pruned_weight)
                        scale, zero_point, q_weight = None, None, None
                else:
                    param.copy_(pruned_weight)
                    scale, zero_point, q_weight = None, None, None

                # Record a sparse representation of this layer.
                if mask is not None:
                    nonzero_mask = mask & (param.data.abs() > 1e-8)  # keep only truly non-zero entries
                    if torch.any(nonzero_mask):
                        nonzero_indices = nonzero_mask.nonzero(as_tuple=False).cpu().numpy().tolist()
                        if q_weight is not None:
                            nonzero_values = q_weight[nonzero_mask].cpu().numpy().astype(np.float32).tolist()
                        else:
                            nonzero_values = param.data[nonzero_mask].cpu().numpy().astype(np.float32).tolist()

                        compression_info[name] = {
                            'shape': list(param.shape),
                            'indices': nonzero_indices,
                            'values': nonzero_values,
                            'scale': float(scale) if scale is not None else 1.0,
                            'zero_point': float(zero_point) if zero_point is not None else 0.0,
                            'bits': current_bits,
                            'original_dtype': str(original_param.dtype)
                        }
                    else:
                        # Every weight in this layer was pruned away; keep an empty record.
                        compression_info[name] = {
                            'shape': list(param.shape),
                            'indices': [],
                            'values': [],
                            'scale': 1.0,
                            'zero_point': 0.0,
                            'bits': current_bits,
                            'original_dtype': str(original_param.dtype)
                        }
        elif 'bias' in name:
            # Biases are small and sensitive; store them verbatim.
            compression_info[name] = {
                'shape': list(param.shape),
                'values': param.data.cpu().numpy().astype(np.float32).tolist(),
                'is_bias': True,
                'original_dtype': str(param.dtype)
            }

    overall_sparsity = zero_elements / total_elements if total_elements > 0 else 0
    print(f"整体稀疏度: {overall_sparsity:.4f}")

    return compressed_model, model_info, overall_sparsity


def save_compressed_model_simple(model, path):
    """Persist the compressed model as an ordinary PyTorch checkpoint.

    Stores the state dict plus minimal metadata, then returns the resulting
    file size in bytes.
    """
    checkpoint = {
        'model_state_dict': model.state_dict(),
        'model_class': str(type(model).__name__),
        'compression_applied': True,
    }
    torch.save(checkpoint, path)
    return os.path.getsize(path)


def load_compressed_model_simple(path, model_name="custom", num_classes=10, device=None):
    """Rebuild a model architecture and restore compressed weights from disk.

    Returns the restored model (moved to ``device`` when one is given), or
    None when any step of loading fails.
    """
    try:
        checkpoint = torch.load(path, map_location=device if device else 'cpu')
        model = load_classification_model(model_name, num_classes, use_pretrained=False)
        model.load_state_dict(checkpoint['model_state_dict'])
        return model.to(device) if device else model
    except Exception as e:
        # Best-effort loader: report and signal failure with None.
        print(f"加载模型失败: {e}")
        return None


def main():
    """Entry point: train and/or compress a remote-sensing classifier.

    Driven by the CLI (see get_args): mode 'train' only trains, 'compress'
    only compresses a previously trained checkpoint, 'both' (default) does
    training followed by compression with optional post-compression
    fine-tuning. All artifacts (checkpoints, plots, JSON results) are written
    to --output-dir.
    """
    args = get_args()

    # Make sure the output directory exists.
    os.makedirs(args.output_dir, exist_ok=True)

    # Pick the compute device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Resolve compression parameters: start from the level preset, then let
    # --bits / --sparsity each override it individually.
    # Fix: previously an override only took effect when BOTH flags were
    # supplied, contradicting the documented behavior of each flag.
    compression_params = get_compression_params(args.compression_level)
    if args.bits is not None:
        compression_params['bits'] = args.bits
    if args.sparsity is not None:
        compression_params['sparsity'] = args.sparsity

    print(f"压缩级别: {args.compression_level}")
    print(f"压缩参数: {compression_params['bits']}位量化, {compression_params['sparsity']}稀疏度")

    # Build the model (weights may be replaced below in compress-only mode).
    model = load_classification_model(args.model, args.num_classes, args.use_pretrained)
    model = model.to(device)

    trained_model_path = os.path.join(args.output_dir, f"{args.model}_trained.pth")
    training_history = None

    # Training phase.
    if args.mode in ['train', 'both']:
        print("=" * 50)
        print("开始训练阶段")
        print("=" * 50)

        # Build data loaders.
        train_loader, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.dataset_size,
            args.img_size, args.num_classes
        )

        # Save a grid of sample images for inspection.
        visualize_samples(train_loader, args.output_dir)

        # Train the model.
        model, best_acc, training_history = train_model(
            model, train_loader, test_loader, device, args
        )

        # Persist the trained weights.
        torch.save(model.state_dict(), trained_model_path)
        print(f"训练好的模型已保存到: {trained_model_path}")

        # Persist the training history.
        history_path = os.path.join(args.output_dir, "training_history.json")
        with open(history_path, 'w') as f:
            json.dump(training_history, f, indent=2)

    # Compress-only mode: load previously trained weights.
    elif args.mode == 'compress':
        if args.pretrained_path and os.path.exists(args.pretrained_path):
            model.load_state_dict(torch.load(args.pretrained_path, map_location=device))
            print(f"已加载预训练模型: {args.pretrained_path}")
        elif os.path.exists(trained_model_path):
            model.load_state_dict(torch.load(trained_model_path, map_location=device))
            print(f"已加载训练好的模型: {trained_model_path}")
        else:
            print("警告: 没有找到预训练模型，将使用随机初始化的模型进行压缩")

    # Compression phase.
    if args.mode in ['compress', 'both']:
        print("=" * 50)
        print("开始压缩阶段")
        print("=" * 50)

        # Build a test loader for evaluation.
        _, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.dataset_size,
            args.img_size, args.num_classes
        )

        # Baseline accuracy of the uncompressed model.
        print("评估原始模型...")
        original_accuracy = evaluate_model(model, test_loader, device)

        # In-memory parameter size of the original model.
        original_size = get_model_size(model)

        # Save the original model and record its on-disk size.
        original_path = os.path.join(args.output_dir, f"{args.model}_original.pth")
        torch.save(model.state_dict(), original_path)
        original_file_size = os.path.getsize(original_path) / (1024 * 1024)

        print(f"原始模型准确率: {original_accuracy:.4f}")
        print(f"原始模型内存大小: {original_size:.2f} MB")
        print(f"原始模型文件大小: {original_file_size:.2f} MB")

        # Run pruning + quantization.
        start_time = time.time()
        compressed_model, compressed_info, actual_sparsity = compress_model(
            model,
            compression_params['bits'],
            compression_params['sparsity']
        )
        compression_time = time.time() - start_time
        print(f"压缩完成，耗时: {compression_time:.2f} 秒")

        # Accuracy after compression, before any fine-tuning.
        print("评估压缩后的模型...")
        compressed_model = compressed_model.to(device)
        compressed_accuracy = evaluate_model(compressed_model, test_loader, device)

        print(f"压缩后模型准确率: {compressed_accuracy:.4f}")
        print(f"准确率下降: {(original_accuracy - compressed_accuracy) * 100:.2f}%")

        # Fine-tune only when the accuracy drop exceeds 5 points.
        if (original_accuracy - compressed_accuracy) > 0.05 and args.finetune_epochs > 0:
            print("性能下降较大，开始微调...")
            train_loader, _ = create_dataloaders(
                args.data_dir, args.batch_size, args.dataset_size,
                args.img_size, args.num_classes
            )
            compressed_model, finetuned_accuracy = finetune_model(
                compressed_model, train_loader, test_loader, device, args.finetune_epochs
            )
            print(f"微调后准确率: {finetuned_accuracy:.4f}")
        else:
            finetuned_accuracy = compressed_accuracy

        # In-memory size after compression (fake quantization keeps float32,
        # so this matches the original size; the sparsity is what changed).
        compressed_size = get_model_size(compressed_model)

        # Save the compressed model and record its actual file size.
        compressed_path = os.path.join(args.output_dir, f"{args.model}_compressed.pth")
        compressed_file_size_bytes = save_compressed_model_simple(compressed_model, compressed_path)
        compressed_file_size = compressed_file_size_bytes / (1024 * 1024)

        print(f"压缩后模型内存大小: {compressed_size:.2f} MB")
        print(f"压缩后模型文件大小: {compressed_file_size:.2f} MB")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

        # File-size compression ratio.
        file_compression_ratio = original_file_size / compressed_file_size

        # Round-trip check: reload the compressed checkpoint and re-evaluate.
        print("测试加载压缩模型...")
        loaded_model = load_compressed_model_simple(compressed_path, args.model, args.num_classes, device)
        if loaded_model is not None:
            loaded_accuracy = evaluate_model(loaded_model, test_loader, device)
            print(f"加载后模型准确率: {loaded_accuracy:.4f}")
        else:
            print("加载压缩模型失败")
            loaded_accuracy = finetuned_accuracy  # fall back to the in-memory result

        # Assemble the full results record.
        results = {
            'model': args.model,
            'mode': args.mode,
            'task': 'remote_sensing_classification',
            'num_classes': args.num_classes,
            'compression_level': args.compression_level,
            'use_pretrained': args.use_pretrained,
            'training_params': {
                'epochs': args.epochs,
                'batch_size': args.batch_size,
                'learning_rate': args.learning_rate,
                'dataset_size': args.dataset_size,
                'img_size': args.img_size
            } if args.mode in ['train', 'both'] else None,
            'compression_params': {
                'bits': compression_params['bits'],
                'target_sparsity': compression_params['sparsity'],
                'actual_sparsity': float(actual_sparsity),
                'finetune_epochs': args.finetune_epochs
            },
            'model_sizes': {
                'original_memory_mb': float(original_size),
                'original_file_mb': float(original_file_size),
                'compressed_memory_mb': float(compressed_size),
                'compressed_file_mb': float(compressed_file_size),
                'file_compression_ratio': float(file_compression_ratio)
            },
            'accuracies': {
                'original': float(original_accuracy),
                'compressed': float(compressed_accuracy),
                'finetuned': float(finetuned_accuracy),
                'loaded': float(loaded_accuracy),
                'accuracy_drop_before_finetune': float(original_accuracy - compressed_accuracy),
                'accuracy_drop_after_finetune': float(original_accuracy - finetuned_accuracy)
            },
            'training_history': training_history
        }

        # Persist the results.
        results_path = os.path.join(args.output_dir, "complete_results.json")
        with open(results_path, 'w') as f:
            json.dump(results, f, indent=2)

        # Final summary.
        print("\n" + "=" * 60)
        print("最终结果统计")
        print("=" * 60)
        if training_history:
            print(f"训练最佳准确率: {training_history['best_test_acc']:.4f}")
        print(f"原始模型准确率: {original_accuracy:.4f}")
        print(f"压缩后模型准确率: {compressed_accuracy:.4f}")
        print(f"微调后模型准确率: {finetuned_accuracy:.4f}")
        print(f"最终准确率下降: {(original_accuracy - finetuned_accuracy) * 100:.2f}%")
        print(f"文件压缩率: {file_compression_ratio:.2f}倍")
        print(f"原始文件大小: {original_file_size:.2f} MB")
        print(f"压缩文件大小: {compressed_file_size:.2f} MB")
        print(f"量化位数: {compression_params['bits']}")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

        # Qualitative verdict on the compression outcome.
        accuracy_drop_percent = (original_accuracy - finetuned_accuracy) * 100
        if accuracy_drop_percent < 2:
            print("✅ 压缩效果: 优秀 (准确率下降 < 2%)")
        elif accuracy_drop_percent < 5:
            print("✅ 压缩效果: 良好 (准确率下降 < 5%)")
        elif accuracy_drop_percent < 10:
            print("⚠️ 压缩效果: 可接受 (准确率下降 < 10%)")
        else:
            print("❌ 压缩效果: 需要调整 (准确率下降过大)")

        print(f"结果已保存到: {results_path}")

    print("\n遥感图像分类模型训练和压缩完成!")


# Script entry point: run the full train/compress pipeline when executed directly.
if __name__ == "__main__":
    main()