#!/usr/bin/env python
"""
雷达目标识别AConvNet模型训练和智能压缩实现 - 改进版
AConvNet是专门为SAR图像目标识别设计的CNN模型
本脚本实现模型训练、智能压缩并真正减小模型文件大小
支持渐进式压缩策略，确保性能损失在可接受范围内
"""

import argparse
import copy
import gzip
import io
import json
import os
import random
import time

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
from tqdm import tqdm


# 定义改进的AConvNet模型
class AConvNet(nn.Module):
    """All-convolutional network for SAR target recognition.

    Four conv/BN stages (the first three followed by 2x2 max-pooling), global
    average pooling, then a three-layer classifier head with dropout between
    the fully connected layers. Expects input of shape (N, 1, H, W) and
    returns raw class logits of shape (N, num_classes).
    """

    def __init__(self, num_classes=10, dropout_rate=0.5):
        super(AConvNet, self).__init__()

        # Convolutional feature extractor.
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3 = nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2)
        self.bn3 = nn.BatchNorm2d(64)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Final conv stage has no pooling; spatial dims collapse later via GAP.
        self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.bn4 = nn.BatchNorm2d(128)

        # Classifier head.
        self.fc1 = nn.Linear(128, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, num_classes)

        # Shared dropout module used after fc1 and fc2 to curb overfitting.
        self.dropout = nn.Dropout(dropout_rate)

        self._init_weights()

    def _init_weights(self):
        """He init for convs, unit scale/zero shift for BN, small-normal for linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Three conv -> BN -> ReLU -> pool stages.
        for conv, bn, pool in ((self.conv1, self.bn1, self.pool1),
                               (self.conv2, self.bn2, self.pool2),
                               (self.conv3, self.bn3, self.pool3)):
            x = pool(F.relu(bn(conv(x))))

        # Fourth conv stage, no pooling.
        x = F.relu(self.bn4(self.conv4(x)))

        # Global average pooling collapses the spatial dimensions to 1x1.
        x = F.adaptive_avg_pool2d(x, (1, 1)).flatten(1)

        # Classifier head with dropout after each hidden layer.
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        return self.fc3(x)


# 改进的SAR图像数据集
class ImprovedSARDataset(Dataset):
    """In-memory synthetic SAR-like target dataset.

    Generates `size` single-channel images of shape (1, img_size, img_size).
    Each sample is a class-specific geometric pattern (class = index %
    num_classes), optionally perturbed with additive Gaussian noise and light
    augmentation, then standardized per image and clipped to [-3, 3]. All
    samples are generated eagerly in __init__ and cached in memory.
    """

    def __init__(self, size=2000, img_size=88, num_classes=10, add_noise=True, augment=True):
        self.size = size
        self.img_size = img_size
        self.num_classes = num_classes
        self.add_noise = add_noise
        self.augment = augment

        # Pre-generated samples and their integer labels.
        self.data = []
        self.labels = []

        print(f"生成 {size} 个改进的SAR目标样本...")
        for i in tqdm(range(size)):
            label = i % num_classes
            # Draw the class-specific base pattern.
            img = self._generate_enhanced_pattern(label)

            if add_noise:
                # NOTE(review): _generate_enhanced_pattern reseeds NumPy's
                # global RNG for class 9, which also makes this draw
                # deterministic after class-9 samples — confirm intended.
                noise_level = np.random.uniform(0.05, 0.15)
                img += torch.randn_like(img) * noise_level

            if augment:
                img = self._apply_augmentation(img)

            # Per-image standardization, then clip outliers.
            img = (img - img.mean()) / (img.std() + 1e-8)
            img = torch.clamp(img, -3, 3)  # limit the value range

            self.data.append(img)
            self.labels.append(label)

    def _generate_enhanced_pattern(self, class_id):
        """Paint the geometric base pattern for `class_id` on a zero canvas."""
        img = torch.zeros(1, self.img_size, self.img_size)
        center = self.img_size // 2

        if class_id == 0:  # tank: rectangle hull + circular turret
            # Hull rectangle.
            h_start, h_end = center - 12, center + 12
            w_start, w_end = center - 18, center + 18
            img[0, h_start:h_end, w_start:w_end] = 1.0

            # Turret disc, slightly brighter and shifted up 5 px.
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
            mask = ((x - center) ** 2 + (y - center + 5) ** 2) < 6 ** 2
            img[0][mask] = 1.2

        elif class_id == 1:  # armored vehicle: ellipse
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
            mask = ((x - center) ** 2 / 15 ** 2 + (y - center) ** 2 / 10 ** 2) < 1
            img[0][mask] = 1.0

        elif class_id == 2:  # aircraft: cross (fuselage + wings)
            # Fuselage.
            img[0, center - 2:center + 2, center - 20:center + 20] = 1.0
            # Wings.
            img[0, center - 15:center + 15, center - 3:center + 3] = 1.0

        elif class_id == 3:  # ship: elongated ellipse
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
            mask = ((x - center) ** 2 / 25 ** 2 + (y - center) ** 2 / 8 ** 2) < 1
            img[0][mask] = 1.0

        elif class_id == 4:  # truck: cab + cargo-box rectangles
            # Cab.
            img[0, center - 8:center + 8, center - 15:center - 5] = 1.0
            # Cargo box, dimmer than the cab.
            img[0, center - 6:center + 6, center - 5:center + 20] = 0.8

        elif class_id == 5:  # radar site: disc + radial spokes
            # Main disc.
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
            mask = ((x - center) ** 2 + (y - center) ** 2) < 12 ** 2
            img[0][mask] = 1.0

            # Spokes at four angles radiating from the disc edge.
            for angle in [0, 45, 90, 135]:
                rad = np.radians(angle)
                for r in range(12, 25):
                    x_pos = int(center + r * np.cos(rad))
                    y_pos = int(center + r * np.sin(rad))
                    if 0 <= x_pos < self.img_size and 0 <= y_pos < self.img_size:
                        img[0, y_pos, x_pos] = 0.6

        elif class_id == 6:  # building: grid of small squares
            for i in range(center - 15, center + 15, 6):
                for j in range(center - 15, center + 15, 6):
                    img[0, i:i + 4, j:j + 4] = 1.0

        elif class_id == 7:  # bridge: horizontal deck + vertical supports
            img[0, center - 2:center + 2, :] = 1.0
            for x_pos in range(10, self.img_size - 10, 15):
                img[0, center - 10:center + 10, x_pos:x_pos + 2] = 0.8

        elif class_id == 8:  # missile launcher: base + launch tube
            # Base platform.
            img[0, center - 5:center + 5, center - 10:center + 10] = 1.0
            # Launch tube, brighter than the base.
            img[0, center - 15:center - 5, center - 2:center + 2] = 1.2

        else:  # class_id == 9 - miscellaneous target
            # Random composite of blobs. NOTE(review): this reseeds NumPy's
            # *global* RNG with a fixed value on every call, so all class-9
            # patterns are identical and subsequent np.random draws become
            # deterministic — confirm this is intentional.
            np.random.seed(class_id * 123)
            num_shapes = np.random.randint(3, 6)
            for _ in range(num_shapes):
                x_c = np.random.randint(center - 15, center + 15)
                y_c = np.random.randint(center - 15, center + 15)
                size = np.random.randint(3, 8)
                intensity = np.random.uniform(0.6, 1.2)

                y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
                mask = ((x - x_c) ** 2 + (y - y_c) ** 2) < size ** 2
                img[0][mask] = intensity

        return img

    def _apply_augmentation(self, img):
        """Apply each augmentation (rotate/scale/translate) with probability 0.5."""
        # Random rotation (currently a no-op, see _rotate_image).
        if random.random() > 0.5:
            angle = random.uniform(-15, 15)
            img = self._rotate_image(img, angle)

        # Random intensity scaling (not a spatial resize, see _scale_image).
        if random.random() > 0.5:
            scale = random.uniform(0.9, 1.1)
            img = self._scale_image(img, scale)

        # Random small translation with zero fill.
        if random.random() > 0.5:
            dx = random.randint(-3, 3)
            dy = random.randint(-3, 3)
            img = self._translate_image(img, dx, dy)

        return img

    def _rotate_image(self, img, angle):
        """Rotate the image by `angle` degrees (placeholder: returns input unchanged)."""
        # Deliberately unimplemented for simplicity; `angle` is ignored.
        return img  # rotation intentionally not implemented, for simplicity

    def _scale_image(self, img, scale):
        """Scale pixel *intensities* by `scale` (note: not a geometric resize)."""
        # Simple intensity scaling stand-in for a real spatial zoom.
        return img * scale

    def _translate_image(self, img, dx, dy):
        """Shift the image by (dx, dy) pixels, filling uncovered areas with zeros."""
        if dx == 0 and dy == 0:
            return img

        shifted = torch.zeros_like(img)
        h, w = img.shape[1], img.shape[2]

        # Source/destination windows clipped to stay inside the canvas.
        src_x_start = max(0, -dx)
        src_x_end = min(w, w - dx)
        src_y_start = max(0, -dy)
        src_y_end = min(h, h - dy)

        dst_x_start = max(0, dx)
        dst_x_end = dst_x_start + (src_x_end - src_x_start)
        dst_y_start = max(0, dy)
        dst_y_end = dst_y_start + (src_y_end - src_y_start)

        shifted[0, dst_y_start:dst_y_end, dst_x_start:dst_x_end] = img[0, src_y_start:src_y_end, src_x_start:src_x_end]

        return shifted

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        # Returns (image tensor of shape (1, img_size, img_size), int label).
        return self.data[idx], self.labels[idx]


def get_args():
    """Parse and return command-line arguments for training and compression.

    Fix: the `--max-performance-loss` help text contained a bare `%`, which
    argparse interprets as a printf-style placeholder when rendering help
    (`help % params`), so `-h/--help` crashed with ValueError. It is now
    escaped as `%%`.
    """
    parser = argparse.ArgumentParser(description="雷达目标识别AConvNet模型训练和智能压缩实现")

    # Data and output locations.
    parser.add_argument("--data-dir", "-d", default="./data",
                        help="SAR数据集目录路径")
    parser.add_argument("--output-dir", "-o", default="./output",
                        help="模型和结果的输出目录")
    parser.add_argument("--num-classes", type=int, default=10,
                        help="目标类别数量 (默认: 10)")

    # Optimization hyper-parameters.
    parser.add_argument("--epochs", "-e", type=int, default=30,
                        help="训练轮数 (默认: 30)")
    parser.add_argument("--batch-size", "-b", type=int, default=32,
                        help="批次大小 (默认: 32)")
    parser.add_argument("--learning-rate", "-lr", type=float, default=0.001,
                        help="学习率 (默认: 0.001)")
    parser.add_argument("--weight-decay", type=float, default=1e-4,
                        help="权重衰减 (默认: 1e-4)")
    parser.add_argument("--dataset-size", type=int, default=3000,
                        help="数据集大小 (默认: 3000)")

    # Smart-compression constraints ("%%" renders as a literal "%" in help).
    parser.add_argument("--max-performance-loss", type=float, default=0.15,
                        help="最大允许的性能损失比例 (默认15%%)")
    parser.add_argument("--target-compression-ratio", type=float, default=4.0,
                        help="目标压缩比 (默认4倍)")
    parser.add_argument("--compression-strategy", choices=['conservative', 'balanced', 'aggressive'],
                        default='balanced', help="压缩策略")

    # Manual-mode compression knobs (overridden by the smart search).
    parser.add_argument("--bits", type=int, default=8,
                        help="量化位数（智能模式下会自动调整）")
    parser.add_argument("--sparsity", type=float, default=0.3,
                        help="剪枝稀疏度（智能模式下会自动调整）")

    # Run-mode selection.
    parser.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                        help="运行模式: train(仅训练), compress(仅压缩), both(训练+压缩)")
    parser.add_argument("--compression-mode", choices=['smart', 'manual'], default='smart',
                        help="压缩模式：smart=智能压缩，manual=手动参数")
    parser.add_argument("--pretrained-path", type=str, default=None,
                        help="预训练模型路径 (用于仅压缩模式)")

    return parser.parse_args()


def create_dataloaders(data_dir, batch_size=32, num_classes=10, dataset_size=3000):
    """Build train/test DataLoaders over the synthetic SAR dataset.

    The dataset is split 80/20 into train/test subsets. Note that `data_dir`
    is accepted for interface compatibility but unused — the data is
    generated synthetically by ImprovedSARDataset, not read from disk.
    """
    print("准备SAR目标识别数据集...")

    # Full synthetic dataset; every sample is generated up front.
    full_dataset = ImprovedSARDataset(
        size=dataset_size,
        img_size=88,
        num_classes=num_classes,
        add_noise=True,
        augment=True,
    )

    # 80% train / 20% test split.
    n_train = int(0.8 * len(full_dataset))
    n_test = len(full_dataset) - n_train
    train_dataset, test_dataset = random_split(full_dataset, [n_train, n_test])

    # Shared loader settings; only shuffling differs between the two.
    loader_kwargs = dict(batch_size=batch_size, num_workers=2, pin_memory=True)
    train_loader = DataLoader(train_dataset, shuffle=True, **loader_kwargs)
    test_loader = DataLoader(test_dataset, shuffle=False, **loader_kwargs)

    print(f"训练集大小: {len(train_dataset)}")
    print(f"测试集大小: {len(test_dataset)}")

    return train_loader, test_loader


def train_model(model, train_loader, test_loader, device, args):
    """Train AConvNet with Adam + MultiStepLR and early stopping.

    Tracks per-epoch loss/accuracy, keeps a deep copy of the best-performing
    weights (by test accuracy), restores them at the end, and plots the
    training curves. Returns (model, best_test_acc, history_dict).
    """
    print("开始训练AConvNet模型...")

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate,
                           weight_decay=args.weight_decay)
    # Step the LR down 10x at epochs 15 and 25.
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[15, 25], gamma=0.1)

    history = {'train_losses': [], 'train_accuracies': [], 'test_accuracies': []}
    best_test_acc = 0.0
    best_model_state = None
    patience = 10
    patience_counter = 0

    for epoch in range(args.epochs):
        model.train()
        loss_sum = 0.0
        n_correct = 0
        n_seen = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for images, labels in pbar:
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()

            # Clip gradients to stabilize training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            loss_sum += loss.item()
            preds = outputs.data.argmax(dim=1)
            n_seen += labels.size(0)
            n_correct += (preds == labels).sum().item()

            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'Acc': f'{100. * n_correct / n_seen:.2f}%'
            })

        avg_train_loss = loss_sum / len(train_loader)
        train_acc = n_correct / n_seen

        # Evaluate on the held-out split.
        test_acc = evaluate_model(model, test_loader, device)

        history['train_losses'].append(avg_train_loss)
        history['train_accuracies'].append(train_acc)
        history['test_accuracies'].append(test_acc)

        # Snapshot the best weights; reset patience on improvement.
        if test_acc > best_test_acc:
            best_test_acc = test_acc
            best_model_state = copy.deepcopy(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1

        scheduler.step()

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train Acc: {train_acc:.4f}, '
              f'Test Acc: {test_acc:.4f}, '
              f'LR: {optimizer.param_groups[0]["lr"]:.6f}')

        if patience_counter >= patience:
            print(f"早停：{patience}轮无改善")
            break

    # Restore the best checkpoint before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试准确率: {best_test_acc:.4f}')

    plot_training_curves(history['train_losses'], history['train_accuracies'],
                         history['test_accuracies'], args.output_dir)

    return model, best_test_acc, {**history, 'best_test_acc': best_test_acc}


def evaluate_model(model, dataloader, device):
    """Return top-1 accuracy of `model` over `dataloader` (0 if it is empty)."""
    model.eval()
    n_correct = 0
    n_total = 0

    with torch.no_grad():
        for images, labels in dataloader:
            images, labels = images.to(device), labels.to(device)
            preds = model(images).argmax(dim=1)
            n_correct += (preds == labels).sum().item()
            n_total += labels.size(0)

    return n_correct / n_total if n_total > 0 else 0


def plot_training_curves(train_losses, train_accuracies, test_accuracies, output_dir):
    """Plot per-epoch loss and accuracy curves and save them as one PNG."""
    xs = range(1, len(train_losses) + 1)
    out_path = os.path.join(output_dir, 'training_curves.png')

    plt.figure(figsize=(12, 4))

    # Left panel: training loss.
    ax = plt.subplot(1, 2, 1)
    ax.plot(xs, train_losses, 'b-', label='Training Loss')
    ax.set_title('Training Loss')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Loss')
    ax.legend()
    ax.grid(True)

    # Right panel: train vs test accuracy.
    ax = plt.subplot(1, 2, 2)
    ax.plot(xs, train_accuracies, 'b-', label='Training Accuracy')
    ax.plot(xs, test_accuracies, 'r-', label='Test Accuracy')
    ax.set_title('Training and Test Accuracy')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Accuracy')
    ax.legend()
    ax.grid(True)

    plt.tight_layout()
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {out_path}")


def get_model_size(model):
    """Return the total in-memory size of the model's parameters in megabytes."""
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    return total_bytes / (1024 * 1024)


def get_layer_importance_simple(model):
    """Fallback importance score: L2 norm of each trainable *weight* tensor."""
    return {
        name: param.data.norm(p=2).item()
        for name, param in model.named_parameters()
        if param.requires_grad and 'weight' in name
    }


def get_layer_importance(model, test_loader, device, sample_size=100):
    """Estimate per-parameter importance as the mean absolute gradient of the
    cross-entropy loss over up to `sample_size` samples from `test_loader`.

    Fixes over the original: the model is probed in eval() mode, so the
    measurement no longer updates BatchNorm running statistics or is
    perturbed by dropout; the caller's train/eval mode is restored in a
    `finally` block so it survives both success and failure; `original_mode`
    is captured before the try so the fallback path can never hit an
    unbound name. Falls back to weight-norm importance on any error.
    """
    original_mode = model.training
    try:
        # eval(): freeze BN statistics and disable dropout while probing.
        model.eval()

        # One accumulator per trainable parameter.
        layer_gradients = {
            name: torch.zeros_like(param)
            for name, param in model.named_parameters()
            if param.requires_grad
        }

        criterion = nn.CrossEntropyLoss()
        sample_count = 0
        with torch.enable_grad():
            for images, labels in test_loader:
                if sample_count >= sample_size:
                    break

                images, labels = images.to(device), labels.to(device)

                model.zero_grad()
                loss = criterion(model(images), labels)
                loss.backward()

                # Accumulate |grad| so positive/negative contributions don't cancel.
                for name, param in model.named_parameters():
                    if param.grad is not None:
                        layer_gradients[name] += param.grad.abs()

                sample_count += images.shape[0]

        # Importance = mean accumulated absolute gradient per parameter tensor.
        return {name: g.mean().item() for name, g in layer_gradients.items()}

    except Exception as e:
        print(f"梯度计算方法失败: {e}")
        print("使用简单的权重范数方法计算层重要性...")
        return get_layer_importance_simple(model)
    finally:
        # Always restore the caller's train/eval mode.
        model.train(original_mode)


def adaptive_layer_compression(model, layer_importance, global_sparsity, global_bits):
    """Prune + fake-quantize every weight tensor of a model copy, scaling the
    aggressiveness by per-layer importance.

    Layers in the top importance quartile are compressed conservatively
    (half sparsity, >=8 bits), the bottom quartile aggressively (1.5x
    sparsity, coarser bits), the rest with the global settings.
    Returns (compressed model copy, list of per-layer stat dicts).
    """
    scores = list(layer_importance.values())
    hi_cut = np.percentile(scores, 75)
    lo_cut = np.percentile(scores, 25)

    compressed = copy.deepcopy(model)
    stats = []

    for name, param in compressed.named_parameters():
        if 'weight' not in name or name not in layer_importance:
            continue
        score = layer_importance[name]

        # Pick this layer's sparsity/bit-width from its importance bucket.
        if score >= hi_cut:
            # Important layer: prune less, quantize finer (at least 8 bits).
            layer_sparsity = max(0.1, global_sparsity * 0.5)
            layer_bits = max(8, global_bits + 2)
        elif score <= lo_cut:
            # Unimportant layer: prune more, quantize coarser (at least 4 bits).
            layer_sparsity = min(0.8, global_sparsity * 1.5)
            layer_bits = max(4, global_bits - 1)
        else:
            layer_sparsity = global_sparsity
            layer_bits = global_bits

        with torch.no_grad():
            pruned, _mask = prune_weights(param.data, layer_sparsity)
            dequantized, _scale, _zp, _codes = quantize_weights(pruned, layer_bits)
            param.copy_(dequantized)

        stats.append({
            'layer': name,
            'importance': score,
            'sparsity': layer_sparsity,
            'bits': layer_bits,
            'original_params': param.numel(),
            # NOTE(review): counted after quantization, so rounding can make
            # this differ slightly from the pruned zero count — confirm.
            'nonzero_params': torch.count_nonzero(param).item(),
        })

    return compressed, stats


def smart_compression_search(model, test_loader, device, args):
    """Grid-search (sparsity, bits) combinations for the best compression.

    Evaluates adaptive_layer_compression over a strategy-dependent candidate
    grid, keeps the configuration with the highest compression ratio whose
    accuracy loss stays below args.max_performance_loss AND whose ratio
    exceeds args.target_compression_ratio, falls back to the best trade-off
    when no candidate satisfies both constraints, writes the search log to
    JSON in args.output_dir, and returns
    (compressed_model, best_config, search_results).

    NOTE(review): the ratio below compares get_model_size() values, which
    count float32 parameters — pruning/fake-quantization do not change that,
    so compression_ratio stays ~1.0 and the target-ratio constraint may
    never be satisfiable. Verify against the on-disk (gzip) sizes instead.
    """
    print(f"开始智能压缩搜索...")
    print(f"目标: 性能损失 < {args.max_performance_loss * 100:.1f}%, 压缩率 > {args.target_compression_ratio:.1f}x")

    # Baseline accuracy and size of the uncompressed model.
    baseline_acc = evaluate_model(model, test_loader, device)
    baseline_size = get_model_size(model)

    print(f"基线性能: 准确率={baseline_acc:.4f}, 大小={baseline_size:.2f}MB")

    # Per-layer importance scores drive adaptive_layer_compression.
    print("计算层重要性...")
    layer_importance = get_layer_importance(model, test_loader, device, sample_size=50)

    # Candidate grid depends on the chosen strategy.
    if args.compression_strategy == 'conservative':
        sparsity_candidates = [0.1, 0.2, 0.3, 0.4]
        bits_candidates = [8, 6]
    elif args.compression_strategy == 'balanced':
        sparsity_candidates = [0.2, 0.3, 0.4, 0.5, 0.6]
        bits_candidates = [8, 6, 5]
    else:  # aggressive
        sparsity_candidates = [0.4, 0.5, 0.6, 0.7, 0.8]
        bits_candidates = [6, 5, 4]

    best_config = None
    best_compression_ratio = 1.0
    search_results = []

    print(f"\n搜索压缩配置...")
    total_combinations = len(sparsity_candidates) * len(bits_candidates)

    with tqdm(total=total_combinations, desc="压缩搜索") as pbar:
        for sparsity in sparsity_candidates:
            for bits in bits_candidates:
                try:
                    # Compress a copy of the model with this configuration.
                    start_time = time.time()
                    compressed_model, compression_stats = adaptive_layer_compression(
                        model, layer_importance, sparsity, bits
                    )
                    compression_time = time.time() - start_time

                    # Accuracy/size of the compressed candidate.
                    compressed_acc = evaluate_model(compressed_model, test_loader, device)
                    compressed_size = get_model_size(compressed_model)

                    # Relative accuracy loss and size ratio (see NOTE above).
                    acc_loss = (baseline_acc - compressed_acc) / baseline_acc
                    compression_ratio = baseline_size / compressed_size

                    # Log this candidate.
                    result = {
                        'sparsity': sparsity,
                        'bits': bits,
                        'accuracy': compressed_acc,
                        'acc_loss': acc_loss,
                        'compression_ratio': compression_ratio,
                        'compression_time': compression_time,
                        'size_mb': compressed_size,
                        'compression_stats': compression_stats
                    }
                    search_results.append(result)

                    # Keep the best candidate that satisfies both constraints.
                    if (acc_loss <= args.max_performance_loss and
                            compression_ratio >= args.target_compression_ratio):
                        if compression_ratio > best_compression_ratio:
                            best_config = result
                            best_compression_ratio = compression_ratio

                    pbar.set_postfix({
                        'Acc Loss': f'{acc_loss * 100:.1f}%',
                        'Compression': f'{compression_ratio:.1f}x',
                        'Best': f'{best_compression_ratio:.1f}x' if best_config else 'None'
                    })

                except Exception as e:
                    print(f"压缩配置 (sparsity={sparsity}, bits={bits}) 失败: {e}")

                pbar.update(1)

    # No candidate met both constraints: pick the best trade-off instead.
    if best_config is None:
        print("未找到满足所有约束的配置，选择最佳权衡...")
        valid_results = [r for r in search_results if r['acc_loss'] <= args.max_performance_loss * 1.5]
        if valid_results:
            best_config = max(valid_results, key=lambda x: x['compression_ratio'])
        else:
            best_config = min(search_results, key=lambda x: x['acc_loss'])

    # Report the winning configuration.
    print(f"\n智能压缩搜索完成!")
    print(f"最佳配置: 稀疏度={best_config['sparsity']:.1f}, 量化位数={best_config['bits']}")
    print(f"性能: 准确率={best_config['accuracy']:.4f} (损失{best_config['acc_loss'] * 100:+.1f}%)")
    print(f"压缩率: {best_config['compression_ratio']:.1f}x")

    # Re-run compression with the winning configuration to produce the output model.
    final_compressed_model, final_stats = adaptive_layer_compression(
        model, layer_importance, best_config['sparsity'], best_config['bits']
    )

    # Persist the search log (per-layer stats dropped: not JSON-serializable).
    search_results_path = os.path.join(args.output_dir, "compression_search_results.json")
    with open(search_results_path, 'w') as f:
        serializable_results = []
        for result in search_results:
            serializable_result = {k: v for k, v in result.items() if k != 'compression_stats'}
            serializable_results.append(serializable_result)
        json.dump({
            'search_results': serializable_results,
            'best_config': {k: v for k, v in best_config.items() if k != 'compression_stats'},
            'baseline_performance': {'accuracy': baseline_acc}
        }, f, indent=2)

    return final_compressed_model, best_config, search_results


def quantize_weights(weight, bits=8):
    """Uniform asymmetric fake-quantization of `weight` to `bits` bits.

    Returns (dequantized_weight, scale, zero_point, quantized_codes).
    A constant tensor cannot define a scale, so it is returned unchanged
    with None for all quantization metadata.
    """
    min_val, max_val = weight.min(), weight.max()
    if min_val == max_val:
        return weight.clone(), None, None, None

    qmin = 0
    qmax = 2 ** bits - 1
    scale = (max_val - min_val) / (qmax - qmin)
    zero_point = qmin - min_val / scale

    # Round to the integer grid, clamp to the representable range,
    # then map back to float ("fake" quantization).
    codes = torch.clamp(torch.round(weight / scale + zero_point), qmin, qmax)
    dequantized = (codes - zero_point) * scale
    return dequantized, scale, zero_point, codes


def prune_weights(weight, sparsity=0.7):
    """Zero out the `sparsity` fraction of smallest-magnitude entries.

    Returns (pruned_weight, keep_mask). When no pruning applies
    (sparsity <= 0, or the tensor is so small that the requested fraction
    rounds down to zero elements) the weight is returned unchanged with
    mask None; when everything is pruned, an all-zero tensor and all-False
    mask are returned.

    Fixes a crash in the original: for small tensors `int(numel * sparsity)`
    can be 0 and `torch.kthvalue(_, 0)` raises (k is 1-based); that case now
    returns the weight untouched.
    """
    if sparsity <= 0:
        return weight.clone(), None

    flat_abs = weight.abs().flatten()
    k = int(flat_abs.numel() * sparsity)

    if k <= 0:
        # Nothing to remove; kthvalue requires k >= 1.
        return weight.clone(), None

    if k >= flat_abs.numel():
        return torch.zeros_like(weight), torch.zeros_like(weight).bool()

    # k-th smallest magnitude is the prune threshold; ties at the
    # threshold are kept (>=), matching the original behavior.
    threshold = torch.kthvalue(flat_abs, k).values
    mask = weight.abs() >= threshold
    return weight * mask.float(), mask


def compress_model(model, bits=4, sparsity=0.7):
    """Manual-mode compression: prune then fake-quantize every weight tensor
    of a deep copy of `model` with uniform settings.

    Returns (compressed model copy, achieved overall sparsity fraction).
    """
    print(f"压缩模型（量化位数={bits}, 剪枝稀疏度={sparsity}）...")
    compressed = copy.deepcopy(model)

    n_total = 0
    n_zero = 0

    for name, param in compressed.named_parameters():
        if 'weight' not in name:
            continue
        with torch.no_grad():
            n_total += param.numel()

            pruned, _mask = prune_weights(param.data, sparsity)
            n_zero += param.numel() - torch.count_nonzero(pruned).item()

            dequantized, _scale, _zp, _codes = quantize_weights(pruned, bits)
            param.copy_(dequantized)

    overall_sparsity = n_zero / n_total if n_total > 0 else 0
    print(f"整体稀疏度: {overall_sparsity:.4f}")

    return compressed, overall_sparsity


def save_compressed_model(model, path, compress=True):
    """Serialize model.state_dict() to `path`, gzip-compressed by default.

    Returns the size in bytes of the file written at `path`.
    """
    temp_path = path + ".temp"
    torch.save(model.state_dict(), temp_path)

    if not compress:
        # Plain checkpoint: just move the temp file into place.
        os.rename(temp_path, path)
        return os.path.getsize(path)

    # Re-write the serialized checkpoint through gzip at max compression.
    with open(temp_path, 'rb') as raw, gzip.open(path, 'wb', compresslevel=9) as gz:
        gz.write(raw.read())
    os.remove(temp_path)
    return os.path.getsize(path)


def load_compressed_model(path, args, device=None):
    """Load an AConvNet checkpoint written by save_compressed_model.

    Handles both gzip-compressed and plain torch checkpoints. Improvements
    over the original: the checkpoint is decompressed into an in-memory
    buffer instead of a temporary ".temp" file (which leaked on load
    failure), and the duplicated load logic is factored into one helper.
    Returns the model, moved to `device` when one is given.
    """
    try:
        with gzip.open(path, 'rb') as f:
            # BadGzipFile is raised here when `path` is not gzip data.
            raw = f.read()
    except gzip.BadGzipFile:
        with open(path, 'rb') as f:
            raw = f.read()

    state_dict = _torch_load_state_dict(io.BytesIO(raw))

    model = AConvNet(num_classes=args.num_classes)
    model.load_state_dict(state_dict)

    if device:
        model = model.to(device)

    return model


def _torch_load_state_dict(buffer):
    """torch.load with weights_only=True where supported (newer PyTorch),
    falling back to the legacy signature on older versions."""
    try:
        return torch.load(buffer, map_location='cpu', weights_only=True)
    except TypeError:
        # Older PyTorch: no weights_only parameter; rewind and retry.
        buffer.seek(0)
        return torch.load(buffer, map_location='cpu')


def plot_compression_analysis(search_results, output_dir):
    """Four-panel summary of the compression search: two trade-off scatter
    plots plus compression-ratio boxplots grouped by bits and by sparsity.
    No-op when there are no results."""
    if not search_results:
        return

    fig, axes = plt.subplots(2, 2, figsize=(12, 10))
    fig.suptitle('AConvNet Compression Analysis', fontsize=16)

    # Flatten the per-result fields once.
    acc_losses = [r['acc_loss'] * 100 for r in search_results]
    ratios = [r['compression_ratio'] for r in search_results]
    sparsities = [r['sparsity'] * 100 for r in search_results]
    bits = [r['bits'] for r in search_results]

    # Top-left: accuracy loss vs compression ratio, colored by sparsity.
    sc = axes[0, 0].scatter(ratios, acc_losses, c=sparsities, cmap='viridis', alpha=0.7)
    axes[0, 0].set_xlabel('Compression Ratio')
    axes[0, 0].set_ylabel('Accuracy Loss (%)')
    axes[0, 0].set_title('Performance vs Compression Trade-off')
    axes[0, 0].grid(True)
    plt.colorbar(sc, ax=axes[0, 0]).set_label('Sparsity (%)')

    # Top-right: accuracy loss vs sparsity, colored by quantization bits.
    sc = axes[0, 1].scatter(sparsities, acc_losses, c=bits, cmap='plasma', alpha=0.7)
    axes[0, 1].set_xlabel('Sparsity (%)')
    axes[0, 1].set_ylabel('Accuracy Loss (%)')
    axes[0, 1].set_title('Sparsity vs Performance Loss')
    axes[0, 1].grid(True)
    plt.colorbar(sc, ax=axes[0, 1]).set_label('Quantization Bits')

    # Bottom-left: ratio distribution per bit width.
    groups, group_labels = [], []
    for bit in sorted(set(bits)):
        groups.append([r['compression_ratio'] for r in search_results if r['bits'] == bit])
        group_labels.append(f'{bit}-bit')
    if groups:
        axes[1, 0].boxplot(groups, labels=group_labels)
        axes[1, 0].set_xlabel('Quantization Bits')
        axes[1, 0].set_ylabel('Compression Ratio')
        axes[1, 0].set_title('Quantization Impact on Compression')
        axes[1, 0].grid(True)

    # Bottom-right: ratio distribution per sparsity level.
    groups, group_labels = [], []
    for level in sorted({r['sparsity'] for r in search_results}):
        groups.append([r['compression_ratio'] for r in search_results if r['sparsity'] == level])
        group_labels.append(f'{level * 100:.0f}%')
    if groups:
        axes[1, 1].boxplot(groups, labels=group_labels)
        axes[1, 1].set_xlabel('Sparsity')
        axes[1, 1].set_ylabel('Compression Ratio')
        axes[1, 1].set_title('Sparsity Impact on Compression')
        axes[1, 1].grid(True)

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'compression_analysis.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"压缩分析图表已保存到: {os.path.join(output_dir, 'compression_analysis.png')}")


def visualize_sample_predictions(model, test_loader, device, output_dir, num_samples=8):
    """Visualize sample predictions on SAR test images.

    Takes the first image of each of the first ``num_samples`` batches,
    runs inference, and saves a grid figure titled with the true and
    predicted class (green title when correct, red when wrong).

    Args:
        model: trained classifier, called as ``model(images)``.
        test_loader: DataLoader yielding ``(images, labels)`` batches of
            single-channel images (the channel dim is squeezed for display).
        device: torch device used for inference.
        output_dir: directory where ``sample_predictions.png`` is written.
        num_samples: number of samples to plot. The grid now adapts to
            this value (the original hard-coded a 2x4 grid, which crashed
            for num_samples > 8 and left empty axes for fewer).
    """
    model.eval()

    # Lay samples out in an up-to-4-column grid sized from num_samples.
    ncols = min(4, max(1, num_samples))
    nrows = (num_samples + ncols - 1) // ncols
    fig, axes = plt.subplots(nrows, ncols, figsize=(4 * ncols, 4 * nrows))
    fig.suptitle('SAR Target Recognition Sample Predictions', fontsize=16)
    axes = np.atleast_1d(axes).flatten()

    class_names = ['Tank', 'APC', 'Aircraft', 'Ship', 'Truck',
                   'Radar', 'Building', 'Bridge', 'Launcher', 'Other']

    plotted = 0
    with torch.no_grad():
        for i, (images, labels) in enumerate(test_loader):
            if i >= num_samples:
                break

            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)

            # Only the first sample of each batch is shown.
            img = images[0].cpu().squeeze().numpy()
            true_label = labels[0].cpu().item()
            pred_label = predicted[0].cpu().item()
            confidence = F.softmax(outputs[0], dim=0)[pred_label].cpu().item()

            ax = axes[i]
            ax.imshow(img, cmap='gray')
            ax.set_title(f'True: {class_names[true_label]}\n'
                         f'Pred: {class_names[pred_label]} ({confidence:.2f})',
                         color='green' if true_label == pred_label else 'red')
            ax.axis('off')
            plotted = i + 1

    # Blank any unused axes (short loader or num_samples not filling the grid).
    for ax in axes[plotted:]:
        ax.axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'sample_predictions.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"样本预测结果已保存到: {os.path.join(output_dir, 'sample_predictions.png')}")


def main():
    """Entry point: train and/or compress the AConvNet model.

    Behavior is driven by command-line arguments (see ``get_args``):
    ``mode`` selects 'train', 'compress', or 'both'. The function trains
    the model, runs smart or manual compression, evaluates accuracy and
    file sizes before/after, and writes checkpoints, plots, and a JSON
    results summary into ``args.output_dir``.
    """
    args = get_args()

    # Make sure the output directory exists before any file is written.
    os.makedirs(args.output_dir, exist_ok=True)

    # Select compute device (GPU when available).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Create the model (loaded with weights later, depending on mode).
    model = AConvNet(num_classes=args.num_classes)
    model = model.to(device)

    trained_model_path = os.path.join(args.output_dir, "aconvnet_trained.pth")
    training_history = None

    # ---------- Training phase ----------
    if args.mode in ['train', 'both']:
        print("=" * 50)
        print("开始训练阶段")
        print("=" * 50)

        # Build data loaders.
        train_loader, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.num_classes, args.dataset_size
        )

        # Train the model.
        model, best_acc, training_history = train_model(
            model, train_loader, test_loader, device, args
        )

        # Persist the trained weights.
        torch.save(model.state_dict(), trained_model_path)
        print(f"训练好的模型已保存到: {trained_model_path}")

        # Visualize a few predictions.
        visualize_sample_predictions(model, test_loader, device, args.output_dir)

        # Persist the training history.
        history_path = os.path.join(args.output_dir, "training_history.json")
        with open(history_path, 'w') as f:
            json.dump(training_history, f, indent=2)

    # Compress-only mode: load pretrained weights.
    elif args.mode == 'compress':
        # weights_only=True: these files hold plain state_dicts (tensors
        # only), and it prevents arbitrary code execution via pickle when
        # loading an untrusted checkpoint path.
        if args.pretrained_path and os.path.exists(args.pretrained_path):
            model.load_state_dict(torch.load(args.pretrained_path, map_location=device, weights_only=True))
            print(f"已加载预训练模型: {args.pretrained_path}")
        elif os.path.exists(trained_model_path):
            model.load_state_dict(torch.load(trained_model_path, map_location=device, weights_only=True))
            print(f"已加载训练好的模型: {trained_model_path}")
        else:
            print("警告: 没有找到预训练模型，将使用随机初始化的模型进行压缩")

    # ---------- Compression phase ----------
    if args.mode in ['compress', 'both']:
        print("=" * 50)
        print(f"开始{'智能' if args.compression_mode == 'smart' else '手动'}压缩阶段")
        print("=" * 50)

        # Test loader for post-compression evaluation.
        _, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.num_classes, args.dataset_size
        )

        # Baseline metrics for the uncompressed model.
        print("评估原始模型...")
        original_accuracy = evaluate_model(model, test_loader, device)
        original_size = get_model_size(model)

        # Save the original model and measure its on-disk size.
        original_path = os.path.join(args.output_dir, "aconvnet_original.pth")
        original_file_size_bytes = save_compressed_model(model, original_path, compress=False)
        original_file_size = original_file_size_bytes / (1024 * 1024)

        print(f"原始模型准确率: {original_accuracy:.4f}")
        print(f"原始模型内存大小: {original_size:.2f} MB")
        print(f"原始模型文件大小: {original_file_size:.2f} MB")

        # Pick the compression strategy.
        if args.compression_mode == 'smart':
            # Automatic search over sparsity/bit-width configurations.
            compressed_model, best_config, search_results = smart_compression_search(
                model, test_loader, device, args
            )
            actual_sparsity = best_config['sparsity']
            compression_bits = best_config['bits']

            # Plot the search-space analysis.
            plot_compression_analysis(search_results, args.output_dir)

        else:
            # Manual compression with user-supplied parameters.
            print(f"使用手动压缩参数: 稀疏度={args.sparsity}, 量化位数={args.bits}")
            start_time = time.time()
            compressed_model, actual_sparsity = compress_model(model, args.bits, args.sparsity)
            compression_time = time.time() - start_time
            print(f"压缩完成，耗时: {compression_time:.2f} 秒")
            compression_bits = args.bits
            best_config = {
                'sparsity': args.sparsity,
                'bits': args.bits,
                'compression_time': compression_time
            }
            search_results = []

        # Evaluate the compressed model.
        print("评估压缩后的模型...")
        compressed_model = compressed_model.to(device)
        compressed_accuracy = evaluate_model(compressed_model, test_loader, device)

        # In-memory size after compression.
        compressed_size = get_model_size(compressed_model)

        # Save the compressed model and measure its on-disk size.
        compressed_path = os.path.join(args.output_dir, "aconvnet_compressed.pth")
        compressed_file_size_bytes = save_compressed_model(compressed_model, compressed_path, compress=True)
        compressed_file_size = compressed_file_size_bytes / (1024 * 1024)

        print(f"压缩后模型准确率: {compressed_accuracy:.4f}")
        print(f"压缩后模型内存大小: {compressed_size:.2f} MB")
        print(f"压缩后模型文件大小: {compressed_file_size:.2f} MB")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

        # Compression ratio and accuracy delta (negative delta = loss).
        file_compression_ratio = original_file_size / compressed_file_size
        acc_change = compressed_accuracy - original_accuracy
        acc_change_percent = (acc_change / original_accuracy) * 100

        # Round-trip check: reload the compressed model and re-evaluate.
        print("测试加载压缩模型...")
        loaded_model = load_compressed_model(compressed_path, args, device)
        loaded_accuracy = evaluate_model(loaded_model, test_loader, device)
        print(f"加载后模型准确率: {loaded_accuracy:.4f}")

        # Assemble the full results record.
        results = {
            'model': 'AConvNet',
            'mode': args.mode,
            'compression_mode': args.compression_mode,
            'num_classes': args.num_classes,
            'model_params': {
                'num_classes': args.num_classes,
                'dataset_size': args.dataset_size
            },
            'training_params': {
                'epochs': args.epochs,
                'batch_size': args.batch_size,
                'learning_rate': args.learning_rate,
                'dataset_size': args.dataset_size
            } if args.mode in ['train', 'both'] else None,
            'compression_params': {
                'final_bits': compression_bits,
                'final_sparsity': float(actual_sparsity),
                'max_performance_loss': args.max_performance_loss,
                'target_compression_ratio': args.target_compression_ratio,
                'compression_strategy': args.compression_strategy
            },
            'model_sizes': {
                'original_memory_mb': float(original_size),
                'original_file_mb': float(original_file_size),
                'compressed_memory_mb': float(compressed_size),
                'compressed_file_mb': float(compressed_file_size),
                'file_compression_ratio': float(file_compression_ratio)
            },
            'performance_metrics': {
                'original_accuracy': float(original_accuracy),
                'compressed_accuracy': float(compressed_accuracy),
                'loaded_accuracy': float(loaded_accuracy),
                'accuracy_change': float(acc_change),
                'accuracy_change_percent': float(acc_change_percent)
            },
            'training_history': training_history,
            'best_compression_config': best_config
        }

        # Write the JSON summary.
        results_path = os.path.join(args.output_dir, "complete_results.json")
        with open(results_path, 'w') as f:
            json.dump(results, f, indent=2)

        # Final summary printout.
        print("\n" + "=" * 60)
        print("AConvNet最终结果统计")
        print("=" * 60)
        if training_history:
            print(f"训练最佳准确率: {training_history['best_test_acc']:.4f}")
        print(f"原始模型准确率: {original_accuracy:.4f}")
        print(f"压缩后模型准确率: {compressed_accuracy:.4f}")
        print(f"准确率变化: {acc_change:+.4f} ({acc_change_percent:+.1f}%)")
        print(f"文件压缩率: {file_compression_ratio:.2f}倍")
        print(f"原始文件大小: {original_file_size:.2f} MB")
        print(f"压缩文件大小: {compressed_file_size:.2f} MB")
        print(f"最终量化位数: {compression_bits}")
        print(f"最终稀疏度: {actual_sparsity:.4f}")

        # Qualitative verdict on compression quality.
        if abs(acc_change_percent) <= args.max_performance_loss * 100:
            print(f"✅ 压缩成功！性能损失在可接受范围内 (<{args.max_performance_loss * 100:.1f}%)")
        else:
            print(f"⚠️  压缩性能损失较大 ({acc_change_percent:.1f}%)，考虑调整压缩策略")

        if file_compression_ratio >= args.target_compression_ratio:
            print(f"✅ 达到目标压缩率 (>{args.target_compression_ratio:.1f}x)")
        else:
            print(f"📊 压缩率 {file_compression_ratio:.1f}x，未达到目标 {args.target_compression_ratio:.1f}x")

        print(f"结果已保存到: {results_path}")

    print("\nAConvNet模型训练和智能压缩完成!")


if __name__ == "__main__":
    main()