#!/usr/bin/env python
"""
雷达目标识别AConvNet模型训练和智能压缩实现 - 改进版
AConvNet是专门为SAR图像目标识别设计的CNN模型
本脚本实现模型训练、智能压缩并真正减小模型文件大小
支持渐进式压缩策略，确保性能损失在可接受范围内
新增：全面的模型评估功能，从多个维度评估压缩效果
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
import numpy as np
import copy
import time
import json
import gzip
import matplotlib.pyplot as plt
from tqdm import tqdm
import random
import psutil
from datetime import datetime
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report


# ===================== New: model evaluator =====================
class ModelEvaluator:
    """Model evaluator - comprehensively benchmarks an original model and its compressed variant.

    Collects classification metrics, parameter/size statistics, inference timing
    and memory usage, and can compare two models' result dicts side by side.
    """

    def __init__(self):
        # All measurements run on this device; prefer GPU when available.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def get_model_size_mb(self, model):
        """Return the model's in-memory footprint.

        NOTE(review): despite the name, this returns a 3-tuple
        (size_mb, parameter_count, buffer_count), not just the MB figure.
        """
        param_size = 0
        param_sum = 0

        for param in model.parameters():
            param_size += param.nelement() * param.element_size()
            param_sum += param.nelement()

        buffer_size = 0
        buffer_sum = 0

        # Buffers (e.g. BatchNorm running stats) occupy memory but are not parameters.
        for buffer in model.buffers():
            buffer_size += buffer.nelement() * buffer.element_size()
            buffer_sum += buffer.nelement()

        size_mb = (param_size + buffer_size) / 1024 / 1024
        return size_mb, param_sum, buffer_sum

    def get_file_size_mb(self, filepath):
        """Return the size of `filepath` in MB, or 0 if the file does not exist."""
        if os.path.exists(filepath):
            return os.path.getsize(filepath) / 1024 / 1024
        return 0

    def measure_inference_time(self, model, test_loader, num_runs=5):
        """Time inference over the first 5 batches of `test_loader`, repeated `num_runs` times.

        Returns (mean_seconds, std_seconds) across the runs.
        NOTE(review): `sample_count` is accumulated but never used or returned,
        so callers cannot know how many samples the timing covered.
        """
        model.eval()
        times = []

        # Warm-up: run a couple of batches so lazy CUDA init does not skew timings.
        with torch.no_grad():
            for i, (data, _) in enumerate(test_loader):
                if i >= 2:  # warm up on 2 batches
                    break
                data = data.to(self.device)
                _ = model(data)

        # Timed runs.
        with torch.no_grad():
            for run in range(num_runs):
                start_time = time.time()
                sample_count = 0
                for i, (data, _) in enumerate(test_loader):
                    if i >= 5:  # time 5 batches per run
                        break
                    data = data.to(self.device)
                    _ = model(data)
                    sample_count += data.size(0)
                end_time = time.time()
                times.append(end_time - start_time)

        return np.mean(times), np.std(times)

    def measure_memory_usage(self, model, test_loader):
        """Measure memory used during a short inference pass.

        Returns a dict with 'allocated_mb', 'reserved_mb' and 'max_allocated_mb'.
        On GPU these are CUDA allocator statistics; on CPU they are approximated
        from the process RSS/VMS (so 'max_allocated_mb' is just the current RSS).
        """
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.reset_peak_memory_stats()

            # Run a few batches so the allocator statistics reflect inference.
            model.eval()
            with torch.no_grad():
                for i, (data, _) in enumerate(test_loader):
                    if i >= 3:  # 3 batches are enough to populate the stats
                        break
                    data = data.to(self.device)
                    _ = model(data)

            memory_allocated = torch.cuda.memory_allocated() / 1024 / 1024  # MB
            memory_reserved = torch.cuda.memory_reserved() / 1024 / 1024  # MB
            max_memory_allocated = torch.cuda.max_memory_allocated() / 1024 / 1024  # MB

            return {
                'allocated_mb': memory_allocated,
                'reserved_mb': memory_reserved,
                'max_allocated_mb': max_memory_allocated
            }
        else:
            # CPU fallback: report process-level memory (coarse approximation).
            process = psutil.Process(os.getpid())
            memory_info = process.memory_info()
            return {
                'allocated_mb': memory_info.rss / 1024 / 1024,
                'reserved_mb': memory_info.vms / 1024 / 1024,
                'max_allocated_mb': memory_info.rss / 1024 / 1024
            }

    def evaluate_model_comprehensive(self, model, test_loader, model_path=None, model_name="Model"):
        """Run the full evaluation suite on `model` and return a nested results dict.

        Covers accuracy/precision/recall/F1, parameter statistics, file size,
        inference time, memory usage, per-layer parameter counts and sparsity.
        Also prints a detailed human-readable report.
        """
        print(f"\n{'=' * 60}")
        print(f"📊 评估 {model_name}")
        print(f"{'=' * 60}")

        model.eval()
        all_predictions = []
        all_targets = []
        total_samples = 0

        # 1. Accuracy / precision metrics over the full test loader.
        print("1. 进行准确率和精度评估...")
        with torch.no_grad():
            for data, targets in test_loader:
                data, targets = data.to(self.device), targets.to(self.device)
                outputs = model(data)
                _, predicted = torch.max(outputs.data, 1)

                all_predictions.extend(predicted.cpu().numpy())
                all_targets.extend(targets.cpu().numpy())
                total_samples += targets.size(0)

        # Compute classification metrics (macro = per-class mean, micro = global).
        accuracy = accuracy_score(all_targets, all_predictions)
        precision_macro = precision_score(all_targets, all_predictions, average='macro', zero_division=0)
        precision_micro = precision_score(all_targets, all_predictions, average='micro', zero_division=0)
        recall_macro = recall_score(all_targets, all_predictions, average='macro', zero_division=0)
        recall_micro = recall_score(all_targets, all_predictions, average='micro', zero_division=0)
        f1_macro = f1_score(all_targets, all_predictions, average='macro', zero_division=0)
        f1_micro = f1_score(all_targets, all_predictions, average='micro', zero_division=0)

        # 2. Parameter statistics.
        print("2. 计算模型参数统计...")
        model_size_mb, total_params, total_buffers = self.get_model_size_mb(model)

        # Split parameter counts by trainability.
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        non_trainable_params = sum(p.numel() for p in model.parameters() if not p.requires_grad)

        # 3. On-disk file size (0 if no path was given or the file is missing).
        print("3. 检查文件大小...")
        file_size_mb = 0
        if model_path and os.path.exists(model_path):
            file_size_mb = self.get_file_size_mb(model_path)

        # 4. Inference timing.
        print("4. 测量推理时间...")
        avg_inference_time, std_inference_time = self.measure_inference_time(model, test_loader)

        # 5. Memory usage.
        print("5. 测量内存使用...")
        memory_usage = self.measure_memory_usage(model, test_loader)

        # 6. Model complexity analysis.
        print("6. 分析模型复杂度...")

        # Per-layer parameter counts, for leaf modules only.
        layer_stats = {}
        for name, module in model.named_modules():
            if len(list(module.children())) == 0:  # leaf module
                params = sum(p.numel() for p in module.parameters())
                if params > 0:
                    layer_stats[name] = {
                        'params': params,
                        'type': type(module).__name__
                    }

        # Sparsity: fraction of (near-)zero trainable parameters.
        sparsity = 0
        zero_params = 0
        total_params_count = 0
        for param in model.parameters():
            if param.requires_grad:
                total_params_count += param.numel()
                zero_params += (param.abs() < 1e-6).sum().item()
        if total_params_count > 0:
            sparsity = zero_params / total_params_count

        # 7. Assemble the results dict.
        results = {
            'model_name': model_name,
            'timestamp': datetime.now().isoformat(),
            'performance_metrics': {
                'accuracy': accuracy,
                'precision_macro': precision_macro,
                'precision_micro': precision_micro,
                'recall_macro': recall_macro,
                'recall_micro': recall_micro,
                'f1_macro': f1_macro,
                'f1_micro': f1_micro,
                'total_samples': total_samples
            },
            'model_complexity': {
                'total_parameters': total_params,
                'trainable_parameters': trainable_params,
                'non_trainable_parameters': non_trainable_params,
                'total_buffers': total_buffers,
                'sparsity_ratio': sparsity
            },
            'model_size': {
                'memory_size_mb': model_size_mb,
                'file_size_mb': file_size_mb,
                'parameters_size_mb': (trainable_params * 4) / 1024 / 1024  # assumes float32 (4 bytes/param)
            },
            'performance': {
                'avg_inference_time_sec': avg_inference_time,
                'std_inference_time_sec': std_inference_time,
                # NOTE(review): total_samples counts the WHOLE test set, but
                # avg_inference_time only covers ~5 batches, so this throughput
                # figure is inflated — confirm intent before relying on it.
                'throughput_samples_per_sec': total_samples / avg_inference_time if avg_inference_time > 0 else 0
            },
            'memory_usage': memory_usage,
            'layer_statistics': layer_stats
        }

        # Emit the human-readable report.
        self.print_evaluation_log(results)

        return results

    def print_evaluation_log(self, results):
        """Pretty-print a results dict produced by evaluate_model_comprehensive."""
        print(f"\n📊 {results['model_name']} 详细评估报告")
        print(f"评估时间: {results['timestamp']}")
        print(f"{'=' * 80}")

        # Classification metrics.
        perf = results['performance_metrics']
        print(f"\n🎯 性能指标:")
        print(f"  准确率 (Accuracy):      {perf['accuracy']:.4f} ({perf['accuracy'] * 100:.2f}%)")
        print(f"  精确率 (Precision):     宏平均 {perf['precision_macro']:.4f}, 微平均 {perf['precision_micro']:.4f}")
        print(f"  召回率 (Recall):        宏平均 {perf['recall_macro']:.4f}, 微平均 {perf['recall_micro']:.4f}")
        print(f"  F1分数 (F1-Score):      宏平均 {perf['f1_macro']:.4f}, 微平均 {perf['f1_micro']:.4f}")
        print(f"  测试样本数:             {perf['total_samples']}")

        # Model complexity.
        complexity = results['model_complexity']
        print(f"\n🏗️  模型复杂度:")
        print(f"  总参数量:               {complexity['total_parameters']:,}")
        print(f"  可训练参数:             {complexity['trainable_parameters']:,}")
        print(f"  不可训练参数:           {complexity['non_trainable_parameters']:,}")
        print(f"  缓冲区参数:             {complexity['total_buffers']:,}")
        print(f"  稀疏度:                 {complexity['sparsity_ratio']:.4f} ({complexity['sparsity_ratio'] * 100:.2f}%)")

        # Storage footprint.
        size = results['model_size']
        print(f"\n💾 存储占用:")
        print(f"  内存大小:               {size['memory_size_mb']:.2f} MB")
        print(f"  文件大小:               {size['file_size_mb']:.2f} MB")
        print(f"  参数存储大小:           {size['parameters_size_mb']:.2f} MB")

        # Runtime performance.
        performance = results['performance']
        print(f"\n⚡ 运行性能:")
        print(
            f"  平均推理时间:           {performance['avg_inference_time_sec']:.4f} ± {performance['std_inference_time_sec']:.4f} 秒")
        print(f"  吞吐量:                 {performance['throughput_samples_per_sec']:.2f} 样本/秒")

        # Memory usage.
        memory = results['memory_usage']
        print(f"\n🧠 内存使用:")
        print(f"  已分配内存:             {memory['allocated_mb']:.2f} MB")
        print(f"  保留内存:               {memory['reserved_mb']:.2f} MB")
        print(f"  峰值内存:               {memory['max_allocated_mb']:.2f} MB")

        # Per-layer statistics (top 10 layers by parameter count).
        if results['layer_statistics']:
            print(f"\n🔍 主要层统计 (参数量前10):")
            sorted_layers = sorted(results['layer_statistics'].items(),
                                   key=lambda x: x[1]['params'], reverse=True)
            for i, (layer_name, stats) in enumerate(sorted_layers[:10]):
                print(f"  {i + 1:2d}. {layer_name:<30} {stats['type']:<15} {stats['params']:>10,} 参数")

    def compare_models(self, original_results, compressed_results):
        """Compare two results dicts (original vs compressed) and print a summary.

        Returns a dict with the key ratios: accuracy loss %, size/speed/memory
        ratios, a composite compression score and a recommendation string.
        """
        print(f"\n{'=' * 80}")
        print(f"📊 模型对比分析")
        print(f"{'=' * 80}")

        # Accuracy comparison.
        orig_acc = original_results['performance_metrics']['accuracy']
        comp_acc = compressed_results['performance_metrics']['accuracy']
        acc_loss = orig_acc - comp_acc
        acc_loss_pct = (acc_loss / orig_acc) * 100 if orig_acc > 0 else 0

        print(f"\n🎯 性能对比:")
        print(f"  原始模型准确率:         {orig_acc:.4f} ({orig_acc * 100:.2f}%)")
        print(f"  压缩模型准确率:         {comp_acc:.4f} ({comp_acc * 100:.2f}%)")
        print(f"  准确率损失:             {acc_loss:.4f} ({acc_loss_pct:.2f}%)")

        # File-size comparison.
        orig_size = original_results['model_size']['file_size_mb']
        comp_size = compressed_results['model_size']['file_size_mb']
        size_ratio = orig_size / comp_size if comp_size > 0 else 0
        size_reduction = orig_size - comp_size
        size_reduction_pct = (size_reduction / orig_size) * 100 if orig_size > 0 else 0

        print(f"\n💾 大小对比:")
        print(f"  原始模型大小:           {orig_size:.2f} MB")
        print(f"  压缩模型大小:           {comp_size:.2f} MB")
        print(f"  压缩率:                 {size_ratio:.2f}x")
        print(f"  大小减少:               {size_reduction:.2f} MB ({size_reduction_pct:.2f}%)")

        # Parameter-count comparison.
        orig_params = original_results['model_complexity']['total_parameters']
        comp_params = compressed_results['model_complexity']['total_parameters']
        param_ratio = orig_params / comp_params if comp_params > 0 else 0

        print(f"\n🏗️  参数量对比:")
        print(f"  原始模型参数量:         {orig_params:,}")
        print(f"  压缩模型参数量:         {comp_params:,}")
        print(f"  参数压缩率:             {param_ratio:.2f}x")

        # Inference-speed comparison.
        orig_time = original_results['performance']['avg_inference_time_sec']
        comp_time = compressed_results['performance']['avg_inference_time_sec']
        speed_ratio = orig_time / comp_time if comp_time > 0 else 0

        print(f"\n⚡ 速度对比:")
        print(f"  原始模型推理时间:       {orig_time:.4f} 秒")
        print(f"  压缩模型推理时间:       {comp_time:.4f} 秒")
        print(f"  加速比:                 {speed_ratio:.2f}x")

        # Peak-memory comparison.
        orig_memory = original_results['memory_usage']['max_allocated_mb']
        comp_memory = compressed_results['memory_usage']['max_allocated_mb']
        memory_ratio = orig_memory / comp_memory if comp_memory > 0 else 0

        print(f"\n🧠 内存使用对比:")
        print(f"  原始模型峰值内存:       {orig_memory:.2f} MB")
        print(f"  压缩模型峰值内存:       {comp_memory:.2f} MB")
        print(f"  内存节省:               {memory_ratio:.2f}x")

        # Composite compression-efficiency summary.
        compression_score = self.calculate_compression_score(acc_loss_pct, size_ratio, speed_ratio)
        recommendation = self.get_compression_recommendation(acc_loss_pct, size_ratio)

        print(f"\n📈 压缩效率总结:")
        print(f"  压缩权衡分数:           {compression_score:.2f}")
        print(f"  推荐等级:               {recommendation}")

        return {
            'accuracy_loss_pct': acc_loss_pct,
            'size_compression_ratio': size_ratio,
            'speed_improvement_ratio': speed_ratio,
            'memory_reduction_ratio': memory_ratio,
            'compression_score': compression_score,
            'recommendation': recommendation
        }

    def calculate_compression_score(self, acc_loss_pct, size_ratio, speed_ratio):
        """Combine size ratio, speed ratio and accuracy loss into a 0-100 score."""
        # Weighted trade-off of compression/speed gains against accuracy loss,
        # clipped to [0, 100].
        score = (size_ratio * 0.4 + speed_ratio * 0.3 - acc_loss_pct * 0.03) * 10
        return max(0, min(100, score))

    def get_compression_recommendation(self, acc_loss_pct, size_ratio):
        """Map accuracy loss (%) and compression ratio to a star-rated verdict."""
        if acc_loss_pct < 5 and size_ratio > 3:
            return "⭐⭐⭐⭐⭐ 优秀"
        elif acc_loss_pct < 10 and size_ratio > 2:
            return "⭐⭐⭐⭐ 良好"
        elif acc_loss_pct < 15 and size_ratio > 2:
            return "⭐⭐⭐ 可接受"
        elif acc_loss_pct < 20:
            return "⭐⭐ 需要改进"
        else:
            return "⭐ 不推荐"

def evaluate_compression_pipeline(original_model, compressed_model, test_loader,
                                  original_model_path=None, compressed_model_path=None,
                                  output_dir="./output"):
    """Run the full evaluation pipeline for an original/compressed model pair.

    Evaluates both models with ModelEvaluator, compares them, and writes the
    combined report to `<output_dir>/comprehensive_evaluation.json`.
    Returns the combined results dict.
    """
    ev = ModelEvaluator()

    banner = '=' * 60
    print(f"\n{banner}")
    print("🔍 开始全面评估模型压缩效果...")
    print(f"{banner}")

    # Evaluate each model in turn, then diff the two result dicts.
    baseline_results = ev.evaluate_model_comprehensive(
        original_model, test_loader, original_model_path, "原始模型")

    shrunk_results = ev.evaluate_model_comprehensive(
        compressed_model, test_loader, compressed_model_path, "压缩模型")

    diff_results = ev.compare_models(baseline_results, shrunk_results)

    report = {
        'original_model': baseline_results,
        'compressed_model': shrunk_results,
        'comparison': diff_results,
        'evaluation_time': datetime.now().isoformat()
    }

    # Persist the full report as pretty-printed UTF-8 JSON.
    eval_path = os.path.join(output_dir, 'comprehensive_evaluation.json')
    with open(eval_path, 'w', encoding='utf-8') as f:
        json.dump(report, f, indent=2, ensure_ascii=False)

    print(f"\n✅ 详细评估报告已保存到: {eval_path}")

    return report


# ===================== Original code section =====================

# Improved AConvNet model definition
class AConvNet(nn.Module):
    """Improved AConvNet: a compact CNN for SAR target classification.

    Four conv/BN blocks (the first three followed by 2x2 max pooling), global
    average pooling, then a three-layer fully-connected head with dropout.

    Args:
        num_classes: number of output classes.
        dropout_rate: dropout probability applied after fc1 and fc2.
    """

    def __init__(self, num_classes=10, dropout_rate=0.5):
        super(AConvNet, self).__init__()
        # Convolutional feature extractor (attribute names are part of the
        # state_dict contract and must not change).
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3 = nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2)
        self.bn3 = nn.BatchNorm2d(64)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.bn4 = nn.BatchNorm2d(128)

        # Fully-connected classifier head.
        self.fc1 = nn.Linear(128, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, num_classes)

        # Shared dropout module reused between fc layers to curb overfitting.
        self.dropout = nn.Dropout(dropout_rate)

        self._init_weights()

    def _init_weights(self):
        """Initialize weights: Kaiming for convs, unit gamma for BN, small normal for linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Three conv -> BN -> ReLU -> max-pool stages.
        stages = (
            (self.conv1, self.bn1, self.pool1),
            (self.conv2, self.bn2, self.pool2),
            (self.conv3, self.bn3, self.pool3),
        )
        for conv, bn, pool in stages:
            x = pool(F.relu(bn(conv(x))))

        # Fourth conv block has no pooling.
        x = F.relu(self.bn4(self.conv4(x)))

        # Collapse spatial dimensions via global average pooling.
        x = F.adaptive_avg_pool2d(x, (1, 1))
        x = x.view(x.size(0), -1)

        # Classifier head with dropout between layers.
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        return self.fc3(x)


# Improved synthetic SAR image dataset
class ImprovedSARDataset(Dataset):
    """Synthetic SAR target dataset with class-specific geometric patterns.

    Each sample is a 1 x img_size x img_size tensor whose base pattern depends
    on the class id; optional Gaussian noise and simple augmentations are
    applied, followed by per-image standardization and clamping to [-3, 3].

    Args:
        size: number of samples to generate.
        img_size: side length of the square images.
        num_classes: number of target classes (labels cycle 0..num_classes-1).
        add_noise: whether to add random Gaussian noise to each sample.
        augment: whether to apply random scale/translate augmentation.
    """

    def __init__(self, size=2000, img_size=88, num_classes=10, add_noise=True, augment=True):
        self.size = size
        self.img_size = img_size
        self.num_classes = num_classes
        self.add_noise = add_noise
        self.augment = augment

        # Samples are generated eagerly and kept in memory.
        self.data = []
        self.labels = []

        print(f"生成 {size} 个改进的SAR目标样本...")
        for i in tqdm(range(size)):
            label = i % num_classes
            # Class-specific deterministic base pattern.
            img = self._generate_enhanced_pattern(label)

            if add_noise:
                # Random per-sample noise level simulates varying speckle intensity.
                noise_level = np.random.uniform(0.05, 0.15)
                img += torch.randn_like(img) * noise_level

            if augment:
                img = self._apply_augmentation(img)

            # Standardize and clamp outliers so all samples share a comparable range.
            img = (img - img.mean()) / (img.std() + 1e-8)
            img = torch.clamp(img, -3, 3)

            self.data.append(img)
            self.labels.append(label)

    def _generate_enhanced_pattern(self, class_id):
        """Create the deterministic base pattern for `class_id`.

        Returns a (1, img_size, img_size) float tensor with the target shape
        drawn around the image center.
        """
        img = torch.zeros(1, self.img_size, self.img_size)
        center = self.img_size // 2

        if class_id == 0:  # tank: rectangular hull + circular turret
            # Hull rectangle.
            h_start, h_end = center - 12, center + 12
            w_start, w_end = center - 18, center + 18
            img[0, h_start:h_end, w_start:w_end] = 1.0

            # Turret circle (brighter than the hull).
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
            mask = ((x - center) ** 2 + (y - center + 5) ** 2) < 6 ** 2
            img[0][mask] = 1.2

        elif class_id == 1:  # armored vehicle: ellipse
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
            mask = ((x - center) ** 2 / 15 ** 2 + (y - center) ** 2 / 10 ** 2) < 1
            img[0][mask] = 1.0

        elif class_id == 2:  # aircraft: fuselage + wings cross
            # Fuselage.
            img[0, center - 2:center + 2, center - 20:center + 20] = 1.0
            # Wings.
            img[0, center - 15:center + 15, center - 3:center + 3] = 1.0

        elif class_id == 3:  # ship: elongated ellipse
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
            mask = ((x - center) ** 2 / 25 ** 2 + (y - center) ** 2 / 8 ** 2) < 1
            img[0][mask] = 1.0

        elif class_id == 4:  # truck: cab + cargo box rectangles
            # Cab.
            img[0, center - 8:center + 8, center - 15:center - 5] = 1.0
            # Cargo box (dimmer).
            img[0, center - 6:center + 6, center - 5:center + 20] = 0.8

        elif class_id == 5:  # radar station: disc + radial spokes
            # Main disc.
            y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
            mask = ((x - center) ** 2 + (y - center) ** 2) < 12 ** 2
            img[0][mask] = 1.0

            # Radial spokes at four angles.
            for angle in [0, 45, 90, 135]:
                rad = np.radians(angle)
                for r in range(12, 25):
                    x_pos = int(center + r * np.cos(rad))
                    y_pos = int(center + r * np.sin(rad))
                    if 0 <= x_pos < self.img_size and 0 <= y_pos < self.img_size:
                        img[0, y_pos, x_pos] = 0.6

        elif class_id == 6:  # building: grid of square blocks
            for i in range(center - 15, center + 15, 6):
                for j in range(center - 15, center + 15, 6):
                    img[0, i:i + 4, j:j + 4] = 1.0

        elif class_id == 7:  # bridge: horizontal span + support pillars
            img[0, center - 2:center + 2, :] = 1.0
            for x_pos in range(10, self.img_size - 10, 15):
                img[0, center - 10:center + 10, x_pos:x_pos + 2] = 0.8

        elif class_id == 8:  # missile launcher: base + launch tube
            # Base.
            img[0, center - 5:center + 5, center - 10:center + 10] = 1.0
            # Launch tube (brighter).
            img[0, center - 15:center - 5, center - 2:center + 2] = 1.2

        else:  # class_id == 9 - miscellaneous target: random blob composite
            # BUGFIX: use a local RandomState instead of np.random.seed(), which
            # reseeded the *global* NumPy RNG on every call and made all later
            # draws (e.g. the per-sample noise level above) deterministic as a
            # side effect. The legacy RandomState produces the same sequence as
            # the old global seed, so the generated pattern is unchanged.
            rng = np.random.RandomState(class_id * 123)
            num_shapes = rng.randint(3, 6)
            for _ in range(num_shapes):
                x_c = rng.randint(center - 15, center + 15)
                y_c = rng.randint(center - 15, center + 15)
                size = rng.randint(3, 8)
                intensity = rng.uniform(0.6, 1.2)

                y, x = torch.meshgrid(torch.arange(self.img_size), torch.arange(self.img_size), indexing='ij')
                mask = ((x - x_c) ** 2 + (y - y_c) ** 2) < size ** 2
                img[0][mask] = intensity

        return img

    def _apply_augmentation(self, img):
        """Randomly apply rotation/scale/translation, each with probability 0.5."""
        # Random rotation (currently a no-op placeholder, see _rotate_image).
        if random.random() > 0.5:
            angle = random.uniform(-15, 15)
            img = self._rotate_image(img, angle)

        # Random intensity scaling.
        if random.random() > 0.5:
            scale = random.uniform(0.9, 1.1)
            img = self._scale_image(img, scale)

        # Random pixel translation.
        if random.random() > 0.5:
            dx = random.randint(-3, 3)
            dy = random.randint(-3, 3)
            img = self._translate_image(img, dx, dy)

        return img

    def _rotate_image(self, img, angle):
        """Rotate the image (placeholder: intentionally a no-op for simplicity)."""
        return img

    def _scale_image(self, img, scale):
        """Scale image intensity (note: multiplies values, not a geometric resize)."""
        return img * scale

    def _translate_image(self, img, dx, dy):
        """Shift the image by (dx, dy) pixels, zero-filling the exposed border."""
        if dx == 0 and dy == 0:
            return img

        shifted = torch.zeros_like(img)
        h, w = img.shape[1], img.shape[2]

        # Source/destination windows that remain inside the image bounds.
        src_x_start = max(0, -dx)
        src_x_end = min(w, w - dx)
        src_y_start = max(0, -dy)
        src_y_end = min(h, h - dy)

        dst_x_start = max(0, dx)
        dst_x_end = dst_x_start + (src_x_end - src_x_start)
        dst_y_start = max(0, dy)
        dst_y_end = dst_y_start + (src_y_end - src_y_start)

        shifted[0, dst_y_start:dst_y_end, dst_x_start:dst_x_end] = img[0, src_y_start:src_y_end, src_x_start:src_x_end]

        return shifted

    def __len__(self):
        """Return the number of generated samples."""
        return self.size

    def __getitem__(self, idx):
        """Return the (image_tensor, label) pair at `idx`."""
        return self.data[idx], self.labels[idx]


def get_args():
    """Parse command-line arguments for training and compression runs.

    Returns:
        argparse.Namespace with data/output paths, training hyper-parameters,
        compression settings and run-mode flags.
    """
    parser = argparse.ArgumentParser(description="雷达目标识别AConvNet模型训练和智能压缩实现")

    # Data / output locations and problem size.
    parser.add_argument("--data-dir", "-d", default="./data",
                        help="SAR数据集目录路径")
    parser.add_argument("--output-dir", "-o", default="./output",
                        help="模型和结果的输出目录")
    parser.add_argument("--num-classes", type=int, default=10,
                        help="目标类别数量 (默认: 10)")

    # Training hyper-parameters.
    parser.add_argument("--epochs", "-e", type=int, default=30,
                        help="训练轮数 (默认: 30)")
    parser.add_argument("--batch-size", "-b", type=int, default=32,
                        help="批次大小 (默认: 32)")
    parser.add_argument("--learning-rate", "-lr", type=float, default=0.001,
                        help="学习率 (默认: 0.001)")
    parser.add_argument("--weight-decay", type=float, default=1e-4,
                        help="权重衰减 (默认: 1e-4)")
    parser.add_argument("--dataset-size", type=int, default=3000,
                        help="数据集大小 (默认: 3000)")

    # Smart-compression constraints.
    # BUGFIX: a literal '%' in argparse help text must be escaped as '%%',
    # otherwise formatting the -h output raises ValueError.
    parser.add_argument("--max-performance-loss", type=float, default=0.15,
                        help="最大允许的性能损失比例 (默认15%%)")
    parser.add_argument("--target-compression-ratio", type=float, default=4.0,
                        help="目标压缩比 (默认4倍)")
    parser.add_argument("--compression-strategy", choices=['conservative', 'balanced', 'aggressive'],
                        default='balanced', help="压缩策略")

    # Legacy manual-mode quantization/pruning knobs.
    parser.add_argument("--bits", type=int, default=8,
                        help="量化位数（智能模式下会自动调整）")
    parser.add_argument("--sparsity", type=float, default=0.3,
                        help="剪枝稀疏度（智能模式下会自动调整）")

    # Run-mode selection.
    parser.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                        help="运行模式: train(仅训练), compress(仅压缩), both(训练+压缩)")
    parser.add_argument("--compression-mode", choices=['smart', 'manual'], default='smart',
                        help="压缩模式：smart=智能压缩，manual=手动参数")
    parser.add_argument("--pretrained-path", type=str, default=None,
                        help="预训练模型路径 (用于仅压缩模式)")

    return parser.parse_args()


def create_dataloaders(data_dir, batch_size=32, num_classes=10, dataset_size=3000):
    """Build train/test DataLoaders over the synthetic SAR dataset.

    Note: `data_dir` is currently unused — the dataset is generated in memory.
    Returns (train_loader, test_loader) split 80/20 at random.
    """
    print("准备SAR目标识别数据集...")

    # Generate the full synthetic dataset up front.
    dataset = ImprovedSARDataset(
        size=dataset_size,
        img_size=88,
        num_classes=num_classes,
        add_noise=True,
        augment=True
    )

    # Random 80/20 split into train and test subsets.
    n_train = int(0.8 * len(dataset))
    n_test = len(dataset) - n_train
    train_set, test_set = random_split(dataset, [n_train, n_test])

    # Shared loader settings; only shuffling differs between the two loaders.
    loader_kwargs = dict(batch_size=batch_size, num_workers=2, pin_memory=True)
    train_loader = DataLoader(train_set, shuffle=True, **loader_kwargs)
    test_loader = DataLoader(test_set, shuffle=False, **loader_kwargs)

    print(f"训练集大小: {len(train_set)}")
    print(f"测试集大小: {len(test_set)}")

    return train_loader, test_loader


def train_model(model, train_loader, test_loader, device, args):
    """Train the AConvNet model with early stopping and best-checkpoint restore.

    Args:
        model: the network to train (modified in place).
        train_loader / test_loader: DataLoaders for training and evaluation.
        device: torch device to run on.
        args: parsed CLI namespace (uses learning_rate, weight_decay, epochs, output_dir).

    Returns:
        (model, best_test_acc, history_dict) where history_dict holds per-epoch
        losses and accuracies plus the best test accuracy.
    """
    print("开始训练AConvNet模型...")

    # Loss and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    # Step-wise LR decay: drop by 10x at epochs 15 and 25.
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[15, 25], gamma=0.1)

    # Per-epoch training history.
    train_losses = []
    train_accuracies = []
    test_accuracies = []

    best_test_acc = 0.0
    best_model_state = None
    patience_counter = 0
    patience = 10

    # Main training loop.
    for epoch in range(args.epochs):
        # Training phase.
        model.train()
        running_loss = 0.0
        correct_train = 0
        total_train = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, labels) in enumerate(pbar):
            images, labels = images.to(device), labels.to(device)

            # Forward pass.
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)

            # Backward pass.
            loss.backward()

            # Clip gradients to stabilize training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()

            # Accumulate batch statistics.
            running_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            total_train += labels.size(0)
            correct_train += (predicted == labels).sum().item()

            # Show live loss/accuracy on the progress bar.
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'Acc': f'{100. * correct_train / total_train:.2f}%'
            })

        # Epoch-level training loss and accuracy.
        avg_train_loss = running_loss / len(train_loader)
        train_acc = correct_train / total_train

        # Evaluation phase.
        test_acc = evaluate_model(model, test_loader, device)

        # Record history.
        train_losses.append(avg_train_loss)
        train_accuracies.append(train_acc)
        test_accuracies.append(test_acc)

        # Track the best checkpoint (deep copy so later epochs don't overwrite it).
        if test_acc > best_test_acc:
            best_test_acc = test_acc
            best_model_state = copy.deepcopy(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1

        # Advance the LR schedule once per epoch (after the optimizer steps).
        scheduler.step()

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train Acc: {train_acc:.4f}, '
              f'Test Acc: {test_acc:.4f}, '
              f'LR: {optimizer.param_groups[0]["lr"]:.6f}')

        # Early stopping after `patience` epochs without improvement.
        if patience_counter >= patience:
            print(f"早停：{patience}轮无改善")
            break

    # Restore the best checkpoint before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试准确率: {best_test_acc:.4f}')

    # Save the loss/accuracy curves as a PNG.
    plot_training_curves(train_losses, train_accuracies, test_accuracies, args.output_dir)

    return model, best_test_acc, {
        'train_losses': train_losses,
        'train_accuracies': train_accuracies,
        'test_accuracies': test_accuracies,
        'best_test_acc': best_test_acc
    }


def evaluate_model(model, dataloader, device):
    """Compute top-1 classification accuracy of `model` over `dataloader`.

    Returns a float in [0, 1]; 0 if the loader yields no samples.
    """
    model.eval()
    hits, seen = 0, 0

    with torch.no_grad():
        for batch_images, batch_labels in dataloader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            # Predicted class is the index of the max logit.
            predictions = model(batch_images).argmax(dim=1)
            seen += batch_labels.size(0)
            hits += (predictions == batch_labels).sum().item()

    return hits / seen if seen > 0 else 0


def plot_training_curves(train_losses, train_accuracies, test_accuracies, output_dir):
    """Save a two-panel figure (loss and accuracy per epoch) to output_dir.

    The left panel shows training loss; the right panel overlays training
    and test accuracy. The file is written as training_curves.png.
    """
    xs = list(range(1, len(train_losses) + 1))
    save_path = os.path.join(output_dir, 'training_curves.png')

    plt.figure(figsize=(12, 4))

    # Left panel: training loss curve.
    plt.subplot(1, 2, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss')
    plt.title('Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    # Right panel: training vs. test accuracy.
    plt.subplot(1, 2, 2)
    plt.plot(xs, train_accuracies, 'b-', label='Training Accuracy')
    plt.plot(xs, test_accuracies, 'r-', label='Test Accuracy')
    plt.title('Training and Test Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {save_path}")


def get_model_size(model):
    """Return the in-memory footprint of `model` in MB.

    Counts learnable parameters AND registered buffers (e.g. BatchNorm
    running statistics), matching ModelEvaluator.get_model_size_mb so that
    sizes and compression ratios are consistent across the file. The
    previous version ignored buffers and under-reported models that use
    normalization layers.
    """
    total_bytes = 0
    for param in model.parameters():
        total_bytes += param.nelement() * param.element_size()
    for buf in model.buffers():
        total_bytes += buf.nelement() * buf.element_size()
    return total_bytes / (1024 * 1024)


def get_layer_importance_simple(model):
    """Fallback layer-importance estimate based on weight magnitude.

    Scores each trainable weight tensor by its L2 norm; bias and other
    non-weight parameters are skipped.
    """
    return {
        name: param.data.norm(p=2).item()
        for name, param in model.named_parameters()
        if param.requires_grad and 'weight' in name
    }


def get_layer_importance(model, test_loader, device, sample_size=100):
    """Estimate per-parameter importance as the accumulated |gradient|.

    Runs forward/backward passes over up to `sample_size` samples from
    `test_loader` and averages the absolute gradients of each trainable
    parameter tensor. Falls back to the weight-norm heuristic
    (get_layer_importance_simple) if gradient computation fails.

    Fix: `original_mode` was previously assigned inside the `try` block but
    referenced in the `except` handler, risking a NameError, and the mode was
    not restored on the fallback path reliably. It is now captured up front
    and restored in a `finally` clause on every path.

    Returns: dict mapping parameter name -> importance score (float >= 0).
    """
    # Capture the caller's train/eval mode before anything can fail.
    original_mode = model.training
    try:
        model.train()

        # One |grad| accumulator per trainable parameter.
        grad_accum = {
            name: torch.zeros_like(param)
            for name, param in model.named_parameters()
            if param.requires_grad
        }

        criterion = nn.CrossEntropyLoss()
        seen = 0
        with torch.enable_grad():
            for images, labels in test_loader:
                if seen >= sample_size:
                    break

                images, labels = images.to(device), labels.to(device)

                # Forward + backward pass for this batch.
                model.zero_grad()
                loss = criterion(model(images), labels)
                loss.backward()

                # Accumulate absolute gradients.
                for name, param in model.named_parameters():
                    if param.grad is not None:
                        grad_accum[name] += param.grad.abs()

                seen += images.shape[0]

        # Mean |grad| per tensor is the importance score.
        return {name: g.mean().item() for name, g in grad_accum.items()}

    except Exception as e:
        print(f"梯度计算方法失败: {e}")
        print("使用简单的权重范数方法计算层重要性...")
        return get_layer_importance_simple(model)
    finally:
        # Always restore the caller's train/eval mode.
        model.train(original_mode)


def adaptive_layer_compression(model, layer_importance, global_sparsity, global_bits):
    """Compress a deep copy of `model`, adapting sparsity/bits per layer.

    Layers whose importance score is in the top quartile are compressed
    conservatively (half sparsity, at least 8 bits); bottom-quartile layers
    aggressively (1.5x sparsity, fewer bits, min 4); the rest use the global
    settings. The input `model` is left untouched.

    Returns (compressed_model, compression_stats) where compression_stats is
    a list of per-layer dicts describing the applied settings.

    NOTE(review): 'nonzero_params' is counted AFTER quantization; dequantized
    zeros are generally not exactly 0.0, so quantization can re-introduce
    non-zeros at pruned positions — confirm whether pre-quantization sparsity
    was intended here.
    """
    # Quartiles of the importance distribution define the three layer tiers.
    importance_values = list(layer_importance.values())
    q75 = np.percentile(importance_values, 75)
    q25 = np.percentile(importance_values, 25)

    compressed_model = copy.deepcopy(model)
    compression_stats = []

    for name, param in compressed_model.named_parameters():
        if 'weight' in name and name in layer_importance:
            importance = layer_importance[name]

            # Pick per-layer settings based on the importance tier.
            if importance >= q75:  # high-importance layer: compress conservatively
                layer_sparsity = max(0.1, global_sparsity * 0.5)
                layer_bits = max(8, global_bits + 2)
            elif importance <= q25:  # low-importance layer: compress aggressively
                layer_sparsity = min(0.8, global_sparsity * 1.5)
                layer_bits = max(4, global_bits - 1)
            else:  # mid-importance layer: use the global settings
                layer_sparsity = global_sparsity
                layer_bits = global_bits

            # Apply compression in place on the copied model's parameter.
            with torch.no_grad():
                # Prune smallest-magnitude weights.
                pruned_weight, mask = prune_weights(param.data, layer_sparsity)
                # Fake-quantize the surviving weights.
                quantized_weight, scale, zero_point, q_weight = quantize_weights(pruned_weight, layer_bits)
                param.copy_(quantized_weight)

            compression_stats.append({
                'layer': name,
                'importance': importance,
                'sparsity': layer_sparsity,
                'bits': layer_bits,
                'original_params': param.numel(),
                'nonzero_params': torch.count_nonzero(param).item()
            })

    return compressed_model, compression_stats


def smart_compression_search(model, test_loader, device, args):
    """Grid-search sparsity/bit-width combinations for the best compression.

    Evaluates adaptive_layer_compression over a strategy-dependent candidate
    grid and keeps the configuration with the highest compression ratio whose
    accuracy loss stays within args.max_performance_loss and whose ratio
    meets args.target_compression_ratio. If no candidate satisfies both
    constraints, falls back to the best available trade-off. Writes the
    search log to compression_search_results.json in args.output_dir.

    Returns (final_compressed_model, best_config, search_results).

    NOTE(review): the compression ratio here is derived from get_model_size
    (in-memory float sizes); pruning + fake quantization keep tensors as
    floats, so this ratio stays near 1.0 — real size gains only show up in
    the gzip file size. Confirm this is the intended metric.
    NOTE(review): if every candidate raises (search_results empty), the
    fallback min()/max() below raises ValueError.
    """
    print(f"开始智能压缩搜索...")
    print(f"目标: 性能损失 < {args.max_performance_loss * 100:.1f}%, 压缩率 > {args.target_compression_ratio:.1f}x")

    # Baseline accuracy and in-memory size of the uncompressed model.
    baseline_acc = evaluate_model(model, test_loader, device)
    baseline_size = get_model_size(model)

    print(f"基线性能: 准确率={baseline_acc:.4f}, 大小={baseline_size:.2f}MB")

    # Gradient-based per-layer importance drives the adaptive compression.
    print("计算层重要性...")
    layer_importance = get_layer_importance(model, test_loader, device, sample_size=50)

    # Candidate grid depends on the chosen strategy.
    if args.compression_strategy == 'conservative':
        sparsity_candidates = [0.1, 0.2, 0.3, 0.4]
        bits_candidates = [8, 6]
    elif args.compression_strategy == 'balanced':
        sparsity_candidates = [0.2, 0.3, 0.4, 0.5, 0.6]
        bits_candidates = [8, 6, 5]
    else:  # aggressive
        sparsity_candidates = [0.4, 0.5, 0.6, 0.7, 0.8]
        bits_candidates = [6, 5, 4]

    best_config = None
    best_compression_ratio = 1.0
    search_results = []

    print(f"\n搜索压缩配置...")
    total_combinations = len(sparsity_candidates) * len(bits_candidates)

    with tqdm(total=total_combinations, desc="压缩搜索") as pbar:
        for sparsity in sparsity_candidates:
            for bits in bits_candidates:
                try:
                    # Build a candidate compressed model.
                    start_time = time.time()
                    compressed_model, compression_stats = adaptive_layer_compression(
                        model, layer_importance, sparsity, bits
                    )
                    compression_time = time.time() - start_time

                    # Score the candidate.
                    compressed_acc = evaluate_model(compressed_model, test_loader, device)
                    compressed_size = get_model_size(compressed_model)

                    # Relative accuracy loss and size ratio vs. baseline.
                    acc_loss = (baseline_acc - compressed_acc) / baseline_acc
                    compression_ratio = baseline_size / compressed_size

                    # Log the full result for later analysis/plotting.
                    result = {
                        'sparsity': sparsity,
                        'bits': bits,
                        'accuracy': compressed_acc,
                        'acc_loss': acc_loss,
                        'compression_ratio': compression_ratio,
                        'compression_time': compression_time,
                        'size_mb': compressed_size,
                        'compression_stats': compression_stats
                    }
                    search_results.append(result)

                    # Keep the best candidate satisfying both constraints.
                    if (acc_loss <= args.max_performance_loss and
                            compression_ratio >= args.target_compression_ratio):
                        if compression_ratio > best_compression_ratio:
                            best_config = result
                            best_compression_ratio = compression_ratio

                    pbar.set_postfix({
                        'Acc Loss': f'{acc_loss * 100:.1f}%',
                        'Compression': f'{compression_ratio:.1f}x',
                        'Best': f'{best_compression_ratio:.1f}x' if best_config else 'None'
                    })

                except Exception as e:
                    print(f"压缩配置 (sparsity={sparsity}, bits={bits}) 失败: {e}")

                pbar.update(1)

    # No candidate met both constraints: pick the best trade-off instead.
    if best_config is None:
        print("未找到满足所有约束的配置，选择最佳权衡...")
        valid_results = [r for r in search_results if r['acc_loss'] <= args.max_performance_loss * 1.5]
        if valid_results:
            best_config = max(valid_results, key=lambda x: x['compression_ratio'])
        else:
            best_config = min(search_results, key=lambda x: x['acc_loss'])

    # Report the winning configuration.
    print(f"\n智能压缩搜索完成!")
    print(f"最佳配置: 稀疏度={best_config['sparsity']:.1f}, 量化位数={best_config['bits']}")
    print(f"性能: 准确率={best_config['accuracy']:.4f} (损失{best_config['acc_loss'] * 100:+.1f}%)")
    print(f"压缩率: {best_config['compression_ratio']:.1f}x")

    # Re-materialize the best compressed model for the caller.
    final_compressed_model, final_stats = adaptive_layer_compression(
        model, layer_importance, best_config['sparsity'], best_config['bits']
    )

    # Persist the search log (without the non-serializable per-layer stats).
    search_results_path = os.path.join(args.output_dir, "compression_search_results.json")
    with open(search_results_path, 'w') as f:
        # Strip tensors / nested objects before JSON serialization.
        serializable_results = []
        for result in search_results:
            serializable_result = {k: v for k, v in result.items() if k != 'compression_stats'}
            serializable_results.append(serializable_result)
        json.dump({
            'search_results': serializable_results,
            'best_config': {k: v for k, v in best_config.items() if k != 'compression_stats'},
            'baseline_performance': {'accuracy': baseline_acc}
        }, f, indent=2)

    return final_compressed_model, best_config, search_results


def quantize_weights(weight, bits=8):
    """Fake-quantize `weight` to `bits` bits (asymmetric, per-tensor).

    Returns (dequantized_weight, scale, zero_point, quantized_levels).
    A constant tensor has zero range and cannot be quantized; it is returned
    unchanged as (copy, None, None, None).
    """
    level_min = 0
    level_max = (1 << bits) - 1
    lo, hi = weight.min(), weight.max()

    # Degenerate range: every element is identical.
    if lo == hi:
        return weight.clone(), None, None, None

    scale = (hi - lo) / (level_max - level_min)
    zero_point = level_min - lo / scale

    # Map to integer levels, clamp to the representable range, then dequantize.
    levels = torch.clamp(torch.round(weight / scale + zero_point), level_min, level_max)
    dequantized = (levels - zero_point) * scale

    return dequantized, scale, zero_point, levels


def prune_weights(weight, sparsity=0.7):
    """Zero out the smallest-magnitude `sparsity` fraction of `weight`.

    Returns (pruned_weight, mask) where mask is a bool tensor of kept
    positions, or (copy, None) when sparsity <= 0. Ties at the threshold are
    kept, so realized sparsity can be slightly below the request.

    Fix: when int(numel * sparsity) rounded down to 0 (small tensors or low
    sparsity), torch.kthvalue was called with k=0 and raised a RuntimeError;
    that case is now a no-op with an all-True mask.
    """
    if sparsity <= 0:
        return weight.clone(), None

    weight_abs = weight.abs().flatten()
    k = int(weight_abs.numel() * sparsity)

    # Requested sparsity rounds down to "prune nothing"; kthvalue needs k >= 1.
    if k < 1:
        return weight.clone(), torch.ones_like(weight).bool()

    # Requested sparsity covers every element: prune everything.
    if k >= weight_abs.numel():
        return torch.zeros_like(weight), torch.zeros_like(weight).bool()

    # Threshold at the k-th smallest magnitude; keep elements at or above it.
    threshold = torch.kthvalue(weight_abs, k).values
    mask = (weight.abs() >= threshold)
    pruned = weight * mask.float()

    return pruned, mask


def compress_model(model, bits=4, sparsity=0.7):
    """Compress every weight tensor with fixed prune + quantize settings.

    Used by the manual compression mode. Returns (compressed_model,
    overall_sparsity); `model` itself is left untouched. Sparsity is
    measured right after pruning, before quantization.
    """
    print(f"压缩模型（量化位数={bits}, 剪枝稀疏度={sparsity}）...")
    clone = copy.deepcopy(model)

    n_total = 0
    n_zero = 0

    with torch.no_grad():
        for name, param in clone.named_parameters():
            if 'weight' not in name:
                continue

            n_total += param.numel()

            # Prune, count the zeros introduced, then fake-quantize in place.
            pruned, _mask = prune_weights(param.data, sparsity)
            n_zero += param.numel() - torch.count_nonzero(pruned).item()

            dequantized, _scale, _zero_point, _levels = quantize_weights(pruned, bits)
            param.copy_(dequantized)

    overall_sparsity = n_zero / n_total if n_total > 0 else 0
    print(f"整体稀疏度: {overall_sparsity:.4f}")

    return clone, overall_sparsity


def save_compressed_model(model, path, compress=True):
    """Serialize `model.state_dict()` to `path`, optionally gzip-compressed.

    Writes to a temporary file first so an interrupted save never leaves a
    truncated file at `path`. Returns the final file size in bytes.

    Fix: the payload was previously read fully into memory before gzipping
    (large checkpoints would be buffered whole), and the .temp file leaked if
    compression failed; the copy is now streamed and cleanup is guaranteed.
    """
    import shutil  # local import keeps the file's top-level imports untouched

    temp_path = path + ".temp"
    torch.save(model.state_dict(), temp_path)

    try:
        if compress:
            # Stream through gzip in chunks instead of one giant read().
            with open(temp_path, 'rb') as f_in, \
                    gzip.open(path, 'wb', compresslevel=9) as f_out:
                shutil.copyfileobj(f_in, f_out)
            os.remove(temp_path)
        else:
            os.rename(temp_path, path)
    finally:
        # Never leave a stray .temp file behind on failure.
        if os.path.exists(temp_path):
            os.remove(temp_path)

    return os.path.getsize(path)


def load_compressed_model(path, args, device=None):
    """Load an AConvNet checkpoint from `path`, gzip-compressed or plain.

    Detects gzip by its magic bytes instead of catching BadGzipFile mid-read;
    the previous version leaked an empty `.temp` file whenever the file was
    not gzipped, and duplicated the model-construction logic in both
    branches. Returns the model, moved to `device` when one is given.
    """

    def _torch_load(p):
        # Prefer weights_only=True (safe unpickling) on newer PyTorch;
        # fall back for versions that lack the parameter.
        try:
            return torch.load(p, map_location='cpu', weights_only=True)
        except TypeError:
            return torch.load(p, map_location='cpu')

    # gzip files start with the two magic bytes 0x1f 0x8b.
    with open(path, 'rb') as f:
        is_gzipped = f.read(2) == b'\x1f\x8b'

    if is_gzipped:
        temp_path = path + ".temp"
        try:
            # Decompress to a temporary file that torch.load can read.
            with gzip.open(path, 'rb') as f_in, open(temp_path, 'wb') as f_out:
                f_out.write(f_in.read())
            state_dict = _torch_load(temp_path)
        finally:
            # Always clean up, even if decompression or loading fails.
            if os.path.exists(temp_path):
                os.remove(temp_path)
    else:
        state_dict = _torch_load(path)

    model = AConvNet(num_classes=args.num_classes)
    model.load_state_dict(state_dict)

    if device:
        model = model.to(device)

    return model


def plot_compression_analysis(search_results, output_dir):
    """Render a 2x2 panel of compression trade-off charts from search results.

    Panels: (0,0) compression ratio vs. accuracy loss colored by sparsity;
    (0,1) sparsity vs. accuracy loss colored by bits; (1,0) boxplot of
    compression ratio per bit-width; (1,1) boxplot per sparsity level.
    Saves compression_analysis.png to output_dir. No-op on empty input.

    NOTE(review): the colorbars grab `axes[...].collections[0]`, which relies
    on the scatter being the first collection drawn on that axis — keep the
    draw order if editing. The boxplot `labels=` kwarg is presumably renamed
    `tick_labels` in newer matplotlib — verify against the pinned version.
    """
    if not search_results:
        return

    fig, axes = plt.subplots(2, 2, figsize=(12, 10))
    fig.suptitle('AConvNet Compression Analysis', fontsize=16)

    # Flatten the search records into parallel lists for plotting.
    acc_losses = [r['acc_loss'] * 100 for r in search_results]
    compression_ratios = [r['compression_ratio'] for r in search_results]
    sparsities = [r['sparsity'] * 100 for r in search_results]
    bits = [r['bits'] for r in search_results]

    # Accuracy loss vs. compression ratio, colored by sparsity.
    axes[0, 0].scatter(compression_ratios, acc_losses, c=sparsities, cmap='viridis', alpha=0.7)
    axes[0, 0].set_xlabel('Compression Ratio')
    axes[0, 0].set_ylabel('Accuracy Loss (%)')
    axes[0, 0].set_title('Performance vs Compression Trade-off')
    axes[0, 0].grid(True)
    cbar1 = plt.colorbar(axes[0, 0].collections[0], ax=axes[0, 0])
    cbar1.set_label('Sparsity (%)')

    # Sparsity vs. accuracy loss, colored by quantization bits.
    axes[0, 1].scatter(sparsities, acc_losses, c=bits, cmap='plasma', alpha=0.7)
    axes[0, 1].set_xlabel('Sparsity (%)')
    axes[0, 1].set_ylabel('Accuracy Loss (%)')
    axes[0, 1].set_title('Sparsity vs Performance Loss')
    axes[0, 1].grid(True)
    cbar2 = plt.colorbar(axes[0, 1].collections[0], ax=axes[0, 1])
    cbar2.set_label('Quantization Bits')

    # Distribution of compression ratios per bit-width.
    unique_bits = sorted(list(set(bits)))
    bit_compression_ratios = []
    bit_labels = []
    for bit in unique_bits:
        ratios = [r['compression_ratio'] for r in search_results if r['bits'] == bit]
        bit_compression_ratios.append(ratios)
        bit_labels.append(f'{bit}-bit')

    if bit_compression_ratios:
        axes[1, 0].boxplot(bit_compression_ratios, labels=bit_labels)
        axes[1, 0].set_xlabel('Quantization Bits')
        axes[1, 0].set_ylabel('Compression Ratio')
        axes[1, 0].set_title('Quantization Impact on Compression')
        axes[1, 0].grid(True)

    # Distribution of compression ratios per sparsity level.
    sparsity_levels = sorted(list(set([r['sparsity'] for r in search_results])))
    sparsity_compression_ratios = []
    sparsity_labels = []
    for sparsity in sparsity_levels:
        ratios = [r['compression_ratio'] for r in search_results if r['sparsity'] == sparsity]
        sparsity_compression_ratios.append(ratios)
        sparsity_labels.append(f'{sparsity * 100:.0f}%')

    if sparsity_compression_ratios:
        axes[1, 1].boxplot(sparsity_compression_ratios, labels=sparsity_labels)
        axes[1, 1].set_xlabel('Sparsity')
        axes[1, 1].set_ylabel('Compression Ratio')
        axes[1, 1].set_title('Sparsity Impact on Compression')
        axes[1, 1].grid(True)

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'compression_analysis.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"压缩分析图表已保存到: {os.path.join(output_dir, 'compression_analysis.png')}")


def visualize_sample_predictions(model, test_loader, device, output_dir, num_samples=8):
    """Save a grid of sample predictions (true vs. predicted class) as a PNG.

    Takes the first sample of each of up to `num_samples` batches; correct
    predictions are titled in green, errors in red. Writes
    sample_predictions.png to `output_dir`.

    Fixes: class names beyond the 10 hard-coded entries previously raised
    IndexError when num_classes > 10, and num_samples > 8 indexed past the
    fixed 2x4 axes grid.
    """
    model.eval()

    fig, axes = plt.subplots(2, 4, figsize=(16, 8))
    fig.suptitle('SAR Target Recognition Sample Predictions', fontsize=16)
    axes = axes.flatten()

    class_names = ['Tank', 'APC', 'Aircraft', 'Ship', 'Truck',
                   'Radar', 'Building', 'Bridge', 'Launcher', 'Other']

    def _name(idx):
        # Fall back to a generic label when the model has more classes than
        # the hard-coded name list.
        return class_names[idx] if 0 <= idx < len(class_names) else f'Class {idx}'

    # The figure has a fixed 2x4 grid; never index past it.
    max_panels = min(num_samples, len(axes))

    with torch.no_grad():
        for i, (images, labels) in enumerate(test_loader):
            if i >= max_panels:
                break

            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)

            # Plot only the first sample of this batch.
            img = images[0].cpu().squeeze().numpy()
            true_label = labels[0].cpu().item()
            pred_label = predicted[0].cpu().item()
            confidence = F.softmax(outputs[0], dim=0)[pred_label].cpu().item()

            ax = axes[i]
            ax.imshow(img, cmap='gray')
            ax.set_title(f'True: {_name(true_label)}\n'
                         f'Pred: {_name(pred_label)} ({confidence:.2f})',
                         color='green' if true_label == pred_label else 'red')
            ax.axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'sample_predictions.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"样本预测结果已保存到: {os.path.join(output_dir, 'sample_predictions.png')}")


def main():
    """Entry point: train and/or compress AConvNet per CLI arguments.

    args.mode selects the phases: 'train' trains only, 'compress' compresses
    an existing checkpoint, 'both' runs both in sequence. All artifacts
    (checkpoints, plots, JSON reports) are written to args.output_dir.
    """
    args = get_args()

    # Ensure the output directory exists before writing any artifacts.
    os.makedirs(args.output_dir, exist_ok=True)

    # Select the compute device (GPU when available).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Fresh model; weights are loaded below in compress-only mode.
    model = AConvNet(num_classes=args.num_classes)
    model = model.to(device)

    trained_model_path = os.path.join(args.output_dir, "aconvnet_trained.pth")
    training_history = None

    # ---- Training phase ----
    if args.mode in ['train', 'both']:
        print("=" * 50)
        print("开始训练阶段")
        print("=" * 50)

        # Build the train/test data loaders.
        train_loader, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.num_classes, args.dataset_size
        )

        # Train; returns the best-checkpoint model and its history.
        model, best_acc, training_history = train_model(
            model, train_loader, test_loader, device, args
        )

        # Persist the trained weights.
        torch.save(model.state_dict(), trained_model_path)
        print(f"训练好的模型已保存到: {trained_model_path}")

        # Qualitative check: sample prediction grid.
        visualize_sample_predictions(model, test_loader, device, args.output_dir)

        # Persist the training history for later analysis.
        history_path = os.path.join(args.output_dir, "training_history.json")
        with open(history_path, 'w') as f:
            json.dump(training_history, f, indent=2)

    # ---- Compress-only mode: load pretrained weights ----
    # NOTE(review): weights_only=False unpickles arbitrary objects — safe
    # only for trusted checkpoint files; confirm the source of these paths.
    elif args.mode == 'compress':
        if args.pretrained_path and os.path.exists(args.pretrained_path):
            model.load_state_dict(torch.load(args.pretrained_path, map_location=device, weights_only=False))
            print(f"已加载预训练模型: {args.pretrained_path}")
        elif os.path.exists(trained_model_path):
            model.load_state_dict(torch.load(trained_model_path, map_location=device, weights_only=False))
            print(f"已加载训练好的模型: {trained_model_path}")
        else:
            print("警告: 没有找到预训练模型，将使用随机初始化的模型进行压缩")

    # ---- Compression phase ----
    if args.mode in ['compress', 'both']:
        print("=" * 50)
        print(f"开始{'智能' if args.compression_mode == 'smart' else '手动'}压缩阶段")
        print("=" * 50)

        # Test loader is needed to evaluate compression candidates.
        _, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.num_classes, args.dataset_size
        )

        # Baseline: accuracy and in-memory size before compression.
        print("评估原始模型...")
        original_accuracy = evaluate_model(model, test_loader, device)
        original_size = get_model_size(model)

        # Save the uncompressed checkpoint and record its on-disk size.
        original_path = os.path.join(args.output_dir, "aconvnet_original.pth")
        original_file_size_bytes = save_compressed_model(model, original_path, compress=False)
        original_file_size = original_file_size_bytes / (1024 * 1024)

        print(f"原始模型准确率: {original_accuracy:.4f}")
        print(f"原始模型内存大小: {original_size:.2f} MB")
        print(f"原始模型文件大小: {original_file_size:.2f} MB")

        # Dispatch to the chosen compression method.
        if args.compression_mode == 'smart':
            # Smart search over sparsity/bit-width combinations.
            compressed_model, best_config, search_results = smart_compression_search(
                model, test_loader, device, args
            )
            actual_sparsity = best_config['sparsity']
            compression_bits = best_config['bits']

            # Trade-off charts for the search log.
            plot_compression_analysis(search_results, args.output_dir)

        else:
            # Manual compression with fixed CLI-provided parameters.
            print(f"使用手动压缩参数: 稀疏度={args.sparsity}, 量化位数={args.bits}")
            start_time = time.time()
            compressed_model, actual_sparsity = compress_model(model, args.bits, args.sparsity)
            compression_time = time.time() - start_time
            print(f"压缩完成，耗时: {compression_time:.2f} 秒")
            compression_bits = args.bits
            best_config = {
                'sparsity': args.sparsity,
                'bits': args.bits,
                'compression_time': compression_time
            }
            search_results = []

        # Evaluate the compressed model on the same test set.
        print("评估压缩后的模型...")
        compressed_model = compressed_model.to(device)
        compressed_accuracy = evaluate_model(compressed_model, test_loader, device)

        # In-memory size after compression.
        compressed_size = get_model_size(compressed_model)

        # Save gzip-compressed checkpoint and record the real file size.
        compressed_path = os.path.join(args.output_dir, "aconvnet_compressed.pth")
        compressed_file_size_bytes = save_compressed_model(compressed_model, compressed_path, compress=True)
        compressed_file_size = compressed_file_size_bytes / (1024 * 1024)

        print(f"压缩后模型准确率: {compressed_accuracy:.4f}")
        print(f"压缩后模型内存大小: {compressed_size:.2f} MB")
        print(f"压缩后模型文件大小: {compressed_file_size:.2f} MB")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

        # On-disk compression ratio and relative accuracy change.
        file_compression_ratio = original_file_size / compressed_file_size
        acc_change = compressed_accuracy - original_accuracy
        acc_change_percent = (acc_change / original_accuracy) * 100

        # Round-trip check: reload the saved compressed checkpoint.
        print("测试加载压缩模型...")
        loaded_model = load_compressed_model(compressed_path, args, device)
        loaded_accuracy = evaluate_model(loaded_model, test_loader, device)
        print(f"加载后模型准确率: {loaded_accuracy:.4f}")

        # ---- Comprehensive evaluation (multi-dimensional comparison) ----
        print("\n" + "=" * 60)
        print("🔍 开始全面模型评估")
        print("=" * 60)

        # Compare original vs. compressed across size/speed/accuracy metrics.
        evaluation_results = evaluate_compression_pipeline(
            original_model=model,
            compressed_model=loaded_model,
            test_loader=test_loader,
            original_model_path=original_path,
            compressed_model_path=compressed_path,
            output_dir=args.output_dir
        )

        # Assemble the full run report.
        results = {
            'model': 'AConvNet',
            'mode': args.mode,
            'compression_mode': args.compression_mode,
            'num_classes': args.num_classes,
            'model_params': {
                'num_classes': args.num_classes,
                'dataset_size': args.dataset_size
            },
            'training_params': {
                'epochs': args.epochs,
                'batch_size': args.batch_size,
                'learning_rate': args.learning_rate,
                'dataset_size': args.dataset_size
            } if args.mode in ['train', 'both'] else None,
            'compression_params': {
                'final_bits': compression_bits,
                'final_sparsity': float(actual_sparsity),
                'max_performance_loss': args.max_performance_loss,
                'target_compression_ratio': args.target_compression_ratio,
                'compression_strategy': args.compression_strategy
            },
            'model_sizes': {
                'original_memory_mb': float(original_size),
                'original_file_mb': float(original_file_size),
                'compressed_memory_mb': float(compressed_size),
                'compressed_file_mb': float(compressed_file_size),
                'file_compression_ratio': float(file_compression_ratio)
            },
            'performance_metrics': {
                'original_accuracy': float(original_accuracy),
                'compressed_accuracy': float(compressed_accuracy),
                'loaded_accuracy': float(loaded_accuracy),
                'accuracy_change': float(acc_change),
                'accuracy_change_percent': float(acc_change_percent)
            },
            'training_history': training_history,
            'best_compression_config': best_config,
            'comprehensive_evaluation': evaluation_results
        }

        # Persist the run report.
        results_path = os.path.join(args.output_dir, "complete_results.json")
        with open(results_path, 'w') as f:
            json.dump(results, f, indent=2)

        # ---- Final summary printout ----
        print("\n" + "=" * 60)
        print("🎯 AConvNet 模型压缩最终报告")
        print("=" * 60)

        # Pull headline numbers from the comprehensive evaluation.
        comparison = evaluation_results['comparison']
        original_perf = evaluation_results['original_model']['performance_metrics']
        compressed_perf = evaluation_results['compressed_model']['performance_metrics']

        print(f"\n📈 关键指标总结:")
        if training_history:
            print(
                f"  训练最佳准确率:         {training_history['best_test_acc']:.4f} ({training_history['best_test_acc'] * 100:.2f}%)")
        print(f"  原始模型准确率:         {original_perf['accuracy']:.4f} ({original_perf['accuracy'] * 100:.2f}%)")
        print(f"  压缩模型准确率:         {compressed_perf['accuracy']:.4f} ({compressed_perf['accuracy'] * 100:.2f}%)")
        print(f"  准确率损失:             {comparison['accuracy_loss_pct']:.2f}%")
        print(f"  文件压缩率:             {comparison['size_compression_ratio']:.2f}x")
        print(f"  推理加速:               {comparison['speed_improvement_ratio']:.2f}x")
        print(f"  内存节省:               {comparison['memory_reduction_ratio']:.2f}x")
        print(f"  压缩效率得分:           {comparison['compression_score']:.2f}")

        print(f"\n🏆 综合评价:")
        print(f"  推荐等级:               {comparison['recommendation']}")

        # Deployment recommendation based on loss/ratio thresholds.
        if comparison['accuracy_loss_pct'] < 10 and comparison['size_compression_ratio'] > 3:
            print(f"\n✅ 压缩效果优秀！建议部署使用。")
        elif comparison['accuracy_loss_pct'] < 15 and comparison['size_compression_ratio'] > 2:
            print(f"\n⚠️  压缩效果可接受，可根据应用场景决定是否使用。")
        else:
            print(f"\n❌ 压缩效果不理想，建议调整压缩策略。")
            print(f"   建议：减少稀疏度或提高量化位数")

        print(f"\n📄 详细评估报告:")
        print(f"  完整结果:               {results_path}")
        print(f"  全面评估:               {os.path.join(args.output_dir, 'comprehensive_evaluation.json')}")
        print(f"  压缩搜索记录:           {os.path.join(args.output_dir, 'compression_search_results.json')}")

    print(f"\n🎉 AConvNet模型训练和智能压缩完成！")


# Script entry point.
if __name__ == "__main__":
    main()