"""
改进的遥感图像分类模型训练和压缩示例 - ResNet50版本
解决过拟合问题，优化压缩策略，提升模型性能
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import json
import gzip
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import OrderedDict
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib

matplotlib.use('Agg')  # 使用非交互式后端
plt.rcParams['font.sans-serif'] = ['DejaVu Sans', 'Arial', 'sans-serif']  # 设置字体

@torch.no_grad()
def parameter_sharing_kmeans(model: nn.Module,
                             k: int = 32,
                             iters: int = 10,
                             apply_to=('Conv2d', 'Linear'),
                             verbose: bool = True):
    """Apply K-Means weight sharing (value quantization) to selected layers.

    Pure-PyTorch simplified K-Means (no third-party dependency). For every
    matching Conv2d / Linear layer the weight tensor is quantized to at most
    ``k`` shared values; quantized weights are written back in place (biases
    are left untouched).

    Args:
        model: network to compress (modified in place).
        k: codebook size per layer (number of shared values).
        iters: K-Means iterations per layer (10 is usually enough).
        apply_to: tuple of layer class names (string match) to process.
        verbose: print a per-layer summary when True.

    Returns:
        The same ``model`` instance (in-place modification).
    """
    def _kmeans_1d(x, k, iters):
        """Run K-Means on a 1-D tensor ``x``; return (centroids, assignment)."""
        if x.numel() < k:
            # Degenerate case: fewer elements than clusters — every unique
            # value becomes its own centroid. torch.unique() returns sorted
            # values and searchsorted maps each element to the index of its
            # exact match. (The previous ``bucketize(x, sorted) - 1`` was
            # off by one: for an exact match bucketize already returns the
            # value's own index, so subtracting 1 picked the wrong centroid.)
            uniq = torch.unique(x)
            assign = torch.searchsorted(uniq, x)
            return uniq, assign

        # Initialize centroids by sampling k distinct positions from the data.
        idx = torch.randperm(x.numel(), device=x.device)[:k]
        centroids = x[idx].clone()

        for _ in range(iters):
            # Assignment step: squared distance |x - c|^2 via broadcasting,
            # shape (n, k).
            dist = (x.unsqueeze(1) - centroids.unsqueeze(0)) ** 2
            assign = dist.argmin(dim=1)

            # Update step: each centroid becomes its cluster mean; empty
            # clusters are re-seeded with a random data point.
            for j in range(k):
                mask = (assign == j)
                if mask.any():
                    centroids[j] = x[mask].mean()
                else:
                    centroids[j] = x[torch.randint(0, x.numel(), (1,), device=x.device)]
        return centroids, assign

    n_layers = 0
    for name, module in model.named_modules():
        if module.__class__.__name__ in apply_to and hasattr(module, 'weight') and module.weight is not None:
            W = module.weight.data.view(-1)  # flatten to 1-D
            if W.numel() == 0:
                continue

            if verbose:
                print(f"[PS] Layer: {name:30s}  |  params: {W.numel():,}  ->  codebook: {k}")

            # Run the simplified K-Means on this layer's weights.
            centroids, assign = _kmeans_1d(W, k, iters)

            # Quantize: replace every weight by its assigned centroid.
            quantized = centroids[assign].view_as(module.weight.data)
            module.weight.data.copy_(quantized)
            n_layers += 1

    if verbose:
        print(f"[PS] 完成参数共享（K-Means）层数: {n_layers}")
    return model

def get_args():
    """Parse command-line arguments (parameter-sharing / K-Means weight-sharing run).

    Returns:
        argparse.Namespace where ``ps_apply_to`` has already been mapped from
        the CLI string choice to the tuple of layer class names expected by
        ``parameter_sharing_kmeans``.
    """
    # os/argparse are imported at module level; the previous redundant
    # function-local re-imports were removed.
    parser = argparse.ArgumentParser(description="遥感分类训练 + 参数共享压缩（K-Means Weight Sharing）")

    # Default data dir: ../data_split relative to this script.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_dir = os.path.normpath(
        os.path.join(script_dir, "..", "data_split")
    )
    # Optional override via environment variable.
    env_data_dir = os.environ.get("RS_DATA_ROOT")
    if env_data_dir and os.path.isdir(env_data_dir):
        default_data_dir = env_data_dir

    # ===== model / data / output =====
    parser.add_argument("--model", "-m", default="resnet50",
                        choices=["resnet18", "resnet50", "resnet101", "efficientnet_b0"],
                        help="要使用的模型架构")
    parser.add_argument("--data-dir", "-d", default=default_data_dir,
                        help="数据集根目录（应包含 train/ 与 test/ 子目录）")
    parser.add_argument("--output-dir", "-o", default="./output",
                        help="模型与结果输出目录")
    parser.add_argument("--num-classes", type=int, default=10,
                        help="分类类别数（若与数据集解析结果不一致，将以数据集为准）")

    # ===== training hyper-parameters =====
    parser.add_argument("--epochs", "-e", type=int, default=25, help="训练轮数")
    parser.add_argument("--batch-size", "-b", type=int, default=16, help="批次大小")
    parser.add_argument("--learning-rate", "-lr", type=float, default=1e-3, help="基础学习率")
    parser.add_argument("--weight-decay", type=float, default=5e-4, help="权重衰减")
    parser.add_argument("--patience", type=int, default=8, help="early stopping patience（若后续启用早停时使用）")
    parser.add_argument("--img-size", type=int, default=224, help="输入图像尺寸")

    # ===== run mode & pretraining =====
    parser.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                        help="运行模式：仅训练/仅参数共享压缩/两者都运行")
    parser.add_argument("--pretrained-path", type=str, default=None,
                        help="（可选）自定义预训练权重路径（.pth），若提供则会加载（strict=False）")
    # Bug fix: a lone store_true flag with default=True could never be
    # disabled from the CLI; --no-pretrained now turns pretraining off
    # while --use-pretrained keeps working exactly as before.
    parser.add_argument("--use-pretrained", dest="use_pretrained", action="store_true", default=True,
                        help="使用ImageNet预训练权重（默认开启）")
    parser.add_argument("--no-pretrained", dest="use_pretrained", action="store_false",
                        help="disable ImageNet pretrained weights")

    # ===== parameter-sharing (K-Means weight sharing) options =====
    parser.add_argument("--num-shares", type=int, default=32,
                        help="每层共享值数量 K（K-Means 簇数）")
    parser.add_argument("--ps-iters", type=int, default=10,
                        help="K-Means 迭代次数")
    parser.add_argument("--ps-apply-to", choices=["linear", "conv2d", "both"], default="both",
                        help="对哪些层做权重共享")

    args = parser.parse_args()

    # Map the string choice to the tuple of layer class names that
    # parameter_sharing_kmeans expects.
    apply_map = {
        "linear": ("Linear",),
        "conv2d": ("Conv2d",),
        "both":   ("Conv2d", "Linear"),
    }
    args.ps_apply_to = apply_map[args.ps_apply_to]

    # Friendly hint when the data directory does not exist.
    if not os.path.isdir(args.data_dir):
        print(f"[提示] 数据目录不存在: {args.data_dir}")
        print(r"👉 请用 --data-dir 或环境变量 RS_DATA_ROOT 指定，例如：")
        print(r'   python main.py --data-dir "E:\USM\year2sem3\实习数据集\data_split"')

    return args

def load_resnet_model(model_name, num_classes=10, use_pretrained=True):
    """Build a torchvision backbone with a Dropout(0.5) + Linear classifier head.

    Args:
        model_name: one of "resnet18", "resnet50", "resnet101", "efficientnet_b0".
        num_classes: output dimension of the new classification head.
        use_pretrained: load ImageNet weights when True, random init otherwise.

    Returns:
        The model with its classification head replaced.

    Raises:
        ValueError: if ``model_name`` is not supported.
    """
    print(f"加载 {model_name} 模型...")

    # Resolve builder function, pretrained weights enum and display label.
    # (Previously four copy-pasted branches; deduplicated into a dispatch.)
    if model_name == "resnet18":
        from torchvision.models import resnet18, ResNet18_Weights
        builder, weights, label = resnet18, ResNet18_Weights.IMAGENET1K_V1, "ResNet18"
    elif model_name == "resnet50":
        from torchvision.models import resnet50, ResNet50_Weights
        builder, weights, label = resnet50, ResNet50_Weights.IMAGENET1K_V2, "ResNet50"
    elif model_name == "resnet101":
        from torchvision.models import resnet101, ResNet101_Weights
        builder, weights, label = resnet101, ResNet101_Weights.IMAGENET1K_V2, "ResNet101"
    elif model_name == "efficientnet_b0":
        from torchvision.models import efficientnet_b0, EfficientNet_B0_Weights
        builder, weights, label = efficientnet_b0, EfficientNet_B0_Weights.IMAGENET1K_V1, "EfficientNet-B0"
    else:
        raise ValueError(f"不支持的模型: {model_name}")

    if use_pretrained:
        model = builder(weights=weights)
        print(f"使用ImageNet预训练的{label}")
    else:
        model = builder(weights=None)
        print(f"使用随机初始化的{label}")

    # Replace the classification head with Dropout + Linear. EfficientNet
    # exposes it as .classifier (Dropout, Linear); ResNets as .fc.
    if model_name == "efficientnet_b0":
        model.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(model.classifier[1].in_features, num_classes)
        )
    else:
        model.fc = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(model.fc.in_features, num_classes)
        )

    return model

def create_dataloaders(data_dir, batch_size=16, img_size=224, num_classes=10):
    """Build train/test DataLoaders over an on-disk ImageFolder dataset."""
    print("准备真实遥感图像分类数据集 (ImageFolder)...")
    train_root = os.path.join(data_dir, "train")
    test_root = os.path.join(data_dir, "test")

    # Shared preprocessing: resize slightly larger, center-crop, normalize
    # with ImageNet statistics; the train pipeline adds light augmentation.
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    base_ops = [
        transforms.Resize(int(img_size * 1.15)),
        transforms.CenterCrop(img_size),
    ]
    augment_ops = [
        transforms.RandomHorizontalFlip(0.5),
        transforms.RandomVerticalFlip(0.1),
        transforms.ColorJitter(0.2, 0.2, 0.2, 0.05),
    ]
    train_tf = transforms.Compose(base_ops + augment_ops + [transforms.ToTensor(), normalize])
    test_tf = transforms.Compose(base_ops + [transforms.ToTensor(), normalize])

    train_set = datasets.ImageFolder(train_root, transform=train_tf)
    test_set = datasets.ImageFolder(test_root, transform=test_tf)

    class_names = train_set.classes
    print(f"发现的类: {class_names}（共 {len(class_names)} 类）")
    if num_classes != len(class_names):
        print(f"[提示] --num-classes 建议设为 {len(class_names)}")

    use_pin = torch.cuda.is_available()
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                              num_workers=2, pin_memory=use_pin, drop_last=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False,
                             num_workers=2, pin_memory=use_pin)
    print(f"训练集大小: {len(train_set)} | 测试集大小: {len(test_set)}")
    return train_loader, test_loader

def train_model(model, train_loader, test_loader, device, args):
    """Train the classifier with label smoothing, layer-wise LRs and OneCycleLR.

    Keeps a deep copy of the best-performing weights (by test accuracy) and
    reloads them before returning. Saves training curves to args.output_dir.

    Args:
        model: nn.Module to train (modified in place).
        train_loader: training DataLoader.
        test_loader: evaluation DataLoader.
        device: torch.device to run on.
        args: namespace providing epochs, learning_rate, weight_decay,
            use_pretrained and output_dir.

    Returns:
        (model, best_test_acc, history) where history is a dict of the
        per-epoch loss/accuracy lists.
    """
    print("开始训练ResNet模型...")

    # Cross-entropy with label smoothing to reduce over-confidence.
    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    # Discriminative learning rates: a pretrained backbone gets a 10x smaller
    # LR than the freshly initialized classifier head ('fc' parameters).
    if args.use_pretrained:
        backbone_params, classifier_params = [], []
        for name, param in model.named_parameters():
            if 'fc' in name:
                classifier_params.append(param)
            else:
                backbone_params.append(param)
        optimizer = optim.AdamW([
            {'params': backbone_params, 'lr': args.learning_rate * 0.1},
            {'params': classifier_params, 'lr': args.learning_rate}
        ], weight_decay=args.weight_decay)
    else:
        optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    # OneCycleLR sized per batch: total_steps = epochs * steps_per_epoch,
    # so scheduler.step() must be called once per BATCH (see loop below).
    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=args.learning_rate,
        epochs=args.epochs,
        steps_per_epoch=len(train_loader),
        pct_start=0.3
    )

    train_losses, train_accuracies, test_accuracies = [], [], []
    best_test_acc = 0.0
    best_model_state = None

    for epoch in range(args.epochs):
        model.train()
        running_loss = 0.0
        correct_train = 0
        total_train = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for images, labels in pbar:
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()

            # Gradient clipping for training stability.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()
            # Bug fix: the scheduler was previously stepped once per EPOCH,
            # but this OneCycleLR is configured with per-batch steps
            # (steps_per_epoch=len(train_loader)), so the LR never advanced
            # past early warmup. Step it after every optimizer update.
            scheduler.step()

            # Running statistics for the progress bar.
            running_loss += loss.item()
            pred = outputs.argmax(dim=1)
            total_train += labels.size(0)
            correct_train += (pred == labels).sum().item()

            current_lr = scheduler.get_last_lr()[0]
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'Acc': f'{100. * correct_train / total_train:.2f}%',
                'LR': f'{current_lr:.6f}'
            })

        # Epoch-level bookkeeping and evaluation.
        avg_train_loss = running_loss / len(train_loader)
        train_acc = correct_train / total_train
        test_acc = evaluate_model(model, test_loader, device)

        train_losses.append(avg_train_loss)
        train_accuracies.append(train_acc)
        test_accuracies.append(test_acc)

        # Snapshot the weights whenever test accuracy improves.
        if test_acc > best_test_acc:
            best_test_acc = test_acc
            best_model_state = copy.deepcopy(model.state_dict())

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train Acc: {train_acc:.4f}, '
              f'Test Acc: {test_acc:.4f}, '
              f'Best Test Acc: {best_test_acc:.4f}')

    # Restore the best checkpoint seen during training.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试准确率: {best_test_acc:.4f}')

    # Save loss/accuracy/overfitting curves.
    plot_training_curves(train_losses, train_accuracies, test_accuracies, args.output_dir)

    return model, best_test_acc, {
        'train_losses': train_losses,
        'train_accuracies': train_accuracies,
        'test_accuracies': test_accuracies,
        'best_test_acc': best_test_acc
    }



def evaluate_model(model, dataloader, device):
    """Compute top-1 classification accuracy of ``model`` over ``dataloader``."""
    model.eval()
    n_correct, n_seen = 0, 0

    with torch.no_grad():
        for batch_images, batch_labels in dataloader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            predictions = model(batch_images).argmax(dim=1)
            n_correct += (predictions == batch_labels).sum().item()
            n_seen += batch_labels.size(0)

    # Guard against an empty loader to avoid division by zero.
    return n_correct / n_seen if n_seen > 0 else 0

def comprehensive_evaluation(model, dataloader, device, class_names):
    """Evaluate performance (acc/precision/recall/F1/confusion matrix) plus
    efficiency metrics (parameter count, model size in MB, peak GPU memory).

    Args:
        model: nn.Module to evaluate (switched to eval mode).
        dataloader: evaluation DataLoader yielding (images, labels).
        device: torch.device the model lives on.
        class_names: class label names (kept for interface parity; not used
            in the metric computation itself).

    Returns:
        dict of JSON-serializable metrics; 'gpu_peak_mem_mb' is None on CPU.
    """
    # These sklearn helpers are not covered by the module-level import, so
    # they are imported lazily here. (The previous unused local
    # ``import numpy as np`` was removed.)
    from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix

    model.eval()
    y_true, y_pred = [], []

    # Track peak GPU memory during this evaluation pass (CUDA only).
    use_cuda = (device.type == 'cuda' and torch.cuda.is_available())
    if use_cuda:
        torch.cuda.reset_peak_memory_stats(device)

    with torch.no_grad():
        for images, labels in dataloader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, preds = torch.max(outputs, 1)
            y_true.extend(labels.detach().cpu().numpy().tolist())
            y_pred.extend(preds.detach().cpu().numpy().tolist())

    # -- performance metrics -- #
    accuracy = float(accuracy_score(y_true, y_pred))
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average='macro', zero_division=0)
    precision = float(precision); recall = float(recall); f1 = float(f1)
    cm = confusion_matrix(y_true, y_pred).astype(int).tolist()
    y_true = [int(x) for x in y_true]; y_pred = [int(x) for x in y_pred]

    # -- efficiency metrics -- #
    num_params = int(sum(p.numel() for p in model.parameters()))
    size_mb = float(get_model_size(model))  # helper defined in this module
    gpu_mem_mb = float(torch.cuda.max_memory_allocated(device) / (1024 * 1024)) if use_cuda else None

    return {
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1_score': f1,
        'confusion_matrix': cm,
        'y_true': y_true,
        'y_pred': y_pred,
        # Model-efficiency fields reported alongside accuracy metrics:
        'num_params': num_params,          # parameter count
        'model_size_mb': size_mb,          # in-memory model size (MB)
        'gpu_peak_mem_mb': gpu_mem_mb      # eval-time peak GPU memory (None on CPU)
    }

def plot_training_curves(train_losses, train_accuracies, test_accuracies, output_dir):
    """Render loss / accuracy / overfitting-gap panels and save them as a PNG."""
    xs = range(1, len(train_losses) + 1)
    save_path = os.path.join(output_dir, 'training_curves.png')

    plt.figure(figsize=(15, 5))

    # Panel 1: training loss.
    plt.subplot(1, 3, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss', linewidth=2)
    plt.title('Training Loss', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Loss', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 2: train vs. test accuracy.
    plt.subplot(1, 3, 2)
    plt.plot(xs, train_accuracies, 'b-', label='Training Accuracy', linewidth=2)
    plt.plot(xs, test_accuracies, 'r-', label='Test Accuracy', linewidth=2)
    plt.title('Training and Test Accuracy', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Accuracy', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 3: generalization gap (train accuracy minus test accuracy).
    plt.subplot(1, 3, 3)
    gap = [tr - te for tr, te in zip(train_accuracies, test_accuracies)]
    plt.plot(xs, gap, 'g-', label='Overfitting Gap', linewidth=2)
    plt.title('Overfitting Analysis', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Train Acc - Test Acc', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.axhline(y=0, color='k', linestyle='--', alpha=0.5)

    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {save_path}")

def visualize_samples(dataloader, output_dir, num_samples=16):
    """Save a 4x4 grid of de-normalized sample images with their class labels."""
    label_names = dataloader.dataset.classes
    n_rows = n_cols = 4
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 4 * n_rows))
    flat_axes = np.array(axes).reshape(-1)

    batch_images, batch_labels = next(iter(dataloader))
    # Undo the ImageNet normalization so images display with natural colors.
    mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)

    shown = min(num_samples, len(batch_images), len(flat_axes))
    for idx in range(shown):
        restored = batch_images[idx].clone() * std + mean
        restored = torch.clamp(restored, 0, 1).permute(1, 2, 0).numpy()
        flat_axes[idx].imshow(restored)
        flat_axes[idx].set_title(f'{label_names[batch_labels[idx]]}', fontsize=11)
        flat_axes[idx].axis('off')
    # Hide any unused grid cells.
    for idx in range(shown, len(flat_axes)):
        flat_axes[idx].axis('off')

    path = os.path.join(output_dir, 'data_samples.png')
    plt.tight_layout()
    plt.savefig(path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据样本已保存到: {path}")

def get_model_size(model):
    """Return the in-memory footprint of a model's parameters + buffers, in MB."""
    # Bytes = element count * per-element size, summed over params and buffers.
    n_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    n_bytes += sum(b.nelement() * b.element_size() for b in model.buffers())
    return n_bytes / (1024 * 1024)

def save_eval_report(eval_res, output_dir, filename='eval_report.json'):
    """Serialize an evaluation-result dict to pretty-printed UTF-8 JSON.

    NumPy scalars/arrays are converted recursively to native Python types so
    ``json.dump`` never fails on them. (The redundant function-local numpy
    import was removed — ``np`` is already imported at module level.)
    """
    path = os.path.join(output_dir, filename)

    def _to_py(o):
        # Recursively convert numpy types to native Python types.
        # np.bool_ must be checked explicitly: it is not an np.integer,
        # and json.dump rejects it otherwise.
        if isinstance(o, np.bool_):
            return bool(o)
        if isinstance(o, np.integer):
            return int(o)
        if isinstance(o, np.floating):
            return float(o)
        if isinstance(o, np.ndarray):
            return o.tolist()
        if isinstance(o, (list, tuple)):
            return [_to_py(x) for x in o]
        if isinstance(o, dict):
            return {k: _to_py(v) for k, v in o.items()}
        return o

    sanitized = _to_py(eval_res)
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(sanitized, f, indent=2, ensure_ascii=False)
    print(f"评估结果已保存到: {path}")

def plot_confusion_matrix_simple(cm, class_names, output_dir, filename='confusion_matrix.png'):
    """Draw an annotated confusion-matrix heatmap and save it as a PNG."""
    matrix = np.array(cm)
    n = len(class_names)

    plt.figure(figsize=(8, 7))
    image = plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.Blues)
    plt.colorbar(image)

    ticks = np.arange(n)
    plt.xticks(ticks, class_names, rotation=45, ha='right')
    plt.yticks(ticks, class_names)

    # Annotate every cell; switch to white text on dark cells for contrast.
    threshold = matrix.max() / 2.0 if matrix.size else 0
    for row in range(n):
        for col in range(n):
            cell = matrix[row, col]
            plt.text(col, row, format(int(cell)), ha="center", va="center",
                     color="white" if cell > threshold else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    out_path = os.path.join(output_dir, filename)
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"混淆矩阵已保存到: {out_path}")

def main():
    """Entry point: train -> K-Means parameter sharing -> final evaluation.

    Evaluates both the original and the weight-shared model on the test set
    and prints performance plus efficiency metrics (parameter count, model
    size, peak GPU memory). Artifacts (checkpoints, plots, JSON reports) go
    to args.output_dir.
    """
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)

    # Device selection: prefer CUDA when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Build the model; optionally layer custom pretrained weights on top
    # (strict=False tolerates head-shape mismatches).
    model = load_resnet_model(args.model, args.num_classes, args.use_pretrained)
    if args.pretrained_path and os.path.isfile(args.pretrained_path):
        print(f"加载自定义预训练权重：{args.pretrained_path}")
        # NOTE(review): torch.load unpickles arbitrary objects — for
        # untrusted .pth files consider weights_only=True (recent PyTorch).
        state = torch.load(args.pretrained_path, map_location='cpu')
        model.load_state_dict(state, strict=False)
    model.to(device)

    # Data loaders plus a sample-grid visualization saved to output_dir.
    train_loader, test_loader = create_dataloaders(
        args.data_dir, args.batch_size, args.img_size, args.num_classes
    )
    visualize_samples(train_loader, args.output_dir)

    # Training phase (skipped when --mode compress).
    if args.mode in ['train', 'both']:
        print("=== 训练阶段 ===")
        model, best_acc, history = train_model(model, train_loader, test_loader, device, args)
        trained_ckpt = os.path.join(args.output_dir, f"{args.model}_trained.pth")
        torch.save(model.state_dict(), trained_ckpt)
        print(f"训练完成，最佳测试准确率: {best_acc:.4f}，权重保存至: {trained_ckpt}")

    # Helper: backfill efficiency fields if an evaluation dict lacks them.
    def _fill_efficiency_fields(eval_dict, mdl):
        if 'num_params' not in eval_dict:
            eval_dict['num_params'] = sum(p.numel() for p in mdl.parameters())
        if 'model_size_mb' not in eval_dict:
            eval_dict['model_size_mb'] = get_model_size(mdl)
        if 'gpu_peak_mem_mb' not in eval_dict:
            eval_dict['gpu_peak_mem_mb'] = None
        return eval_dict

    # === evaluate the original (uncompressed) model ===
    print("=== 最终评估（原模型） ===")
    class_names = getattr(train_loader.dataset, "classes", [str(i) for i in range(args.num_classes)])
    base_eval = comprehensive_evaluation(model, test_loader, device, class_names)
    base_eval = _fill_efficiency_fields(base_eval, model)

    print("最终评估结果(原模型):")
    print(json.dumps({
        'accuracy':   float(base_eval['accuracy']),
        'precision':  float(base_eval['precision']),
        'recall':     float(base_eval['recall']),
        'f1_score':   float(base_eval['f1_score'])
    }, indent=2, ensure_ascii=False))

    # Efficiency summary: parameter count / model size / peak GPU memory.
    print(
        "效率指标：",
        f"参数数量={base_eval['num_params']:,} | "
        f"模型大小={base_eval['model_size_mb']:.2f} MB | "
        f"峰值显存={base_eval['gpu_peak_mem_mb'] if base_eval['gpu_peak_mem_mb'] is not None else 'CPU / N/A'}"
    )

    save_eval_report(base_eval, args.output_dir, filename='eval_report.json')
    plot_confusion_matrix_simple(base_eval['confusion_matrix'], class_names, args.output_dir, filename='confusion_matrix.png')

    # === parameter-sharing compression & evaluation ===
    if args.mode in ['compress', 'both']:
        print("=== 参数共享压缩阶段（K-Means）===")
        # Work on a deep copy so the original weights stay intact.
        shared_model = copy.deepcopy(model).to(device).eval()

        parameter_sharing_kmeans(
            shared_model,
            k=args.num_shares,
            iters=args.ps_iters,
            apply_to=args.ps_apply_to
        )

        shared_ckpt = os.path.join(args.output_dir, f"{args.model}_shared.pth")
        torch.save(shared_model.state_dict(), shared_ckpt)
        print(f"参数共享后模型已保存到: {shared_ckpt}")

        print("=== 最终评估（共享模型） ===")
        shared_eval = comprehensive_evaluation(shared_model, test_loader, device, class_names)
        shared_eval = _fill_efficiency_fields(shared_eval, shared_model)

        print("最终评估结果(共享模型):")
        print(json.dumps({
            'accuracy':   float(shared_eval['accuracy']),
            'precision':  float(shared_eval['precision']),
            'recall':     float(shared_eval['recall']),
            'f1_score':   float(shared_eval['f1_score'])
        }, indent=2, ensure_ascii=False))

        # Efficiency summary for the weight-shared model.
        print(
            "效率指标：",
            f"参数数量={shared_eval['num_params']:,} | "
            f"模型大小={shared_eval['model_size_mb']:.2f} MB | "
            f"峰值显存={shared_eval['gpu_peak_mem_mb'] if shared_eval['gpu_peak_mem_mb'] is not None else 'CPU / N/A'}"
        )

        save_eval_report(shared_eval, args.output_dir, filename='eval_report_shared.json')
        plot_confusion_matrix_simple(shared_eval['confusion_matrix'], class_names, args.output_dir, filename='confusion_matrix_shared.png')

# Script entry point: run the full training / compression / evaluation pipeline.
if __name__ == "__main__":
    main()