#!/usr/bin/env python
"""
改进的遥感图像分类模型训练和压缩示例 - ResNet50版本
解决过拟合问题，优化压缩策略，提升模型性能
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import json
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.cluster import KMeans
import matplotlib
from PIL import Image
import random
import zipfile
import tarfile
import shutil

# kagglehub is optional: it is only required for --auto-download. Without it,
# the script still works against a locally prepared dataset directory.
try:
    import kagglehub
except Exception:
    kagglehub = None
    print("提示：未安装 kagglehub 或导入失败。如需自动下载，请先安装：pip install kagglehub")

def _is_image_file(p):
    return os.path.splitext(p)[1].lower() in {".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff", ".webp"}

def _safe_copy(src, dst):
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    if not os.path.isfile(dst):
        shutil.copy2(src, dst)

def _extract_if_archive(path):
    """若是 zip/tar 则解压到同目录同名文件夹，返回解压后的目录；否则原样返回。"""
    if os.path.isdir(path):
        return path
    base = os.path.dirname(path)
    name = os.path.splitext(os.path.basename(path))[0]
    out_dir = os.path.join(base, name)
    if zipfile.is_zipfile(path):
        os.makedirs(out_dir, exist_ok=True)
        with zipfile.ZipFile(path) as zf:
            zf.extractall(out_dir)
        return out_dir
    if tarfile.is_tarfile(path):
        os.makedirs(out_dir, exist_ok=True)
        with tarfile.open(path) as tf:
            tf.extractall(out_dir)
        return out_dir
    return path

def _find_class_root(search_root):
    """
    递归寻找“类别文件夹结构”的根目录：该目录下存在 ≥2 个子文件夹，且这些子文件夹里能找到图像文件。
    """
    if not os.path.isdir(search_root):
        return None
    subdirs = [d for d in os.listdir(search_root) if os.path.isdir(os.path.join(search_root, d))]
    candidate = []
    for sd in subdirs:
        sd_path = os.path.join(search_root, sd)
        imgs = [f for f in os.listdir(sd_path) if _is_image_file(f)]
        if imgs:
            candidate.append(sd_path)
    if len(candidate) >= 2:
        return search_root
    for sd in subdirs:
        res = _find_class_root(os.path.join(search_root, sd))
        if res is not None:
            return res
    return None

def _auto_download_kaggle_dataset(kaggle_dataset: str) -> str:
    """Download *kaggle_dataset* via kagglehub and return its local directory,
    extracting first when the download turns out to be an archive."""
    if kagglehub is None:
        raise RuntimeError("kagglehub 未安装或导入失败。请先 pip install kagglehub")
    print(f"开始使用 kagglehub 下载数据集：{kaggle_dataset}")
    local_path = kagglehub.dataset_download(kaggle_dataset)
    print("Kaggle 数据集下载完成，本地路径：", local_path)
    return _extract_if_archive(local_path)

def _materialize_raw_to_datadir(raw_root: str, data_dir: str):
    """Copy every class subfolder found under *raw_root* into ``data_dir/_raw``.

    Aggregation only -- no train/test splitting happens here. Returns the
    ``_raw`` directory path. Raises RuntimeError when no class structure exists.
    """
    class_root = _find_class_root(raw_root)
    if class_root is None:
        raise RuntimeError(f"未在 {raw_root} 找到类别文件夹结构（至少两个类别子文件夹且有图片）")
    raw_out = os.path.join(data_dir, "_raw")
    os.makedirs(raw_out, exist_ok=True)
    for cls in sorted(os.listdir(class_root)):
        cls_src = os.path.join(class_root, cls)
        if not os.path.isdir(cls_src):
            continue
        images = [f for f in os.listdir(cls_src) if _is_image_file(f)]
        if not images:
            continue  # skip class dirs with no usable images
        cls_dst = os.path.join(raw_out, cls)
        os.makedirs(cls_dst, exist_ok=True)
        for fname in images:
            _safe_copy(os.path.join(cls_src, fname), os.path.join(cls_dst, fname))
    print(f"已将原始类别数据复制到：{raw_out}")
    return raw_out

def _split_from_raw(raw_root: str, out_root: str, test_size: float, seed: int = 42):
    """从 raw_root（classA/, classB/...）切分到 out_root/train & out_root/test"""
    random.seed(seed)
    train_root = os.path.join(out_root, "train")
    test_root = os.path.join(out_root, "test")
    os.makedirs(train_root, exist_ok=True)
    os.makedirs(test_root, exist_ok=True)

    classes = [d for d in os.listdir(raw_root) if os.path.isdir(os.path.join(raw_root, d))]
    for cls in classes:
        src = os.path.join(raw_root, cls)
        imgs = [f for f in os.listdir(src) if _is_image_file(f)]
        if not imgs:
            print(f"⚠️ 类别 {cls} 无图片，跳过"); continue
        random.shuffle(imgs)
        n_test = max(1, int(len(imgs) * test_size)) if len(imgs) >= 5 else max(1, len(imgs)//5)
        test_files = set(imgs[:n_test])
        train_files = imgs[n_test:]

        os.makedirs(os.path.join(train_root, cls), exist_ok=True)
        os.makedirs(os.path.join(test_root, cls), exist_ok=True)
        for f in train_files:
            _safe_copy(os.path.join(src, f), os.path.join(train_root, cls, f))
        for f in test_files:
            _safe_copy(os.path.join(src, f), os.path.join(test_root, cls, f))
    print(f"✅ 已生成划分：{out_root}（train/test）")
    return out_root

def _prepare_dataset_entrypoint(data_dir: str, auto_download: bool, kaggle_dataset: str, test_size: float) -> str:
    """Return a root directory usable directly by ImageFolder.

    Resolution order:
      - if data_dir already holds train/ and test/ -> return it as-is;
      - else if a class-folder structure exists inside data_dir -> stage it
        under data_dir/_raw_local and split into data_dir/train, data_dir/test;
      - else if auto_download=True -> download from Kaggle, stage under
        data_dir/_raw, then split;
      - otherwise raise RuntimeError.
    """
    if os.path.isdir(os.path.join(data_dir, "train")) and os.path.isdir(os.path.join(data_dir, "test")):
        return data_dir

    os.makedirs(data_dir, exist_ok=True)
    local_class_root = _find_class_root(data_dir)
    if local_class_root:
        print(f"在 {data_dir} 找到类别根：{local_class_root}，开始切分...")
        tmp_raw = os.path.join(data_dir, "_raw_local")
        if not os.path.isdir(tmp_raw):
            # BUGFIX: create the staging dir up front. Previously it was only
            # created implicitly per-file by _safe_copy, so a class root whose
            # classes all turned out empty left tmp_raw missing and made
            # _split_from_raw fail with FileNotFoundError.
            os.makedirs(tmp_raw, exist_ok=True)
            for cls in os.listdir(local_class_root):
                src = os.path.join(local_class_root, cls)
                if os.path.isdir(src):
                    imgs = [f for f in os.listdir(src) if _is_image_file(f)]
                    if imgs:
                        dst = os.path.join(tmp_raw, cls)
                        os.makedirs(dst, exist_ok=True)
                        for f in imgs:
                            _safe_copy(os.path.join(src, f), os.path.join(dst, f))
        _split_from_raw(tmp_raw, data_dir, test_size=test_size)
        return data_dir

    if auto_download:
        raw_path = _auto_download_kaggle_dataset(kaggle_dataset)
        raw_root = _materialize_raw_to_datadir(raw_path, data_dir)
        _split_from_raw(raw_root, data_dir, test_size=test_size)
        return data_dir

    raise RuntimeError(
        f"在 {data_dir} 未发现 train/test 或可识别的类别结构，且未启用 --auto-download。"
        " 请手动整理类别文件夹，或加 --auto-download 让程序自动下载并准备。"
    )


def _run_kmeans_1d(flat: np.ndarray, k: int, max_iter: int, random_state: int):
    """Cluster a [N, 1] array with KMeans, tolerant of sklearn version skew.

    Args:
        flat: numpy array shaped [N, 1].
        k: requested cluster count (clamped to >= 1).
        max_iter: KMeans iteration cap.
        random_state: seed for reproducible clustering.
    Returns:
        (labels [N], centers [k])
    """
    k = max(1, int(k))
    # BUGFIX: sklearn validates estimator parameters at *fit* time, not at
    # construction, so the old try/except TypeError around KMeans(...) could
    # never catch an unsupported n_init="auto" on old versions. Choose the
    # value by installed version instead, and keep a fit-time fallback.
    try:
        km = KMeans(
            n_clusters=k,
            random_state=random_state,
            n_init=_kmeans_n_init(),
            max_iter=int(max_iter),
        )
        labels = km.fit_predict(flat)               # [N]
    except (TypeError, ValueError):
        # Very old sklearn: retry with the plain integer n_init.
        km = KMeans(
            n_clusters=k,
            random_state=random_state,
            n_init=10,
            max_iter=int(max_iter),
        )
        labels = km.fit_predict(flat)               # [N]
    centers = km.cluster_centers_.reshape(-1)       # force [k]; avoids k=1 squeeze to scalar
    return labels, centers

def _kmeans_n_init():
    """
    兼容不同 sklearn 版本的 n_init 写法：
    - sklearn >= 1.4 用 "auto"
    - 旧版本用整数（比如 10）
    """
    try:
        import sklearn
        parts = sklearn.__version__.split(".")
        major = int(parts[0]); minor = int(parts[1])
        return "auto" if (major > 1 or (major == 1 and minor >= 4)) else 10
    except Exception:
        return 10


class EnsureRGB(object):
    """Transform guaranteeing a 3-channel RGB PIL image, so grayscale or RGBA
    inputs do not break the channel-wise Normalize step downstream."""

    def __call__(self, img):
        # ImageFolder yields PIL images, but tolerate raw numpy arrays too.
        if isinstance(img, np.ndarray):
            img = Image.fromarray(img)
        mode = getattr(img, "mode", None)
        return img if mode == "RGB" else img.convert("RGB")


# Module-level matplotlib configuration (runs on import).
matplotlib.use('Agg')  # non-interactive backend: figures can be saved on headless machines
plt.rcParams['font.sans-serif'] = ['DejaVu Sans', 'Arial', 'sans-serif']  # font fallback chain

def get_args():
    """Parse CLI arguments, with an auto-download fallback when no dataset is found.

    Returns the parsed argparse.Namespace. May mutate ``auto_download`` and
    ``data_dir`` when the configured data directory is missing or unprepared,
    so that running the script with no arguments still works.

    Raises:
        ValueError: when --test-size is outside (0, 1).
    """
    parser = argparse.ArgumentParser(description="改进的ResNet遥感分类（权重共享压缩 + 微调）")

    # Default data dir: sibling of this script, overridable via RS_DATA_ROOT.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_dir = os.path.normpath(os.path.join(script_dir, "..", "data_split"))
    env_data_dir = os.environ.get("RS_DATA_ROOT")
    if env_data_dir and os.path.isdir(env_data_dir):
        default_data_dir = env_data_dir

    parser.add_argument("--model", "-m", default="resnet50",
                        choices=["resnet18", "resnet50", "resnet101", "efficientnet_b0"],
                        help="要使用的模型架构")
    parser.add_argument("--data-dir", "-d", default=default_data_dir,
                        help="数据集根目录（应包含 train/ 与 test/ 或按类的原始文件夹）")
    parser.add_argument("--output-dir", "-o", default="./output", help="模型与结果输出目录")
    parser.add_argument("--num-classes", type=int, default=10, help="分类类别数（尽量与数据一致）")

    parser.add_argument("--epochs", "-e", type=int, default=25, help="训练轮数")
    parser.add_argument("--batch-size", "-b", type=int, default=16, help="批次大小")
    parser.add_argument("--learning-rate", "-lr", type=float, default=1e-3, help="基础学习率")
    parser.add_argument("--weight-decay", type=float, default=5e-4, help="权重衰减")
    parser.add_argument("--patience", type=int, default=8, help="early stopping patience（如需）")
    parser.add_argument("--dataset-size", type=int, default=5000, help="数据集大小（占位）")
    parser.add_argument("--img-size", type=int, default=224, help="输入图像尺寸")

    parser.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                        help="运行模式：仅训练/仅压缩/两者都运行")
    parser.add_argument("--pretrained-path", type=str, default=None,
                        help="已训练权重路径（.pth），compress 模式下会加载")
    parser.add_argument("--use-pretrained", dest="use_pretrained", action="store_true", default=True,
                        help="使用ImageNet预训练权重（默认启用）")
    # BUGFIX: --use-pretrained was store_true with default=True, so pretrained
    # weights could never be disabled from the CLI. Keep the original flag for
    # backward compatibility and add an explicit opt-out.
    parser.add_argument("--no-pretrained", dest="use_pretrained", action="store_false",
                        help="禁用ImageNet预训练权重（覆盖默认启用）")

    # Auto-download and split options.
    parser.add_argument("--auto-download", action="store_true", default=False,
                        help="当 data-dir 不含 train/test 且无类别结构时，自动下载 Kaggle 数据集并切分")
    parser.add_argument("--kaggle-dataset", type=str,
                        default="mahmoudreda55/satellite-image-classification",
                        help="Kaggle 数据集标识（kagglehub 用），例如 'user/dataset-name'")
    parser.add_argument("--test-size", type=float, default=0.2,
                        help="自动划分测试集比例（0~1）")

    # Weight-sharing (KMeans) options.
    parser.add_argument("--num-shares", type=int, default=16, help="KMeans 簇数（共享权重数）")
    parser.add_argument("--kmeans-max-iter", type=int, default=50, help="KMeans 最大迭代次数")
    parser.add_argument("--kmeans-random-state", type=int, default=0, help="KMeans 随机种子")
    parser.add_argument("--ws-apply-to", choices=["linear", "conv2d", "both"], default="linear",
                        help="对哪些层做权重共享（当前实现主要对 Linear 生效）")

    # Finetuning options.
    parser.add_argument("--finetune-epochs", type=int, default=3, help="共享后微调轮数")
    parser.add_argument("--finetune-lr-scale", type=float, default=0.1, help="微调学习率缩放")

    args = parser.parse_args()

    if not (0.0 < args.test_size < 1.0):
        raise ValueError(f"--test-size 必须在 (0,1) 区间内，当前为 {args.test_size}")

    # Fallback so that a bare "python script.py" run can still self-provision data.
    def _has_train_test(dirpath: str) -> bool:
        return (os.path.isdir(os.path.join(dirpath, "train")) and
                os.path.isdir(os.path.join(dirpath, "test")))

    data_dir_exists = os.path.isdir(args.data_dir)
    data_ready = data_dir_exists and _has_train_test(args.data_dir)

    if not data_ready:
        if not data_dir_exists:
            print(f"[提示] 数据目录不存在: {args.data_dir}")
        if not args.auto_download:
            print("[提示] 未检测到可用数据集，已自动开启 --auto-download，并将使用 Kaggle 数据集。")
            args.auto_download = True
        # Only redirect the data dir when the user did not point at a custom one.
        if (not data_dir_exists) or (os.path.normpath(args.data_dir) == os.path.normpath(default_data_dir)):
            kaggle_dir = os.path.normpath(os.path.join(script_dir, "..", "data_split_kaggle"))
            if os.path.normpath(args.data_dir) != kaggle_dir:
                print(f"[提示] 将数据目录切换为（用于自动下载与切分）：{kaggle_dir}")
            args.data_dir = kaggle_dir

    return args

def load_resnet_model(model_name, num_classes=10, use_pretrained=True):
    """Build a torchvision backbone with a Dropout(0.5) classification head.

    Args:
        model_name: one of resnet18 / resnet50 / resnet101 / efficientnet_b0.
        num_classes: output dimension of the fresh classifier head.
        use_pretrained: load ImageNet weights when True, random init otherwise.
    Returns:
        The constructed nn.Module.
    Raises:
        ValueError: for an unsupported model_name.
    """
    print(f"加载 {model_name} 模型...")

    def _head(in_features):
        # Shared classifier head: Dropout regularization + new Linear layer.
        return nn.Sequential(nn.Dropout(0.5), nn.Linear(in_features, num_classes))

    if model_name == "resnet18":
        from torchvision.models import resnet18, ResNet18_Weights
        if use_pretrained:
            model = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)
            print("使用ImageNet预训练的ResNet18")
        else:
            model = resnet18(weights=None)
            print("使用随机初始化的ResNet18")
        model.fc = _head(model.fc.in_features)

    elif model_name == "resnet50":
        from torchvision.models import resnet50, ResNet50_Weights
        if use_pretrained:
            model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
            print("使用ImageNet预训练的ResNet50")
        else:
            model = resnet50(weights=None)
            print("使用随机初始化的ResNet50")
        model.fc = _head(model.fc.in_features)

    elif model_name == "resnet101":
        from torchvision.models import resnet101, ResNet101_Weights
        if use_pretrained:
            model = resnet101(weights=ResNet101_Weights.IMAGENET1K_V2)
            print("使用ImageNet预训练的ResNet101")
        else:
            model = resnet101(weights=None)
            print("使用随机初始化的ResNet101")
        model.fc = _head(model.fc.in_features)

    elif model_name == "efficientnet_b0":
        from torchvision.models import efficientnet_b0, EfficientNet_B0_Weights
        if use_pretrained:
            model = efficientnet_b0(weights=EfficientNet_B0_Weights.IMAGENET1K_V1)
            print("使用ImageNet预训练的EfficientNet-B0")
        else:
            model = efficientnet_b0(weights=None)
            print("使用随机初始化的EfficientNet-B0")
        # EfficientNet keeps its head under ``classifier`` instead of ``fc``.
        model.classifier = _head(model.classifier[1].in_features)

    else:
        raise ValueError(f"不支持的模型: {model_name}")

    return model

def create_dataloaders(data_dir, batch_size=16, dataset_size=5000, img_size=224, num_classes=10,
                       auto_download=False, kaggle_dataset="mahmoudreda55/satellite-image-classification",
                       test_size=0.2):
    """Build train/test DataLoaders from an on-disk ImageFolder layout,
    downloading and splitting the dataset first when necessary."""
    print("准备真实遥感图像分类数据集 (ImageFolder)...")

    # Make sure a train/test layout exists (may trigger download + split).
    root = _prepare_dataset_entrypoint(
        data_dir=data_dir,
        auto_download=auto_download,
        kaggle_dataset=kaggle_dataset,
        test_size=test_size,
    )

    imagenet_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    train_tf = transforms.Compose([
        EnsureRGB(),
        transforms.RandomResizedCrop(img_size, scale=(0.8, 1.0)),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.1),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05),
        transforms.ToTensor(),
        imagenet_norm,
    ])
    eval_tf = transforms.Compose([
        EnsureRGB(),
        transforms.Resize(int(img_size * 1.15)),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        imagenet_norm,
    ])

    train_dataset = datasets.ImageFolder(os.path.join(root, "train"), transform=train_tf)
    test_dataset = datasets.ImageFolder(os.path.join(root, "test"), transform=eval_tf)

    class_names = train_dataset.classes
    print(f"发现的类: {class_names}（共 {len(class_names)} 类）")
    if num_classes != len(class_names):
        print(f"[提示] --num-classes 建议设为 {len(class_names)}")

    workers = 0  # keep 0 for Windows compatibility
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                              num_workers=workers, pin_memory=True, drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                             num_workers=workers, pin_memory=True)

    print(f"训练集大小: {len(train_dataset)} | 测试集大小: {len(test_dataset)}")
    return train_loader, test_loader


def train_model(model, train_loader, test_loader, device, args):
    """Train a classification model (weight-sharing pipeline variant).

    Techniques:
      - label-smoothed CrossEntropy loss
      - AdamW with split backbone/head learning rates when pretrained
      - OneCycleLR stepped once per batch
      - gradient clipping
      - optional early stopping (args.patience > 0)
      - restores the best test-accuracy checkpoint before returning

    Returns:
        (model_with_best_weights, best_test_acc, history_dict)
    """
    print("开始训练ResNet模型...")

    # Label smoothing regularizes overconfident predictions.
    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    # With pretrained weights, give the backbone a 10x smaller LR than the new
    # head (covers both ResNet's ``fc`` and EfficientNet's ``classifier``).
    if getattr(args, "use_pretrained", False):
        backbone_params, head_params = [], []
        for name, param in model.named_parameters():
            if ("fc" in name) or ("classifier" in name):
                head_params.append(param)
            else:
                backbone_params.append(param)
        optimizer = optim.AdamW(
            [
                {"params": backbone_params, "lr": args.learning_rate * 0.1},
                {"params": head_params,     "lr": args.learning_rate},
            ],
            weight_decay=args.weight_decay,
        )
    else:
        optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    # OneCycleLR, stepped per batch. BUGFIX: pass one max_lr per param group --
    # the previous scalar max_lr gave the backbone the same peak LR as the
    # head, silently defeating the 0.1x backbone scaling above.
    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=[group["lr"] for group in optimizer.param_groups],
        epochs=args.epochs,
        steps_per_epoch=len(train_loader),
        pct_start=0.3,
    )

    train_losses, train_accuracies, test_accuracies = [], [], []
    best_test_acc = 0.0
    best_model_state = None

    # Optional early stopping when patience > 0.
    use_early_stop = getattr(args, "patience", 0) and args.patience > 0
    no_improve_epochs = 0

    for epoch in range(args.epochs):
        model.train()
        running_loss = 0.0
        correct_train = 0
        total_train = 0

        pbar = tqdm(train_loader, desc=f"Epoch {epoch + 1}/{args.epochs}")
        for images, labels in pbar:
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad(set_to_none=True)
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()

            # Gradient clipping stabilizes training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()
            scheduler.step()  # OneCycleLR requires a per-batch step

            # Running statistics.
            running_loss += loss.item()
            preds = outputs.argmax(dim=1)
            total_train += labels.size(0)
            correct_train += (preds == labels).sum().item()

            # Surface the current LR in the progress bar.
            current_lr = optimizer.param_groups[0]["lr"]
            pbar.set_postfix({
                "Loss": f"{loss.item():.4f}",
                "Acc":  f"{100.0 * correct_train / max(1, total_train):.2f}%",
                "LR":   f"{current_lr:.6f}",
            })

        # Epoch-level training metrics.
        avg_train_loss = running_loss / max(1, len(train_loader))
        train_acc = correct_train / max(1, total_train)

        # Test accuracy after each epoch.
        test_acc = evaluate_model(model, test_loader, device)

        train_losses.append(avg_train_loss)
        train_accuracies.append(train_acc)
        test_accuracies.append(test_acc)

        # Track the best checkpoint so far.
        if test_acc > best_test_acc:
            best_test_acc = test_acc
            best_model_state = copy.deepcopy(model.state_dict())
            no_improve_epochs = 0
        else:
            no_improve_epochs += 1

        print(
            f"Epoch [{epoch + 1}/{args.epochs}] - "
            f"Train Loss: {avg_train_loss:.4f}, "
            f"Train Acc: {train_acc:.4f}, "
            f"Test Acc: {test_acc:.4f}, "
            f"Best Test Acc: {best_test_acc:.4f}"
        )

        # Early stopping (optional).
        if use_early_stop and no_improve_epochs >= args.patience:
            print(f"早停触发：连续 {args.patience} 个 epoch 无提升，停止训练。")
            break

    # Restore the best weights before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f"训练完成! 最佳测试准确率: {best_test_acc:.4f}")

    # Training curve plots.
    plot_training_curves(train_losses, train_accuracies, test_accuracies, args.output_dir)

    return model, best_test_acc, {
        "train_losses": train_losses,
        "train_accuracies": train_accuracies,
        "test_accuracies": test_accuracies,
        "best_test_acc": best_test_acc,
    }

def evaluate_model(model, dataloader, device):
    """Return top-1 accuracy of *model* over *dataloader* (0 when empty)."""
    model.eval()
    n_correct, n_seen = 0, 0

    with torch.no_grad():
        for images, labels in dataloader:
            images = images.to(device)
            labels = labels.to(device)
            predicted = model(images).argmax(dim=1)
            n_seen += labels.size(0)
            n_correct += (predicted == labels).sum().item()

    return n_correct / n_seen if n_seen > 0 else 0


def comprehensive_evaluation(model, dataloader, device, class_names):
    """Full evaluation: accuracy, macro/weighted precision/recall/F1, confusion
    matrix, plus efficiency metrics (parameter count, model size, latency,
    throughput, peak GPU memory). Returns everything as one dict."""
    model.eval()
    all_labels, all_preds = [], []
    elapsed = 0.0

    # Timing / memory setup.
    on_gpu = (device.type == 'cuda' and torch.cuda.is_available())
    if on_gpu:
        torch.cuda.reset_peak_memory_stats(device)

    with torch.no_grad():
        for images, labels in dataloader:
            images, labels = images.to(device), labels.to(device)

            # Time one forward pass (synchronize for accurate GPU timing).
            if on_gpu:
                torch.cuda.synchronize()
            start = time.perf_counter()
            outputs = model(images)
            if on_gpu:
                torch.cuda.synchronize()
            elapsed += time.perf_counter() - start

            preds = outputs.argmax(dim=1)
            all_labels.extend(labels.cpu().numpy().tolist())
            all_preds.extend(preds.cpu().numpy().tolist())

    # Classification metrics.
    from sklearn.metrics import (accuracy_score, precision_recall_fscore_support,
                                 confusion_matrix, classification_report)

    accuracy = accuracy_score(all_labels, all_preds)
    p_macro, r_macro, f1_macro, _ = precision_recall_fscore_support(
        all_labels, all_preds, average='macro', zero_division=0)
    p_weighted, r_weighted, f1_weighted, _ = precision_recall_fscore_support(
        all_labels, all_preds, average='weighted', zero_division=0)
    cm = confusion_matrix(all_labels, all_preds)

    # Per-class breakdown (e.g. cloudy/desert/green_area/water).
    report_str = classification_report(all_labels, all_preds, zero_division=0, output_dict=False)
    print("\n=== Per-class report ===\n" + report_str)

    # Efficiency metrics.
    num_params = sum(p.numel() for p in model.parameters())
    size_mb = get_model_size(model)

    n_images = len(all_labels)
    avg_latency_ms = (elapsed / max(1, n_images)) * 1000.0
    throughput = n_images / max(1e-9, elapsed)

    gpu_mem_mb = torch.cuda.max_memory_allocated(device) / (1024 * 1024) if on_gpu else None

    print("=== Efficiency ===")
    print(f"参数量: {num_params:,}")
    print(f"模型大小: {size_mb:.2f} MB")
    print(f"平均推理时延: {avg_latency_ms:.3f} ms/图")
    print(f"吞吐: {throughput:.2f} img/s")
    if gpu_mem_mb is not None:
        print(f"评估过程峰值显存: {gpu_mem_mb:.2f} MB")

    return {
        'accuracy': accuracy,
        'precision': p_macro,
        'recall': r_macro,
        'f1_score': f1_macro,
        'precision_weighted': p_weighted,
        'recall_weighted': r_weighted,
        'f1_weighted': f1_weighted,
        'confusion_matrix': cm.tolist(),
        'y_true': all_labels,
        'y_pred': all_preds,
        'num_params': num_params,
        'model_size_mb': size_mb,
        'avg_latency_ms': avg_latency_ms,
        'throughput_img_s': throughput,
        'gpu_peak_mem_mb': gpu_mem_mb
    }

def plot_training_curves(train_losses, train_accuracies, test_accuracies, output_dir):
    """Save loss / accuracy / overfitting-gap curves as a single PNG."""
    xs = range(1, len(train_losses) + 1)

    plt.figure(figsize=(15, 5))

    # Panel 1: training loss.
    plt.subplot(1, 3, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss', linewidth=2)
    plt.title('Training Loss', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Loss', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 2: train vs test accuracy.
    plt.subplot(1, 3, 2)
    plt.plot(xs, train_accuracies, 'b-', label='Training Accuracy', linewidth=2)
    plt.plot(xs, test_accuracies, 'r-', label='Test Accuracy', linewidth=2)
    plt.title('Training and Test Accuracy', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Accuracy', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 3: generalization gap (train acc minus test acc).
    plt.subplot(1, 3, 3)
    gap = [tr - te for tr, te in zip(train_accuracies, test_accuracies)]
    plt.plot(xs, gap, 'g-', label='Overfitting Gap', linewidth=2)
    plt.title('Overfitting Analysis', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Train Acc - Test Acc', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.axhline(y=0, color='k', linestyle='--', alpha=0.5)

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'training_curves.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {os.path.join(output_dir, 'training_curves.png')}")

def visualize_samples(dataloader, output_dir, num_samples=16):
    """Save a 4x4 grid of de-normalized samples, titled with dataset class names."""
    class_names = getattr(dataloader.dataset, "classes", None)
    if class_names is None:
        class_names = [str(i) for i in range(10)]

    fig, axes = plt.subplots(4, 4, figsize=(16, 16))
    fig.suptitle('样本可视化', fontsize=16)

    images, labels = next(iter(dataloader))
    # Constants to undo ImageNet normalization for display.
    mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
    for idx in range(min(num_samples, len(images))):
        ax = axes[idx // 4, idx % 4]
        pic = torch.clamp(images[idx].clone() * std + mean, 0, 1)
        ax.imshow(pic.permute(1, 2, 0).numpy())
        cls_idx = int(labels[idx])
        title = class_names[cls_idx] if cls_idx < len(class_names) else f"Class {cls_idx}"
        ax.set_title(title, fontsize=12)
        ax.axis('off')

    plt.tight_layout()
    out_path = os.path.join(output_dir, 'data_samples.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据样本已保存到: {out_path}")

def get_model_size(model):
    """Return the in-memory footprint of the model's parameters + buffers, in MB."""
    n_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    n_bytes += sum(b.nelement() * b.element_size() for b in model.buffers())
    return n_bytes / (1024 * 1024)

def main():
    """Entry point: prepare data -> train -> (optional) weight-sharing
    compression + finetune -> final evaluation."""
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)

    # Device selection.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Build the model.
    model = load_resnet_model(args.model, args.num_classes, args.use_pretrained)
    model.to(device)

    # BUGFIX: --pretrained-path was documented ("compress 模式下会加载") but the
    # checkpoint was never actually loaded anywhere; load it here when given.
    if args.pretrained_path:
        state = torch.load(args.pretrained_path, map_location=device)
        model.load_state_dict(state)
        print(f"已加载权重: {args.pretrained_path}")

    # === Training phase ===
    if args.mode in ['train', 'both']:
        print("=== 训练阶段 ===")
        train_loader, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.dataset_size, args.img_size, args.num_classes,
            auto_download=args.auto_download,
            kaggle_dataset=args.kaggle_dataset,
            test_size=args.test_size
        )
        visualize_samples(train_loader, args.output_dir)

        model, best_acc, history = train_model(model, train_loader, test_loader, device, args)
        trained_ckpt = os.path.join(args.output_dir, f"{args.model}_trained.pth")
        torch.save(model.state_dict(), trained_ckpt)
        print(f"训练完成，最佳测试准确率: {best_acc:.4f}，权重保存至: {trained_ckpt}")

    # === Weight-sharing compression + finetuning (compress or both) ===
    if args.mode in ['compress', 'both']:
        print("=== 权重共享阶段（KMeans 权重聚类）===")

        # Current implementation shares weights for Linear layers only.
        for name, module in model.named_modules():
            if isinstance(module, nn.Linear):
                W = module.weight.data.cpu().numpy().astype(np.float32, copy=False)  # [out, in]
                flat = W.reshape(-1, 1)  # N x 1 for 1-D clustering

                labels, centers = _run_kmeans_1d(
                    flat=flat,
                    k=args.num_shares,
                    max_iter=args.kmeans_max_iter,
                    random_state=args.kmeans_random_state
                )

                # Replace each weight by its cluster centroid.
                new_flat = centers[labels]                          # N
                new_W = new_flat.reshape(W.shape)                   # [out, in]
                with torch.no_grad():
                    module.weight.copy_(
                        torch.from_numpy(new_W).to(module.weight.dtype).to(module.weight.device)
                    )

        ws_path = os.path.join(args.output_dir, f"{args.model}_ws.pth")
        torch.save(model.state_dict(), ws_path)
        print(f"权重共享后模型已保存到: {ws_path}")

        # Finetune the shared-weight model briefly at a reduced LR.
        print("=== 微调权重共享模型 ===")
        train_loader, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.dataset_size, args.img_size, args.num_classes,
            auto_download=args.auto_download,
            kaggle_dataset=args.kaggle_dataset,
            test_size=args.test_size
        )
        model.to(device)

        # Temporarily swap in the finetuning hyperparameters.
        orig_epochs, orig_lr = args.epochs, args.learning_rate
        args.epochs = args.finetune_epochs
        args.learning_rate = orig_lr * args.finetune_lr_scale

        model, tuned_acc, _ = train_model(model, train_loader, test_loader, device, args)
        print(f"微调完成，测试准确率: {tuned_acc:.4f}")

        # Restore the original hyperparameters.
        args.epochs, args.learning_rate = orig_epochs, orig_lr

    # === Final evaluation ===
    print("=== 最终评估 ===")
    train_loader_tmp, test_loader = create_dataloaders(
        args.data_dir, args.batch_size, args.dataset_size, args.img_size, args.num_classes,
        auto_download=args.auto_download,
        kaggle_dataset=args.kaggle_dataset,
        test_size=args.test_size
    )
    class_names = getattr(train_loader_tmp.dataset, "classes", [str(i) for i in range(args.num_classes)])

    model.to(device).eval()
    eval_res = comprehensive_evaluation(model, test_loader, device, class_names)
    print("最终评估结果:")
    print(json.dumps({
        'accuracy':  eval_res['accuracy'],
        'precision': eval_res['precision'],
        'recall':    eval_res['recall'],
        'f1_score':  eval_res['f1_score']
    }, indent=2, ensure_ascii=False))

def plot_confusion_matrices(original_eval, finetuned_eval, class_names, output_dir):
    """Render side-by-side annotated confusion matrices for the original and
    the compressed/finetuned model and save the comparison figure.

    Both eval dicts must carry 'confusion_matrix' (nested list of int counts)
    and 'accuracy' (float). Previously the two panels were copy-pasted; the
    shared rendering now lives in one helper.
    """

    def _render(ax, eval_res, title_prefix):
        # Draw one annotated confusion-matrix heatmap on the given axes.
        cm = np.asarray(eval_res['confusion_matrix'])
        im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
        ax.figure.colorbar(im, ax=ax)
        ax.set(xticks=np.arange(len(class_names)),
               yticks=np.arange(len(class_names)),
               xticklabels=class_names, yticklabels=class_names,
               title=f"{title_prefix}\nAccuracy: {eval_res['accuracy']:.3f}",
               ylabel='True label',
               xlabel='Predicted label')
        # Annotate each cell; flip text color past the midpoint for contrast.
        thresh = cm.max() / 2.
        for i in range(len(class_names)):
            for j in range(len(class_names)):
                ax.text(j, i, format(cm[i][j], 'd'),
                        ha="center", va="center",
                        color="white" if cm[i][j] > thresh else "black")
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 8))
    _render(ax1, original_eval, "Original Model")
    _render(ax2, finetuned_eval, "Compressed Model")

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'confusion_matrices_comparison.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"混淆矩阵对比已保存到: {os.path.join(output_dir, 'confusion_matrices_comparison.png')}")

# Script entry point.
if __name__ == "__main__":
    main()