"""
改进的遥感图像分类模型训练和压缩示例 - ResNet50版本
解决过拟合问题，优化压缩策略，提升模型性能
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import json
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import matplotlib
import autokeras as ak
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix
import torchvision.datasets as datasets
from PIL import Image
import zipfile, tarfile, shutil, random

def _is_image_file(p):
    return os.path.splitext(p)[1].lower() in {".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff", ".webp"}

def _extract_if_archive(path: str) -> str:
    """若 path 是 zip/tar 则解压到同目录同名文件夹，返回解压后的目录；否则原样返回。"""
    if os.path.isdir(path):
        return path
    base = os.path.dirname(path)
    name = os.path.splitext(os.path.basename(path))[0]
    out_dir = os.path.join(base, name)
    try:
        if zipfile.is_zipfile(path):
            os.makedirs(out_dir, exist_ok=True)
            with zipfile.ZipFile(path) as zf:
                zf.extractall(out_dir)
            return out_dir
        if tarfile.is_tarfile(path):
            os.makedirs(out_dir, exist_ok=True)
            with tarfile.open(path) as tf:
                tf.extractall(out_dir)
            return out_dir
    except Exception as e:
        print(f"[警告] 解压失败：{e}")
    return path

def _find_class_root(search_root: str):
    """Locate a directory whose immediate subfolders look like class folders.

    A directory qualifies when at least two of its subdirectories contain at
    least one image file.  The search recurses depth-first and returns the
    first qualifying directory, or None when nothing matches.
    """
    if not os.path.isdir(search_root):
        return None
    subdir_paths = [
        os.path.join(search_root, entry)
        for entry in os.listdir(search_root)
        if os.path.isdir(os.path.join(search_root, entry))
    ]
    n_class_like = sum(
        1 for sd in subdir_paths
        if any(_is_image_file(f) for f in os.listdir(sd))
    )
    if n_class_like >= 2:
        return search_root
    for sd in subdir_paths:
        found = _find_class_root(sd)
        if found is not None:
            return found
    return None

class EnsureRGB(object):
    """Transform that guarantees a 3-channel RGB image.

    Converts grayscale / RGBA inputs so the later Normalize step never sees a
    channel-count mismatch; RGB images pass through untouched.
    """
    def __call__(self, img):
        mode = getattr(img, "mode", None)
        return img if mode == "RGB" else img.convert("RGB")

def ensure_clean_dir(path: str):
    """Guarantee that *path* is a usable directory.

    A pre-existing non-directory at *path* is deleted first; the directory is
    then created if missing (idempotent when it already exists).
    """
    blocking_file = os.path.exists(path) and not os.path.isdir(path)
    if blocking_file:
        os.remove(path)
    os.makedirs(path, exist_ok=True)


# Configure matplotlib for headless use: the Agg backend renders straight to
# files (no display server needed), and the sans-serif fallback list keeps
# axis/label text rendering without missing-font warnings.
matplotlib.use('Agg')  # non-interactive backend
plt.rcParams['font.sans-serif'] = ['DejaVu Sans', 'Arial', 'sans-serif']  # font fallbacks

def get_args():
    """Parse command-line arguments (supports Kaggle auto-download + auto split).

    Download-related options are mirrored into environment variables
    (RS_AUTO_DOWNLOAD / RS_KAGGLE_ID / RS_TEST_SIZE) so create_dataloaders can
    read them without a signature change.  ``setdefault`` is deliberate:
    pre-set environment variables win over CLI values.

    Returns:
        argparse.Namespace with all training / NAS / compression options.
    """
    parser = argparse.ArgumentParser(description="改进的ResNet50遥感图像分类训练 + NAS 架构搜索")

    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_dir = os.path.normpath(os.path.join(script_dir, "..", "data_split"))
    # An existing RS_DATA_ROOT directory overrides the computed default.
    env_data_dir = os.environ.get("RS_DATA_ROOT")
    if env_data_dir and os.path.isdir(env_data_dir):
        default_data_dir = env_data_dir

    # Basic options
    parser.add_argument("--model", "-m", default="resnet50",
                        choices=["resnet18", "resnet50", "resnet101", "efficientnet_b0"])
    parser.add_argument("--data-dir", "-d", default=default_data_dir,
                        help="数据集根目录（包含 train/ 与 test/ 子目录；若无则自动创建）")
    parser.add_argument("--output-dir", "-o", default="./output", help="结果输出目录")
    parser.add_argument("--num-classes", type=int, default=4, help="分类类别数")

    # Auto-download.  BooleanOptionalAction keeps --auto-download working and
    # adds --no-auto-download; the previous store_true with default=True could
    # never be switched off from the command line.
    parser.add_argument("--auto-download", action=argparse.BooleanOptionalAction, default=True,
                        help="若本地没有数据集则自动下载（默认开启）")
    parser.add_argument("--kaggle-dataset", type=str,
                        default="mahmoudreda55/satellite-image-classification",
                        help="Kaggle 数据集名称")
    parser.add_argument("--test-size", type=float, default=0.2,
                        help="自动划分测试集比例 (0~1)")

    # Training / NAS / misc options
    parser.add_argument("--epochs", "-e", type=int, default=25)
    parser.add_argument("--batch-size", "-b", type=int, default=16)
    parser.add_argument("--learning-rate", "-lr", type=float, default=1e-3)
    parser.add_argument("--weight-decay", type=float, default=5e-4)
    parser.add_argument("--patience", type=int, default=8)
    parser.add_argument("--dataset-size", type=int, default=5000)
    parser.add_argument("--img-size", type=int, default=224)
    parser.add_argument("--mode", choices=['train', 'nas', 'compress', 'both'], default='both')
    parser.add_argument("--pretrained-path", type=str, default=None)
    parser.add_argument("--use-pretrained", action="store_true", default=True)
    parser.add_argument("--resume", action="store_true", default=False)
    parser.add_argument("--resume-path", type=str, default=None)
    parser.add_argument("--finetune-epochs", type=int, default=3)
    parser.add_argument("--finetune-lr", type=float, default=1e-4)
    parser.add_argument("--nas", action="store_true", default=False)
    parser.add_argument("--nas-dir", type=str, default=os.path.join(script_dir, "ak_nas"))
    parser.add_argument("--max-trials", type=int, default=15)
    parser.add_argument("--nas-epochs", type=int, default=10)

    args = parser.parse_args()

    # Mirror into environment variables (read by create_dataloaders without a
    # signature change); setdefault lets pre-existing env values take priority.
    os.environ.setdefault("RS_AUTO_DOWNLOAD", "1" if args.auto_download else "0")
    os.environ.setdefault("RS_KAGGLE_ID", args.kaggle_dataset)
    os.environ.setdefault("RS_TEST_SIZE", str(args.test_size))

    return args

def load_resnet_model(model_name, num_classes=10, use_pretrained=True):
    """Build a torchvision backbone topped with a Dropout(0.5) classifier.

    Args:
        model_name: one of "resnet18", "resnet50", "resnet101", "efficientnet_b0".
        num_classes: output dimension of the new classification head.
        use_pretrained: load ImageNet weights when True, random init otherwise.

    Returns:
        nn.Module whose final classifier is Dropout(0.5) + Linear(num_classes).

    Raises:
        ValueError: when model_name is not supported.
    """
    print(f"加载 {model_name} 模型...")

    # Resolve (constructor, pretrained weights enum, display label) per model;
    # imports stay lazy so only the requested architecture is loaded.
    if model_name == "resnet18":
        from torchvision.models import resnet18, ResNet18_Weights
        builder, weights, label = resnet18, ResNet18_Weights.IMAGENET1K_V1, "ResNet18"
    elif model_name == "resnet50":
        from torchvision.models import resnet50, ResNet50_Weights
        builder, weights, label = resnet50, ResNet50_Weights.IMAGENET1K_V2, "ResNet50"
    elif model_name == "resnet101":
        from torchvision.models import resnet101, ResNet101_Weights
        builder, weights, label = resnet101, ResNet101_Weights.IMAGENET1K_V2, "ResNet101"
    elif model_name == "efficientnet_b0":
        from torchvision.models import efficientnet_b0, EfficientNet_B0_Weights
        builder, weights, label = efficientnet_b0, EfficientNet_B0_Weights.IMAGENET1K_V1, "EfficientNet-B0"
    else:
        raise ValueError(f"不支持的模型: {model_name}")

    if use_pretrained:
        model = builder(weights=weights)
        print(f"使用ImageNet预训练的{label}")
    else:
        model = builder(weights=None)
        print(f"使用随机初始化的{label}")

    # Swap the stock head for Dropout + Linear to curb overfitting.
    # EfficientNet exposes `classifier`; the ResNet family exposes `fc`.
    if model_name == "efficientnet_b0":
        in_features = model.classifier[1].in_features
        model.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(in_features, num_classes)
        )
    else:
        in_features = model.fc.in_features
        model.fc = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(in_features, num_classes)
        )

    return model

def create_dataloaders(data_dir, batch_size=16, dataset_size=0, img_size=224, num_classes=4):
    """Build ImageFolder train/test DataLoaders; auto-download + split if absent.

    When ``data_dir`` lacks ``train/`` and ``test/`` subfolders, the Kaggle
    dataset named by env var RS_KAGGLE_ID is downloaded via kagglehub,
    extracted, copied into ``data_dir/_raw`` and split per RS_TEST_SIZE.

    Args:
        data_dir: dataset root expected to contain train/ and test/ class dirs.
        batch_size: batch size for both loaders.
        dataset_size: if > 0 and smaller than the training set, randomly
            subsample the training set to that many images.
        img_size: square image size fed to the model.
        num_classes: expected number of classes (only used for a warning).

    Returns:
        (train_loader, test_loader) tuple of DataLoaders.
    """
    print("准备真实遥感图像分类数据集 (ImageFolder)...")
    data_dir = os.path.abspath(data_dir)
    train_dir = os.path.join(data_dir, "train")
    test_dir  = os.path.join(data_dir, "test")

    def _split_raw_to_train_test(raw_root: str, out_root: str, test_size: float = 0.2, seed: int = 42):
        """Copy each class folder of raw_root into out_root/{train,test} using a
        seeded shuffle so the split is reproducible across runs."""
        import random, shutil
        random.seed(seed)
        tr_root = os.path.join(out_root, "train")
        te_root = os.path.join(out_root, "test")
        os.makedirs(tr_root, exist_ok=True)
        os.makedirs(te_root, exist_ok=True)
        classes = [d for d in os.listdir(raw_root) if os.path.isdir(os.path.join(raw_root, d))]
        for cls in classes:
            src = os.path.join(raw_root, cls)
            imgs = [f for f in os.listdir(src) if _is_image_file(f)]
            if not imgs:
                print(f"⚠️ 类别 {cls} 无图片，跳过")
                continue
            random.shuffle(imgs)
            # Always reserve at least 1 test image; for tiny classes (<5 imgs)
            # both branches still yield 1 — NOTE(review): the two branches are
            # effectively identical, presumably intentional belt-and-braces.
            n_test = max(1, int(len(imgs) * test_size)) if len(imgs) >= 5 else max(1, len(imgs)//5)
            te_files = set(imgs[:n_test])
            tr_files = imgs[n_test:]
            os.makedirs(os.path.join(tr_root, cls), exist_ok=True)
            os.makedirs(os.path.join(te_root, cls), exist_ok=True)
            for f in tr_files:
                shutil.copy2(os.path.join(src, f), os.path.join(tr_root, cls, f))
            for f in te_files:
                shutil.copy2(os.path.join(src, f), os.path.join(te_root, cls, f))
        print(f"✅ 已生成划分：{out_root}（train/test）")

    # No ready-made train/test split: download from Kaggle and split it.
    if not (os.path.isdir(train_dir) and os.path.isdir(test_dir)):
        auto_dl = os.environ.get("RS_AUTO_DOWNLOAD", "1") != "0"
        if not auto_dl:
            raise FileNotFoundError(
                f"未找到 {data_dir}\\train 与 {data_dir}\\test，且已禁用自动下载（RS_AUTO_DOWNLOAD=0）。"
            )
        try:
            import kagglehub
        except Exception:
            raise SystemExit("自动下载需要 kagglehub。请先安装：pip install kagglehub")

        kaggle_id = os.environ.get("RS_KAGGLE_ID", "mahmoudreda55/satellite-image-classification")
        print(f"[数据准备] 使用 kagglehub 下载：{kaggle_id}")
        path = kagglehub.dataset_download(kaggle_id)
        print("Kaggle 数据集下载完成，本地路径：", path)

        # Extract any archive and locate the directory holding class folders.
        path = _extract_if_archive(path)
        class_root = _find_class_root(path)
        if class_root is None:
            raise SystemExit(f"未在 {path} 找到类别文件夹结构（需要子文件夹为类别且内有图片）。")

        # Stage images into data_dir/_raw, then split into train/test.
        os.makedirs(data_dir, exist_ok=True)
        tmp_raw = os.path.join(data_dir, "_raw")
        if os.path.isdir(tmp_raw):
            shutil.rmtree(tmp_raw)
        os.makedirs(tmp_raw, exist_ok=True)

        for cls in sorted(os.listdir(class_root)):
            src = os.path.join(class_root, cls)
            if not os.path.isdir(src):
                continue
            imgs = [f for f in os.listdir(src) if _is_image_file(f)]
            if not imgs:
                continue
            dst = os.path.join(tmp_raw, cls)
            os.makedirs(dst, exist_ok=True)
            for f in imgs:
                shutil.copy2(os.path.join(src, f), os.path.join(dst, f))

        test_size = float(os.environ.get("RS_TEST_SIZE", "0.2"))
        _split_raw_to_train_test(tmp_raw, data_dir, test_size=test_size, seed=42)

    # From here on, train/ and test/ are guaranteed to exist.
    expected = {"cloudy", "desert", "green_area", "water"}
    found_train = set([d for d in os.listdir(train_dir) if os.path.isdir(os.path.join(train_dir, d))])
    if not expected.issubset(found_train):
        print(f"[提示] 检测到的训练类别: {sorted(found_train)}（不完全等于期望 {sorted(expected)}，这不影响训练）")

    # Train-time augmentation; Normalize uses ImageNet statistics to match the
    # pretrained backbones loaded in load_resnet_model.
    train_transform = transforms.Compose([
        EnsureRGB(),
        transforms.RandomResizedCrop(img_size, scale=(0.8, 1.0)),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.1),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    # Deterministic eval pipeline: resize ~15% larger, then center crop.
    test_transform = transforms.Compose([
        EnsureRGB(),
        transforms.Resize(int(img_size * 1.15)),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    train_dataset = datasets.ImageFolder(train_dir, transform=train_transform)
    test_dataset  = datasets.ImageFolder(test_dir,  transform=test_transform)

    class_names = train_dataset.classes
    print(f"发现的类: {class_names}（共 {len(class_names)} 类）")
    if len(class_names) != num_classes:
        print(f"[提示] 传入 num_classes={num_classes} 与数据中类数 {len(class_names)} 不一致。建议设为 {len(class_names)}")

    # Optional random subsample of the training set to cap epoch duration.
    if dataset_size and dataset_size > 0 and dataset_size < len(train_dataset):
        indices = torch.randperm(len(train_dataset))[:dataset_size].tolist()
        train_dataset = torch.utils.data.Subset(train_dataset, indices)
        setattr(train_dataset, "classes", class_names)  # keep .classes for later visualization
    
    num_workers = 0  # 0 is more reliable on Windows (avoids worker-spawn issues)
    pin = torch.cuda.is_available()
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                              num_workers=num_workers, pin_memory=pin, drop_last=True)
    test_loader  = DataLoader(test_dataset,  batch_size=batch_size, shuffle=False,
                              num_workers=num_workers, pin_memory=pin)

    print(f"训练集大小: {len(train_dataset)} | 测试集大小: {len(test_dataset)}")
    return train_loader, test_loader

def train_model(model, train_loader, test_loader, device, args):
    """Train *model* with label smoothing, layered LRs and a OneCycle schedule.

    Args:
        model: classification network already moved to *device*.
        train_loader: DataLoader yielding (images, labels) training batches.
        test_loader: DataLoader used for per-epoch evaluation.
        device: torch.device for all tensors.
        args: namespace with use_pretrained, learning_rate, weight_decay,
            epochs and output_dir.

    Returns:
        (model restored to its best weights, best test accuracy, history dict
        with train_losses / train_accuracies / test_accuracies / best_test_acc).
    """
    print("开始训练ResNet模型...")

    # Label smoothing regularizes against over-confident logits.
    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    # Layered learning rates: the pretrained backbone trains at 10% of the
    # head's LR so the new classifier adapts faster than the features.
    if args.use_pretrained:
        backbone_params, classifier_params = [], []
        for name, param in model.named_parameters():
            if 'fc' in name:
                classifier_params.append(param)
            else:
                backbone_params.append(param)
        optimizer = optim.AdamW([
            {'params': backbone_params, 'lr': args.learning_rate * 0.1},
            {'params': classifier_params, 'lr': args.learning_rate}
        ], weight_decay=args.weight_decay)
    else:
        optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    # OneCycleLR is defined over epochs * steps_per_epoch BATCH steps, so it
    # must be stepped once per batch (the previous code stepped it once per
    # epoch, leaving the LR stuck near its warmup value).  A per-group max_lr
    # list preserves the layered backbone/head ratio through the whole cycle
    # (a scalar max_lr would override both groups to the same peak).
    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=[group['lr'] for group in optimizer.param_groups],
        epochs=args.epochs,
        steps_per_epoch=len(train_loader),
        pct_start=0.3
    )

    train_losses, train_accuracies, test_accuracies = [], [], []
    best_test_acc = 0.0
    best_model_state = None

    for epoch in range(args.epochs):
        model.train()
        running_loss = 0.0
        correct_train = 0
        total_train = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for images, labels in pbar:
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()

            # Gradient clipping for stability with the high OneCycle peak LR.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()
            scheduler.step()  # advance the OneCycle schedule every batch

            # Running statistics for the progress bar.
            running_loss += loss.item()
            pred = outputs.argmax(dim=1)
            total_train += labels.size(0)
            correct_train += (pred == labels).sum().item()

            current_lr = scheduler.get_last_lr()[0]
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'Acc': f'{100. * correct_train / total_train:.2f}%',
                'LR': f'{current_lr:.6f}'
            })

        # Per-epoch evaluation on the held-out test set.
        avg_train_loss = running_loss / len(train_loader)
        train_acc = correct_train / total_train
        test_acc = evaluate_model(model, test_loader, device)

        train_losses.append(avg_train_loss)
        train_accuracies.append(train_acc)
        test_accuracies.append(test_acc)

        # Keep a snapshot of the best-performing weights.
        if test_acc > best_test_acc:
            best_test_acc = test_acc
            best_model_state = copy.deepcopy(model.state_dict())

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train Acc: {train_acc:.4f}, '
              f'Test Acc: {test_acc:.4f}, '
              f'Best Test Acc: {best_test_acc:.4f}')

    # Restore the best checkpoint before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试准确率: {best_test_acc:.4f}')

    # Persist loss/accuracy/overfitting-gap curves.
    plot_training_curves(train_losses, train_accuracies, test_accuracies, args.output_dir)

    return model, best_test_acc, {
        'train_losses': train_losses,
        'train_accuracies': train_accuracies,
        'test_accuracies': test_accuracies,
        'best_test_acc': best_test_acc
    }

def evaluate_model(model, dataloader, device):
    """Return top-1 accuracy of *model* over *dataloader* (0 when empty)."""
    model.eval()
    n_correct, n_seen = 0, 0

    with torch.no_grad():
        for batch_images, batch_labels in dataloader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            logits = model(batch_images)
            predictions = logits.argmax(dim=1)
            n_seen += batch_labels.size(0)
            n_correct += (predictions == batch_labels).sum().item()

    return n_correct / n_seen if n_seen > 0 else 0

def nas_train_model(train_loader, test_loader, args):
    """
    AutoKeras NAS:
    - aggregate several batches so there are at least 2;
    - pass explicit validation_data so AutoKeras does not re-split internally;
    - pin the project directory to the ASCII-only args.nas_dir;
    - skip clf.evaluate(); export the best Keras model and score it with its
      own evaluate/predict instead;
    - Keras 3 saving: write a .keras file, falling back to .h5 on failure.

    Returns:
        float test accuracy of the best searched model.
    """
    # Aggregate enough samples (>= 2 batches) from a DataLoader.
    def take_batches(loader, max_items):
        """Concatenate batches from *loader* until max_items samples collected."""
        xs, ys, total = [], [], 0
        for x, y in loader:
            xs.append(x)
            ys.append(y)
            total += x.size(0)
            if total >= max_items:
                break
        if not xs:
            raise ValueError("DataLoader 为空，无法进行 NAS。")
        X = torch.cat(xs, dim=0)
        y = torch.cat(ys, dim=0)
        return X, y

    bs_train = getattr(train_loader, "batch_size", 16) or 16
    bs_test  = getattr(test_loader, "batch_size", 16) or 16
    n_train  = max(64, 2 * bs_train)
    n_val    = max(32, 2 * bs_test)

    Xtr_t, ytr_t = take_batches(train_loader, n_train)
    Xte_t, yte_t = take_batches(test_loader, n_val)

    # Convert NCHW torch tensors to NHWC float32 numpy (Keras layout).
    X_train = Xtr_t.numpy().transpose(0, 2, 3, 1).astype("float32")
    X_test  = Xte_t.numpy().transpose(0, 2, 3, 1).astype("float32")
    y_train = ytr_t.numpy()
    y_test  = yte_t.numpy()

    # ASCII-only NAS root directory (non-ASCII paths break TF on Windows).
    proj_root = os.path.abspath(args.nas_dir)  # e.g. C:\ak_nas
    if os.path.exists(proj_root) and not os.path.isdir(proj_root):
        os.remove(proj_root)
    os.makedirs(proj_root, exist_ok=True)

    project_name = "run1"
    project_dir = os.path.join(proj_root, project_name)
    os.makedirs(project_dir, exist_ok=True)
    print(f"NAS 工程目录: {project_dir}")

    # AutoKeras architecture search.
    clf = ak.ImageClassifier(
        overwrite=True,
        max_trials=args.max_trials,
        directory=proj_root,
        project_name=project_name
    )

    clf.fit(
        X_train, y_train,
        epochs=args.nas_epochs,
        validation_data=(X_test, y_test),
        batch_size=16,
        verbose=2
    )

    # Export the best Keras model found by the search.
    model = clf.export_model()

    # Evaluate first so a failed save cannot hide the accuracy figure.
    try:
        try:
            # Exported models are sometimes uncompiled; compile defensively.
            model.compile(optimizer="adam",
                          loss="sparse_categorical_crossentropy",
                          metrics=["accuracy"])
        except Exception:
            pass
        results = model.evaluate(X_test, y_test, verbose=0)
        if isinstance(results, (list, tuple)) and len(results) >= 2:
            acc = float(results[1])
        elif isinstance(results, dict) and "accuracy" in results:
            acc = float(results["accuracy"])
        else:
            raise RuntimeError("Unknown evaluate return; fallback to predict.")
    except Exception:
        # Last resort: compute accuracy manually from predictions.
        y_prob = model.predict(X_test, verbose=0)
        y_pred = y_prob.argmax(axis=1)
        acc = float((y_pred == y_test).mean())

    print(f"NAS 搜索后最佳模型在测试集上的准确率: {acc:.4f}")

    # Then save (Keras 3 requires an explicit file extension).
    keras_path = os.path.join(project_dir, "exported_model.keras")
    try:
        model.save(keras_path)  # Keras 3 preferred format: .keras
        print(f"NAS 模型已保存到: {keras_path}")
    except Exception as e1:
        # Fall back to the legacy HDF5 format.
        h5_path = os.path.join(project_dir, "exported_model.h5")
        try:
            model.save(h5_path)
            print(f"NAS 模型已保存到: {h5_path}（使用 H5 备选）")
        except Exception as e2:
            print(f"[警告] 模型保存失败：{e1} | 备选 .h5 也失败：{e2}")

    return acc

def comprehensive_evaluation(model, dataloader, device, class_names):
    """
    Full evaluation: accuracy, macro/weighted P-R-F1, per-class P-R-F1,
    confusion matrix (raw + row-normalized), plus efficiency metrics
    (parameter count, model size, mean latency, throughput, peak GPU memory).

    Args:
        model: trained classifier (switched to eval mode here).
        dataloader: evaluation DataLoader yielding (images, labels).
        device: torch.device used for inference.
        class_names: ordered class labels indexed by target id.

    Returns:
        dict of all metrics (JSON-serializable except y_true/y_pred lists).
    """
    model.eval()
    y_true, y_pred = [], []
    total_time = 0.0

    use_cuda = (device.type == 'cuda' and torch.cuda.is_available())
    if use_cuda:
        torch.cuda.reset_peak_memory_stats(device)

    with torch.no_grad():
        for images, labels in dataloader:
            images, labels = images.to(device), labels.to(device)

            # Synchronize around the forward pass so CUDA timing is accurate.
            if use_cuda: torch.cuda.synchronize()
            t0 = time.perf_counter()
            outputs = model(images)
            if use_cuda: torch.cuda.synchronize()
            total_time += (time.perf_counter() - t0)

            _, preds = torch.max(outputs, 1)
            y_true.extend(labels.cpu().numpy().tolist())
            y_pred.extend(preds.cpu().numpy().tolist())

    # Aggregate metrics
    accuracy = accuracy_score(y_true, y_pred)
    precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(
        y_true, y_pred, average='macro', zero_division=0)
    precision_weighted, recall_weighted, f1_weighted, _ = precision_recall_fscore_support(
        y_true, y_pred, average='weighted', zero_division=0)

    # Per-class metrics
    per_class_p, per_class_r, per_class_f1, support = precision_recall_fscore_support(
        y_true, y_pred, average=None, zero_division=0, labels=list(range(len(class_names))))
    per_class = []
    for i, name in enumerate(class_names):
        per_class.append({
            "class": name,
            "precision": float(per_class_p[i]),
            "recall": float(per_class_r[i]),
            "f1": float(per_class_f1[i]),
            "support": int(support[i]),
        })

    # Confusion matrix: raw counts plus a row-normalized copy
    # (zero rows are guarded against division by zero).
    cm = confusion_matrix(y_true, y_pred, labels=list(range(len(class_names))))
    cm_norm = cm.astype(np.float64)
    row_sums = cm_norm.sum(axis=1, keepdims=True)
    row_sums[row_sums == 0] = 1.0
    cm_norm = (cm_norm / row_sums).tolist()

    # Efficiency metrics
    num_params = sum(p.numel() for p in model.parameters())
    size_mb = get_model_size(model)
    num_images = len(y_true)
    avg_latency_ms = (total_time / max(1, num_images)) * 1000.0
    throughput = num_images / max(1e-9, total_time)
    gpu_mem_mb = torch.cuda.max_memory_allocated(device) / (1024 * 1024) if use_cuda else None

    print("\n=== Per-class report ===")
    for item in per_class:
        print(f"{item['class']:<12s} P={item['precision']:.3f} R={item['recall']:.3f} F1={item['f1']:.3f} (n={item['support']})")

    print("\n=== Efficiency ===")
    print(f"参数量: {num_params:,}")
    print(f"模型大小: {size_mb:.2f} MB")
    print(f"平均推理时延: {avg_latency_ms:.3f} ms/图")
    print(f"吞吐: {throughput:.2f} img/s")
    if gpu_mem_mb is not None:
        print(f"评估峰值显存: {gpu_mem_mb:.2f} MB")

    return {
        'accuracy': float(accuracy),
        'precision_macro': float(precision_macro),
        'recall_macro': float(recall_macro),
        'f1_macro': float(f1_macro),
        'precision_weighted': float(precision_weighted),
        'recall_weighted': float(recall_weighted),
        'f1_weighted': float(f1_weighted),
        'per_class': per_class,
        'confusion_matrix': cm.astype(int).tolist(),
        'confusion_matrix_row_normalized': cm_norm,
        'y_true': y_true,
        'y_pred': y_pred,
        'num_params': int(num_params),
        'model_size_mb': float(size_mb),
        'avg_latency_ms': float(avg_latency_ms),
        'throughput_img_s': float(throughput),
        'gpu_peak_mem_mb': float(gpu_mem_mb) if gpu_mem_mb is not None else None
    }

def plot_training_curves(train_losses, train_accuracies, test_accuracies, output_dir):
    """Save a 3-panel figure: loss, train/test accuracy, and overfitting gap."""
    xs = list(range(1, len(train_losses) + 1))
    out_path = os.path.join(output_dir, 'training_curves.png')

    plt.figure(figsize=(15, 5))

    # Panel 1: training loss.
    plt.subplot(1, 3, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss', linewidth=2)
    plt.title('Training Loss', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Loss', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 2: train vs test accuracy.
    plt.subplot(1, 3, 2)
    plt.plot(xs, train_accuracies, 'b-', label='Training Accuracy', linewidth=2)
    plt.plot(xs, test_accuracies, 'r-', label='Test Accuracy', linewidth=2)
    plt.title('Training and Test Accuracy', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Accuracy', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 3: train-minus-test accuracy as an overfitting signal.
    plt.subplot(1, 3, 3)
    gap = [tr - te for tr, te in zip(train_accuracies, test_accuracies)]
    plt.plot(xs, gap, 'g-', label='Overfitting Gap', linewidth=2)
    plt.title('Overfitting Analysis', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Train Acc - Test Acc', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.axhline(y=0, color='k', linestyle='--', alpha=0.5)

    plt.tight_layout()
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {out_path}")

def visualize_samples(dataloader, output_dir, num_samples=16):
    """Save a 4x4 grid of de-normalized sample images titled with class names."""
    class_names = getattr(dataloader.dataset, "classes", None)
    if class_names is None:
        class_names = [str(i) for i in range(10)]

    fig, axes = plt.subplots(4, 4, figsize=(16, 16))
    fig.suptitle('样本可视化', fontsize=16)

    batch_imgs, batch_labels = next(iter(dataloader))
    # ImageNet normalization constants, hoisted out of the loop.
    mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)

    for idx in range(min(num_samples, len(batch_imgs))):
        ax = axes[idx // 4, idx % 4]
        # Undo normalization so the image colors render correctly.
        restored = torch.clamp(batch_imgs[idx].clone() * std + mean, 0, 1)
        ax.imshow(restored.permute(1, 2, 0).numpy())

        cls = int(batch_labels[idx])
        ax.set_title(class_names[cls] if cls < len(class_names) else f"Class {cls}", fontsize=12)
        ax.axis('off')

    plt.tight_layout()
    out_path = os.path.join(output_dir, 'data_samples.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据样本已保存到: {out_path}")

def get_model_size(model):
    """Return the in-memory footprint of *model* in MB (parameters + buffers)."""
    n_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    n_bytes += sum(b.nelement() * b.element_size() for b in model.buffers())
    return n_bytes / (1024 * 1024)

def save_eval_report(eval_res, output_dir, filename='eval_report.json'):
    """Serialize the evaluation dict as pretty-printed UTF-8 JSON in output_dir."""
    out_path = os.path.join(output_dir, filename)
    with open(out_path, 'w', encoding='utf-8') as fp:
        json.dump(eval_res, fp, indent=2, ensure_ascii=False)
    print(f"评估结果已保存到: {out_path}")

def plot_confusion_matrix_simple(cm, class_names, output_dir, filename='confusion_matrix.png'):
    """Render a single confusion matrix heatmap with per-cell integer counts."""
    matrix = np.array(cm)
    n = len(class_names)

    plt.figure(figsize=(8, 7))
    image = plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.Blues)
    plt.colorbar(image)

    ticks = np.arange(n)
    plt.xticks(ticks, class_names, rotation=45, ha='right')
    plt.yticks(ticks, class_names)

    # Flip the text color past half the max count so labels stay readable.
    cutoff = matrix.max() / 2.0 if matrix.size else 0
    for row in range(n):
        for col in range(n):
            cell_color = "white" if matrix[row, col] > cutoff else "black"
            plt.text(col, row, format(int(matrix[row, col])), ha="center", va="center",
                     color=cell_color)

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    out_path = os.path.join(output_dir, filename)
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"混淆矩阵已保存到: {out_path}")

def main():
    """Entry point: parse args, prepare data, then train / NAS / evaluate."""
    # 1) Parse command-line arguments
    args = get_args()

    # 2) Prepare the output directory (replace a blocking file if present)
    args.output_dir = os.path.abspath(args.output_dir)
    if os.path.exists(args.output_dir) and not os.path.isdir(args.output_dir):
        os.remove(args.output_dir)
    os.makedirs(args.output_dir, exist_ok=True)

    # 3) Select compute device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # 4) Dataset root (create_dataloaders auto-downloads/splits when missing)
    data_dir = os.path.abspath(args.data_dir)

    # 5) Build / load the model
    model = load_resnet_model(args.model, args.num_classes, args.use_pretrained)

    # Optional: load custom pretrained weights (strict=False tolerates
    # head-shape mismatches against the new classifier).
    if getattr(args, "pretrained_path", None):
        if os.path.isfile(args.pretrained_path):
            print(f"加载自定义预训练权重: {args.pretrained_path}")
            state = torch.load(args.pretrained_path, map_location='cpu')
            try:
                model.load_state_dict(state, strict=False)
            except Exception as e:
                print(f"[警告] 加载预训练权重时出现问题（已使用 strict=False 尝试）：{e}")
        else:
            print(f"[警告] 未找到指定的预训练权重文件: {args.pretrained_path}")

    model.to(device)

    # Optional: resume from a checkpoint for fine-tuning (switches the run to
    # finetune epochs / learning rate).
    if getattr(args, "resume", False):
        if args.resume_path and os.path.isfile(args.resume_path):
            print(f"从 {args.resume_path} 恢复模型权重...")
            checkpoint = torch.load(args.resume_path, map_location=device)
            model.load_state_dict(checkpoint, strict=False)
            # Fine-tuning hyperparameters after resume
            args.epochs = args.finetune_epochs
            args.learning_rate = args.finetune_lr
        else:
            raise FileNotFoundError(f"找不到 --resume-path 指定的文件: {args.resume_path}")

    # 6) DataLoaders (auto-download and split when the data is missing)
    train_loader, test_loader = create_dataloaders(
        data_dir,
        batch_size=args.batch_size,
        dataset_size=args.dataset_size,
        img_size=args.img_size,
        num_classes=args.num_classes
    )

    # Best-effort sample visualization (failures do not block training)
    try:
        visualize_samples(train_loader, args.output_dir)
    except Exception as e:
        print(f"[提示] 可视化样本失败（忽略不影响训练）：{e}")

    # 7) Training
    if args.mode in ['train', 'both']:
        stage = "微调阶段" if getattr(args, "resume", False) else "初始训练阶段"
        print(f"=== {stage} ===")
        model, best_acc, _ = train_model(model, train_loader, test_loader, device, args)
        ckpt_path = os.path.join(args.output_dir, f"{args.model}_trained.pth")
        torch.save(model.state_dict(), ckpt_path)
        print(f"训练完成，最佳测试准确率: {best_acc:.4f}，权重已保存至 {ckpt_path}")

    # 8) NAS (when enabled); failures are logged and skipped
    if args.mode in ['nas', 'both'] or getattr(args, "nas", False):
        print("=== NAS 架构搜索 ===")
        try:
            nas_acc = nas_train_model(train_loader, test_loader, args)
            print(f"NAS 模式完成，测试准确率: {nas_acc:.4f}")
        except Exception as e:
            print(f"[警告] NAS 运行失败（已跳过）：{e}")

    # 9) Compression (placeholder: no compression pipeline integrated yet)
    if args.mode == 'compress':
        print("[提示] 当前脚本未集成压缩流程（如 K-Means 权重共享 / 剪枝 等）。")
        print("如果需要，我可以把压缩函数（例如 parameter_sharing_kmeans）接到此处并评估。")

    # 10) Final evaluation and report artifacts
    print("=== 最终评估 ===")
    class_names = getattr(test_loader.dataset, 'classes', [str(i) for i in range(args.num_classes)])
    eval_res = comprehensive_evaluation(model, test_loader, device, class_names)
    save_eval_report(eval_res, args.output_dir)
    plot_confusion_matrix_simple(eval_res['confusion_matrix'], class_names, args.output_dir)

    print("全部流程完成。")


def plot_confusion_matrices(original_eval, finetuned_eval, class_names, output_dir):
    """Save side-by-side confusion matrices for the original and compressed models."""
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 8))
    n = len(class_names)
    ticks = np.arange(n)

    panels = [
        (ax1, original_eval, "Original Model"),
        (ax2, finetuned_eval, "Compressed Model"),
    ]
    for ax, evaluation, label in panels:
        cm = evaluation['confusion_matrix']
        im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
        ax.figure.colorbar(im, ax=ax)
        ax.set(xticks=ticks,
               yticks=ticks,
               xticklabels=class_names, yticklabels=class_names,
               title=f"{label}\nAccuracy: {evaluation['accuracy']:.3f}",
               ylabel='True label',
               xlabel='Predicted label')

        # Annotate counts, flipping text color past half the max for contrast.
        cutoff = np.array(cm).max() / 2.
        for i in range(n):
            for j in range(n):
                ax.text(j, i, format(cm[i][j], 'd'),
                        ha="center", va="center",
                        color="white" if cm[i][j] > cutoff else "black")

        plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'confusion_matrices_comparison.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"混淆矩阵对比已保存到: {os.path.join(output_dir, 'confusion_matrices_comparison.png')}")

# Script entry point: run the full pipeline when executed directly.
if __name__ == "__main__":
    main()
