"""
改进的遥感图像分类模型训练和压缩示例 - ResNet50版本
解决过拟合问题，优化压缩策略，提升模型性能
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import json
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import matplotlib
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix
import shutil
import torchvision.datasets as datasets
from sklearn.model_selection import train_test_split
import glob
import random
import zipfile
import tarfile
try:
    import kagglehub
except Exception as _e:
    kagglehub = None
    print("提示：未安装 kagglehub 或导入失败。如需自动下载，请先安装：pip install kagglehub")


matplotlib.use('Agg')  # Non-interactive backend so figures save without a display (headless servers)
plt.rcParams['font.sans-serif'] = ['DejaVu Sans', 'Arial', 'sans-serif']  # Font fallbacks for plot labels

def get_args():
    """Parse CLI arguments, with auto-download fallbacks so a bare "run" still works.

    Returns:
        argparse.Namespace with all options; `data_dir` and `auto_download`
        may be rewritten when no usable dataset is found locally.

    Raises:
        ValueError: if --test-size is outside (0, 1).
    """
    parser = argparse.ArgumentParser(description="改进的ResNet50遥感图像分类模型训练和压缩")

    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_dir = os.path.normpath(os.path.join(script_dir, "..", "data_split"))

    # Environment variable may override the default dataset root.
    env_data_dir = os.environ.get("RS_DATA_ROOT")
    if env_data_dir and os.path.isdir(env_data_dir):
        default_data_dir = env_data_dir

    # Model
    parser.add_argument("--model", "-m", default="resnet50",
                        choices=["resnet18", "resnet50", "resnet101", "efficientnet_b0"],
                        help="要使用的模型架构")

    # Data and outputs
    parser.add_argument("--data-dir", "-d", default=default_data_dir,
                        help="数据集根目录（应包含 train/ 与 test/ 或按类的原始文件夹）")
    parser.add_argument("--output-dir", "-o", default="./output",
                        help="模型与结果输出目录")
    parser.add_argument("--num-classes", type=int, default=10,
                        help="分类类别数（若与数据集解析结果不一致，将以数据集为准）")

    # Training hyper-parameters
    parser.add_argument("--epochs", "-e", type=int, default=25, help="训练轮数")
    parser.add_argument("--batch-size", "-b", type=int, default=16, help="批次大小")
    parser.add_argument("--learning-rate", "-lr", type=float, default=1e-3, help="基础学习率")
    parser.add_argument("--weight-decay", type=float, default=5e-4, help="权重衰减")
    parser.add_argument("--patience", type=int, default=8, help="early stopping patience（若你后续加上早停逻辑时可用）")
    parser.add_argument("--dataset-size", type=int, default=5000, help="数据集大小（可不使用，仅占位）")
    parser.add_argument("--img-size", type=int, default=224, help="输入图像尺寸")

    # Run mode and pretrained weights
    parser.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                        help="运行模式：仅训练/仅压缩/两者都运行")
    parser.add_argument("--pretrained-path", type=str, default=None,
                        help="自定义预训练权重路径（.pth），若提供则会加载（strict=False）")
    # Fix: `--use-pretrained` alone was a no-op (store_true with default=True can
    # never be switched off). `--no-pretrained` now disables ImageNet weights.
    parser.add_argument("--use-pretrained", action="store_true", default=True,
                        help="使用ImageNet预训练权重（默认启用）")
    parser.add_argument("--no-pretrained", dest="use_pretrained", action="store_false",
                        help="不使用ImageNet预训练权重（随机初始化）")

    # Auto download and splitting
    parser.add_argument("--auto-download", action="store_true", default=False,
                        help="当 data-dir 不含 train/test 且无类别结构时，自动下载 Kaggle 数据集并切分")
    parser.add_argument("--kaggle-dataset", type=str,
                        default="mahmoudreda55/satellite-image-classification",
                        help="Kaggle 数据集标识（kagglehub 用），例如 'user/dataset-name'")
    parser.add_argument("--test-size", type=float, default=0.2,
                        help="自动划分测试集比例（0~1）")

    args = parser.parse_args()

    if not (0.0 < args.test_size < 1.0):
        raise ValueError(f"--test-size 必须在 (0,1) 区间内，当前为 {args.test_size}")

    # Fallback so "run with no arguments" still triggers the download path.
    def _has_train_test(dirpath: str) -> bool:
        return (os.path.isdir(os.path.join(dirpath, "train")) and
                os.path.isdir(os.path.join(dirpath, "test")))

    data_dir_exists = os.path.isdir(args.data_dir)
    data_ready = data_dir_exists and _has_train_test(args.data_dir)

    if not data_ready:
        if not data_dir_exists:
            print(f"[提示] 数据目录不存在: {args.data_dir}")
            print(r"👉 可使用 --data-dir 或环境变量 RS_DATA_ROOT 指定，例如：")
            print(r'   python main.py --data-dir "../../实习数据集/data_split"')
        if not args.auto_download:
            print("[提示] 未检测到可用数据集，已自动开启 --auto-download，并将使用 Kaggle 数据集。")
            args.auto_download = True
        # Redirect to a dedicated download directory when the user kept the
        # default path (or the given path does not exist at all).
        if (not data_dir_exists) or (os.path.normpath(args.data_dir) == os.path.normpath(default_data_dir)):
            kaggle_dir = os.path.normpath(os.path.join(script_dir, "..", "data_split_kaggle"))
            if os.path.normpath(args.data_dir) != kaggle_dir:
                print(f"[提示] 将数据目录切换为（用于自动下载与切分）：{kaggle_dir}")
            args.data_dir = kaggle_dir

    return args

def is_image_file(p):
    """Return True when *p* carries a recognized raster-image file extension."""
    _, ext = os.path.splitext(p)
    return ext.lower() in (".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff", ".webp")

def safe_copy(src, dst):
    """Copy *src* to *dst* (metadata preserved), creating parent directories.

    Existing destination files are left untouched, making repeated runs idempotent.
    """
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    if os.path.isfile(dst):
        return
    shutil.copy2(src, dst)

def extract_if_archive(path):
    """If *path* is a zip/tar archive, extract it into a same-named sibling
    directory and return that directory; otherwise return *path* unchanged.

    Directories pass straight through. Tar extraction uses the "data" filter
    (PEP 706) when available to block path-traversal entries in downloaded
    archives, falling back to plain extraction on Python < 3.12.
    """
    if os.path.isdir(path):
        return path
    base = os.path.dirname(path)
    name = os.path.splitext(os.path.basename(path))[0]
    out_dir = os.path.join(base, name)
    if zipfile.is_zipfile(path):
        os.makedirs(out_dir, exist_ok=True)
        with zipfile.ZipFile(path) as zf:
            zf.extractall(out_dir)
        return out_dir
    if tarfile.is_tarfile(path):
        os.makedirs(out_dir, exist_ok=True)
        with tarfile.open(path) as tf:
            try:
                # Reject absolute paths / ".." members in untrusted archives.
                tf.extractall(out_dir, filter="data")
            except TypeError:
                # Python < 3.12: extractall has no `filter` parameter.
                tf.extractall(out_dir)
        return out_dir
    return path

def find_class_root(search_root):
    """
    Depth-first search for the root of a "class folder" layout: a directory
    holding at least two subdirectories that directly contain image files.

    Returns the matching directory path, or None when no such layout exists
    anywhere under *search_root*.
    """
    subdirs = [d for d in os.listdir(search_root)
               if os.path.isdir(os.path.join(search_root, d))]

    n_with_images = 0
    for name in subdirs:
        entries = os.listdir(os.path.join(search_root, name))
        if any(is_image_file(entry) for entry in entries):
            n_with_images += 1
    if n_with_images >= 2:
        return search_root

    # No match at this level: recurse into each child in listing order.
    for name in subdirs:
        found = find_class_root(os.path.join(search_root, name))
        if found is not None:
            return found
    return None

def auto_download_kaggle_dataset(kaggle_dataset: str) -> str:
    """Download *kaggle_dataset* via kagglehub and return its local directory,
    extracting it first when the download is an archive file."""
    if kagglehub is None:
        raise RuntimeError("kagglehub 未安装或导入失败。请先 pip install kagglehub")
    print(f"开始使用 kagglehub 下载数据集：{kaggle_dataset}")
    local_path = kagglehub.dataset_download(kaggle_dataset)
    print("Kaggle 数据集下载完成，本地路径：", local_path)
    return extract_if_archive(local_path)

def materialize_raw_to_datadir(raw_root: str, data_dir: str):
    """
    Gather every class subdirectory found under *raw_root* into data_dir/_raw
    (copy only — no train/test split). Returns the _raw path.

    Raises:
        RuntimeError: when no class-folder structure exists under *raw_root*.
    """
    class_root = find_class_root(raw_root)
    if class_root is None:
        raise RuntimeError(f"未在 {raw_root} 找到类别文件夹结构（至少两个类别子文件夹且有图片）")

    raw_out = os.path.join(data_dir, "_raw")
    os.makedirs(raw_out, exist_ok=True)

    for cls in sorted(os.listdir(class_root)):
        src_dir = os.path.join(class_root, cls)
        if not os.path.isdir(src_dir):
            continue
        images = [name for name in os.listdir(src_dir) if is_image_file(name)]
        if not images:
            # Skip empty class folders entirely.
            continue
        dst_dir = os.path.join(raw_out, cls)
        os.makedirs(dst_dir, exist_ok=True)
        for name in images:
            safe_copy(os.path.join(src_dir, name), os.path.join(dst_dir, name))

    print(f"已将原始类别数据复制到：{raw_out}")
    return raw_out

def split_from_raw(raw_root: str, out_root: str, test_size: float, seed: int = 42):
    """Split class folders under *raw_root* (classA/, classB/, ...) into
    out_root/train and out_root/test, seeding the shuffle for reproducibility."""
    random.seed(seed)
    train_root = os.path.join(out_root, "train")
    test_root = os.path.join(out_root, "test")
    for root in (train_root, test_root):
        os.makedirs(root, exist_ok=True)

    for cls in [d for d in os.listdir(raw_root) if os.path.isdir(os.path.join(raw_root, d))]:
        src = os.path.join(raw_root, cls)
        imgs = [f for f in os.listdir(src) if is_image_file(f)]
        if not imgs:
            print(f"⚠️ 类别 {cls} 无图片，跳过")
            continue

        random.shuffle(imgs)
        # At least one test image; tiny classes (<5 images) fall back to a
        # floor-division fraction.
        if len(imgs) >= 5:
            n_test = max(1, int(len(imgs) * test_size))
        else:
            n_test = max(1, len(imgs) // 5)
        test_files = set(imgs[:n_test])
        train_files = imgs[n_test:]

        os.makedirs(os.path.join(train_root, cls), exist_ok=True)
        os.makedirs(os.path.join(test_root, cls), exist_ok=True)
        for f in train_files:
            safe_copy(os.path.join(src, f), os.path.join(train_root, cls, f))
        for f in test_files:
            safe_copy(os.path.join(src, f), os.path.join(test_root, cls, f))

    print(f"✅ 已生成划分：{out_root}（train/test）")
    return out_root

def prepare_dataset_entrypoint(data_dir: str, auto_download: bool, kaggle_dataset: str, test_size: float) -> str:
    """
    Return a root directory directly usable by ImageFolder:
      - if data_dir already contains train/test -> return it unchanged;
      - else if a recognizable class-folder structure exists inside data_dir
        -> split it into data_dir/train, data_dir/test;
      - else if auto_download=True -> download from Kaggle, copy into
        data_dir/_raw, then split;
      - otherwise raise RuntimeError.
    """
    if os.path.isdir(os.path.join(data_dir, "train")) and os.path.isdir(os.path.join(data_dir, "test")):
        return data_dir

    os.makedirs(data_dir, exist_ok=True)
    local_class_root = find_class_root(data_dir)
    if local_class_root:
        print(f"在 {data_dir} 找到类别根：{local_class_root}，开始切分...")
        # Stage the class folders into a private _raw_local copy first so the
        # split never mutates the user's original folders; an existing
        # _raw_local is reused as-is (copying is skipped on reruns).
        tmp_raw = os.path.join(data_dir, "_raw_local")
        if not os.path.isdir(tmp_raw):
            for cls in os.listdir(local_class_root):
                src = os.path.join(local_class_root, cls)
                if os.path.isdir(src):
                    imgs = [f for f in os.listdir(src) if is_image_file(f)]
                    if imgs:
                        dst = os.path.join(tmp_raw, cls)
                        os.makedirs(dst, exist_ok=True)
                        for f in imgs:
                            safe_copy(os.path.join(src, f), os.path.join(dst, f))
        split_from_raw(tmp_raw, data_dir, test_size=test_size)
        return data_dir

    if auto_download:
        # Kaggle fallback: download, consolidate under data_dir/_raw, split.
        raw_path = auto_download_kaggle_dataset(kaggle_dataset)
        raw_root = materialize_raw_to_datadir(raw_path, data_dir)
        split_from_raw(raw_root, data_dir, test_size=test_size)
        return data_dir

    raise RuntimeError(
        f"在 {data_dir} 未发现 train/test 或可识别的类别结构，且未启用 --auto-download。"
        " 请手动整理类别文件夹，或加 --auto-download 让程序自动下载并准备。"
    )


def split_dataset_if_needed(data_dir, test_size=0.2, seed=42):
    """
    If *data_dir* lacks train/test, copy its per-class folders into a new
    <data_dir>_split/train|test layout. Returns a root usable by ImageFolder.

    Raises:
        RuntimeError: when no class folders are found under *data_dir*.
    """
    if os.path.isdir(os.path.join(data_dir, "train")) and os.path.isdir(os.path.join(data_dir, "test")):
        return data_dir

    # Folder names that are layout artifacts, not classes.
    reserved = {"train", "test", "val", "valid", "validation", "images", "labels"}
    class_dirs = [d for d in os.listdir(data_dir)
                  if os.path.isdir(os.path.join(data_dir, d)) and d.lower() not in reserved]
    if not class_dirs:
        raise RuntimeError(f"在 {data_dir} 未找到类别文件夹，请确认路径。")

    out_root = data_dir.rstrip("/\\") + "_split"
    train_root = os.path.join(out_root, "train")
    test_root = os.path.join(out_root, "test")
    os.makedirs(train_root, exist_ok=True)
    os.makedirs(test_root, exist_ok=True)

    exts = {".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff", ".webp"}
    for cls in class_dirs:
        src = os.path.join(data_dir, cls)
        imgs = [f for f in os.listdir(src) if os.path.splitext(f)[1].lower() in exts]
        if not imgs:
            print(f"⚠️ {cls} 无图片，跳过。")
            continue

        # Deterministic, shuffled split per class.
        tr, te = train_test_split(imgs, test_size=test_size, random_state=seed, shuffle=True)
        os.makedirs(os.path.join(train_root, cls), exist_ok=True)
        os.makedirs(os.path.join(test_root, cls), exist_ok=True)
        for fn in tr:
            shutil.copy(os.path.join(src, fn), os.path.join(train_root, cls, fn))
        for fn in te:
            shutil.copy(os.path.join(src, fn), os.path.join(test_root, cls, fn))

    print(f"✅ 已生成划分：{out_root}")
    return out_root


def get_transforms(img_size, is_train=True):
    """Build the torchvision pipeline: resize + (train-only) flips/rotation,
    then tensor conversion and ImageNet normalization."""
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    ops = [transforms.Resize((img_size, img_size))]
    if is_train:
        # Light geometric augmentation to fight overfitting.
        ops += [
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomVerticalFlip(p=0.2),
            transforms.RandomRotation(15),
        ]
    ops += [transforms.ToTensor(), normalize]
    return transforms.Compose(ops)


def load_resnet_model(model_name, num_classes=10, use_pretrained=True):
    """Build a torchvision backbone with a Dropout-regularized classification head.

    Args:
        model_name: one of "resnet18", "resnet50", "resnet101", "efficientnet_b0".
        num_classes: output dimension of the new classification head.
        use_pretrained: load ImageNet weights when True; random init otherwise.

    Returns:
        The constructed nn.Module.

    Raises:
        ValueError: for an unrecognized *model_name*.
    """
    print(f"加载 {model_name} 模型...")

    # Resolve constructor, pretrained-weight enum and display label once,
    # instead of repeating the build/print/head-replacement logic per branch.
    if model_name == "resnet18":
        from torchvision.models import resnet18, ResNet18_Weights
        ctor, weights, label = resnet18, ResNet18_Weights.IMAGENET1K_V1, "ResNet18"
    elif model_name == "resnet50":
        from torchvision.models import resnet50, ResNet50_Weights
        ctor, weights, label = resnet50, ResNet50_Weights.IMAGENET1K_V2, "ResNet50"
    elif model_name == "resnet101":
        from torchvision.models import resnet101, ResNet101_Weights
        ctor, weights, label = resnet101, ResNet101_Weights.IMAGENET1K_V2, "ResNet101"
    elif model_name == "efficientnet_b0":
        from torchvision.models import efficientnet_b0, EfficientNet_B0_Weights
        ctor, weights, label = efficientnet_b0, EfficientNet_B0_Weights.IMAGENET1K_V1, "EfficientNet-B0"
    else:
        raise ValueError(f"不支持的模型: {model_name}")

    if use_pretrained:
        model = ctor(weights=weights)
        print(f"使用ImageNet预训练的{label}")
    else:
        model = ctor(weights=None)
        print(f"使用随机初始化的{label}")

    # Replace the stock head with Dropout + Linear sized for our classes.
    # EfficientNet exposes its head as `classifier`; the ResNets use `fc`.
    if model_name == "efficientnet_b0":
        model.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(model.classifier[1].in_features, num_classes)
        )
    else:
        model.fc = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(model.fc.in_features, num_classes)
        )

    return model


def create_dataloaders(data_dir, batch_size=16, dataset_size=None, img_size=224, num_classes=None,
                       auto_download=False, kaggle_dataset="mahmoudreda55/satellite-image-classification",
                       test_size=0.2):
    """
    Build train/test DataLoaders over real imagery.

    Prefers an existing data_dir/train,test layout; otherwise the Kaggle
    dataset is downloaded and split on demand via prepare_dataset_entrypoint.
    *dataset_size* is accepted for signature compatibility but unused.
    """
    print("准备真实卫星分类数据集...")
    root = prepare_dataset_entrypoint(data_dir=data_dir,
                                      auto_download=auto_download,
                                      kaggle_dataset=kaggle_dataset,
                                      test_size=test_size)

    train_dataset = datasets.ImageFolder(os.path.join(root, "train"),
                                         transform=get_transforms(img_size, True))
    test_dataset = datasets.ImageFolder(os.path.join(root, "test"),
                                        transform=get_transforms(img_size, False))

    # The dataset's own class list wins over the CLI value.
    if num_classes is not None and num_classes != len(train_dataset.classes):
        print(f"提示：命令行 num_classes={num_classes} 与数据集类别数 {len(train_dataset.classes)} 不一致，将以数据集为准：{len(train_dataset.classes)} 类。")

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                              num_workers=4, pin_memory=True, drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                             num_workers=4, pin_memory=True)

    print(f"训练集: {len(train_dataset)}，测试集: {len(test_dataset)}，类别: {train_dataset.classes}")
    return train_loader, test_loader


def train_model(model, train_loader, test_loader, device, args):
    """Train *model* with label smoothing, layered LRs and a One-Cycle schedule.

    Args:
        model: classification network (mutated in place).
        train_loader / test_loader: DataLoaders yielding (images, labels).
        device: torch.device to run on.
        args: namespace with epochs, learning_rate, weight_decay,
            use_pretrained and output_dir.

    Returns:
        (model reloaded with the best checkpoint, best test accuracy, history dict).
    """
    print("开始训练ResNet模型...")

    # Label smoothing regularizes the classifier against overconfidence.
    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    # Layered learning rates: fine-tune the pretrained backbone 10x slower
    # than the freshly initialized head.
    if args.use_pretrained:
        backbone_params, classifier_params = [], []
        for name, param in model.named_parameters():
            # Fix: also match 'classifier' so the EfficientNet head (named
            # `classifier`, not `fc`) receives the full head learning rate.
            if 'fc' in name or 'classifier' in name:
                classifier_params.append(param)
            else:
                backbone_params.append(param)
        optimizer = optim.AdamW([
            {'params': backbone_params, 'lr': args.learning_rate * 0.1},
            {'params': classifier_params, 'lr': args.learning_rate}
        ], weight_decay=args.weight_decay)
    else:
        optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    # One-Cycle schedule over epochs * len(train_loader) total steps.
    # Fix: per-group max_lr list preserves the layered LRs above — a scalar
    # max_lr would drive the backbone up to the head's peak LR.
    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=[group['lr'] for group in optimizer.param_groups],
        epochs=args.epochs,
        steps_per_epoch=len(train_loader),
        pct_start=0.3
    )

    train_losses, train_accuracies, test_accuracies = [], [], []
    best_test_acc = 0.0
    best_model_state = None

    for epoch in range(args.epochs):
        model.train()
        running_loss = 0.0
        correct_train = 0
        total_train = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for images, labels in pbar:
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()

            # Gradient clipping for stability.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()
            # Fix: OneCycleLR is configured with steps_per_epoch, so it must
            # advance once per BATCH; the old per-epoch step left the LR
            # stuck in the warmup phase for the whole run.
            scheduler.step()

            # Running statistics for the progress bar.
            running_loss += loss.item()
            pred = outputs.argmax(dim=1)
            total_train += labels.size(0)
            correct_train += (pred == labels).sum().item()

            current_lr = scheduler.get_last_lr()[0]
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'Acc': f'{100. * correct_train / total_train:.2f}%',
                'LR': f'{current_lr:.6f}'
            })

        # End-of-epoch bookkeeping and validation.
        avg_train_loss = running_loss / len(train_loader)
        train_acc = correct_train / total_train
        test_acc = evaluate_model(model, test_loader, device)

        train_losses.append(avg_train_loss)
        train_accuracies.append(train_acc)
        test_accuracies.append(test_acc)

        # Keep the checkpoint with the best held-out accuracy.
        if test_acc > best_test_acc:
            best_test_acc = test_acc
            best_model_state = copy.deepcopy(model.state_dict())

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train Acc: {train_acc:.4f}, '
              f'Test Acc: {test_acc:.4f}, '
              f'Best Test Acc: {best_test_acc:.4f}')

    # Restore the best checkpoint before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试准确率: {best_test_acc:.4f}')

    # Persist loss/accuracy/overfitting-gap curves.
    plot_training_curves(train_losses, train_accuracies, test_accuracies, args.output_dir)

    return model, best_test_acc, {
        'train_losses': train_losses,
        'train_accuracies': train_accuracies,
        'test_accuracies': test_accuracies,
        'best_test_acc': best_test_acc
    }

def evaluate_model(model, dataloader, device):
    """Compute top-1 accuracy of *model* over *dataloader* (0 when it is empty)."""
    model.eval()
    n_correct = 0
    n_seen = 0

    with torch.no_grad():
        for batch_images, batch_labels in dataloader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            preds = model(batch_images).argmax(dim=1)
            n_seen += batch_labels.size(0)
            n_correct += (preds == batch_labels).sum().item()

    return n_correct / n_seen if n_seen > 0 else 0


def comprehensive_evaluation(model, dataloader, device, class_names):
    """Evaluate *model* and return accuracy, macro precision/recall/F1, the
    confusion matrix (as a list of lists), and the raw label/prediction lists.

    *class_names* is accepted for signature compatibility; metrics are
    computed from the integer labels directly.
    """
    model.eval()
    y_true, y_pred, y_probs = [], [], []

    with torch.no_grad():
        for images, labels in dataloader:
            images = images.to(device)
            labels = labels.to(device)
            logits = model(images)

            y_probs.extend(F.softmax(logits, dim=1).cpu().numpy())
            y_pred.extend(logits.argmax(dim=1).cpu().numpy())
            y_true.extend(labels.cpu().numpy())

    # Macro averaging weighs every class equally regardless of support.
    precision, recall, f1, _ = precision_recall_fscore_support(y_true, y_pred, average='macro')

    return {
        'accuracy': accuracy_score(y_true, y_pred),
        'precision': precision,
        'recall': recall,
        'f1_score': f1,
        'confusion_matrix': confusion_matrix(y_true, y_pred).tolist(),
        'y_true': y_true,
        'y_pred': y_pred
    }


def plot_training_curves(train_losses, train_accuracies, test_accuracies, output_dir):
    """Render loss, accuracy, and overfitting-gap curves to training_curves.png."""
    xs = range(1, len(train_losses) + 1)

    def _panel(pos, series, title, ylabel):
        # series: list of (values, fmt, label) tuples plotted on one subplot.
        plt.subplot(1, 3, pos)
        for values, fmt, label in series:
            plt.plot(xs, values, fmt, label=label, linewidth=2)
        plt.title(title, fontsize=14)
        plt.xlabel('Epoch', fontsize=12)
        plt.ylabel(ylabel, fontsize=12)
        plt.legend()
        plt.grid(True, alpha=0.3)

    plt.figure(figsize=(15, 5))

    _panel(1, [(train_losses, 'b-', 'Training Loss')],
           'Training Loss', 'Loss')
    _panel(2, [(train_accuracies, 'b-', 'Training Accuracy'),
               (test_accuracies, 'r-', 'Test Accuracy')],
           'Training and Test Accuracy', 'Accuracy')

    # Train/test accuracy gap as a simple overfitting indicator.
    gap = [tr - te for tr, te in zip(train_accuracies, test_accuracies)]
    _panel(3, [(gap, 'g-', 'Overfitting Gap')],
           'Overfitting Analysis', 'Train Acc - Test Acc')
    plt.axhline(y=0, color='k', linestyle='--', alpha=0.5)

    plt.tight_layout()
    out_path = os.path.join(output_dir, 'training_curves.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {out_path}")


def visualize_samples(dataloader, output_dir, num_samples=16):
    """Save a grid of denormalized sample images (data_samples.png) titled with
    the dataset's class names.

    Shows at most *num_samples* images from the first batch of *dataloader*.
    """
    class_names = dataloader.dataset.classes

    rows = cols = int(np.ceil(np.sqrt(min(num_samples, len(dataloader.dataset)))))
    fig, axes = plt.subplots(rows, cols, figsize=(4 * cols, 4 * rows))
    axes = np.array(axes).reshape(-1)

    images, labels = next(iter(dataloader))
    # Invert the ImageNet normalization applied by get_transforms.
    mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)

    shown = min(num_samples, len(images))
    for i in range(shown):
        img = images[i].clone() * std + mean
        img = torch.clamp(img, 0, 1).permute(1, 2, 0).numpy()
        axes[i].imshow(img)
        axes[i].set_title(f'{class_names[labels[i]]} (#{labels[i]})', fontsize=11)
        axes[i].axis('off')

    # Hide unused grid cells. Fix: the original reused the loop variable `i`
    # here, which is unbound when the batch is empty or num_samples <= 0.
    for j in range(shown, len(axes)):
        axes[j].axis('off')

    plt.tight_layout()
    path = os.path.join(output_dir, 'data_samples.png')
    plt.savefig(path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据样本已保存到: {path}")

def get_model_size(model):
    """Estimate the in-memory footprint of *model*'s parameters and buffers, in MB."""
    n_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    n_bytes += sum(b.nelement() * b.element_size() for b in model.buffers())
    return n_bytes / (1024 * 1024)

def count_parameters(model):
    """Return the total number of parameter elements in *model*."""
    total = 0
    for p in model.parameters():
        total += p.numel()
    return total

def report_model_efficiency(model, device, test_loader, output_dir):
    """
    Measure and persist model efficiency metrics:
    - parameters_total: total parameter count
    - model_memory_MB_estimated: parameters + buffers resident size
    - weights_disk_MB: serialized state_dict size on disk
    - inference_peak_memory_MB: peak CUDA memory of one forward pass, or a
      rough FP32 parameter estimate on CPU

    Writes model_efficiency_report.json under *output_dir* and returns the dict.
    """
    os.makedirs(output_dir, exist_ok=True)

    n_params = count_parameters(model)
    mem_mb = get_model_size(model)

    # Serialize once to measure on-disk weight size, then clean up.
    tmp_path = os.path.join(output_dir, "tmp_measure.pth")
    torch.save(model.state_dict(), tmp_path)
    disk_mb = os.path.getsize(tmp_path) / (1024 * 1024)
    os.remove(tmp_path)

    peak_mem_mb = None
    try:
        if device.type == "cuda":
            torch.cuda.reset_peak_memory_stats(device)
            model.eval()
            with torch.no_grad():
                images, _ = next(iter(test_loader))
                _ = model(images.to(device))
            peak_mem_mb = torch.cuda.max_memory_allocated(device) / (1024 * 1024)
        else:
            # No CUDA statistics available: fall back to an FP32 estimate.
            peak_mem_mb = n_params * 4 / (1024 * 1024)
    except Exception as e:
        print(f"内存占用测量失败：{e}")

    report = {
        "parameters_total": int(n_params),
        "model_memory_MB_estimated": float(mem_mb),
        "weights_disk_MB": float(disk_mb),
        "inference_peak_memory_MB": float(peak_mem_mb) if peak_mem_mb is not None else None
    }
    with open(os.path.join(output_dir, "model_efficiency_report.json"), "w", encoding="utf-8") as f:
        json.dump(report, f, ensure_ascii=False, indent=2)
    print("模型效率报告：", json.dumps(report, ensure_ascii=False, indent=2))
    return report


def main():
    """Main entry: prepare data -> train -> (optional) tensor decomposition +
    fine-tuning -> final evaluation -> efficiency report."""
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)

    # Device selection: prefer CUDA when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Build the model (optionally with ImageNet weights).
    model = load_resnet_model(args.model, args.num_classes, args.use_pretrained)
    model.to(device)

    # Optionally overlay external weights (strict=False tolerates layer names
    # that do not match exactly).
    if args.pretrained_path:
        ckpt = torch.load(args.pretrained_path, map_location=device)
        state_dict = ckpt.get('state_dict', ckpt)
        model.load_state_dict(state_dict, strict=False)
        print(f"已加载预训练权重：{args.pretrained_path}")

    # === Training phase ===
    if args.mode in ['train', 'both']:
        print("=== 训练阶段 ===")
        train_loader, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.dataset_size, args.img_size, args.num_classes,
            auto_download=args.auto_download,
            kaggle_dataset=args.kaggle_dataset,
            test_size=args.test_size
        )
        visualize_samples(train_loader, args.output_dir)

        model, best_acc, history = train_model(model, train_loader, test_loader, device, args)
        trained_ckpt = os.path.join(args.output_dir, f"{args.model}_trained.pth")
        torch.save(model.state_dict(), trained_ckpt)
        print(f"训练完成，最佳测试准确率: {best_acc:.4f}，权重保存至: {trained_ckpt}")

    # === Tensor decomposition (the "compression" example) + fine-tuning ===
    if args.mode in ['compress', 'both']:
        print("=== 张量分解阶段（Linear/Conv2d 低秩近似）===")
        # Optional explicit per-layer ranks (args.rank_list, if present);
        # otherwise each layer defaults to min(matrix dims) // 8.
        ranks = getattr(args, "rank_list", None) or []
        use_explicit = len(ranks) > 0
        idx = 0

        # NOTE(review): the low-rank reconstruction is written back into the
        # ORIGINAL weight tensors, so shapes and parameter counts are
        # unchanged — this reduces effective rank, not storage size.
        for name, module in model.named_modules():
            if isinstance(module, nn.Linear):
                W = module.weight.data
                rank = ranks[min(idx, len(ranks)-1)] if use_explicit else max(1, min(W.size(0), W.size(1)) // 8)
                idx += 1

                # Truncated SVD: keep only the top-`rank` singular components.
                U, S, Vh = torch.linalg.svd(W, full_matrices=False)
                W_approx = (U[:, :rank] * S[:rank]) @ Vh[:rank, :]
                module.weight.data.copy_(W_approx)

            elif isinstance(module, nn.Conv2d):
                W = module.weight.data
                out_c, in_c, kh, kw = W.shape
                # Flatten the 4-D kernel to a 2-D matrix before factorizing.
                W_mat = W.view(out_c, in_c * kh * kw)
                rank = ranks[min(idx, len(ranks)-1)] if use_explicit else max(1, min(W_mat.size(0), W_mat.size(1)) // 8)
                idx += 1

                U, S, Vh = torch.linalg.svd(W_mat, full_matrices=False)
                W_mat_approx = (U[:, :rank] * S[:rank]) @ Vh[:rank, :]
                module.weight.data.copy_(W_mat_approx.view_as(W))

        td_path = os.path.join(args.output_dir, f"{args.model}_td.pth")
        torch.save(model.state_dict(), td_path)
        print(f"张量分解后模型已保存到: {td_path}")

        # Fine-tune the decomposed model (lower LR, few epochs).
        print("=== 微调张量分解模型 ===")
        train_loader, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.dataset_size, args.img_size, args.num_classes,
            auto_download=args.auto_download,
            kaggle_dataset=args.kaggle_dataset,
            test_size=args.test_size
        )
        model.to(device)

        # Temporarily shrink the training schedule for fine-tuning; the
        # original values are restored below.
        orig_epochs, orig_lr = args.epochs, args.learning_rate
        args.epochs = getattr(args, "finetune_epochs", 3)
        args.learning_rate = orig_lr * getattr(args, "finetune_lr_scale", 0.1)

        model, tuned_acc, _ = train_model(model, train_loader, test_loader, device, args)
        print(f"微调完成，测试准确率: {tuned_acc:.4f}")

        # Restore the original hyper-parameters.
        args.epochs, args.learning_rate = orig_epochs, orig_lr

    # === Final evaluation ===
    print("=== 最终评估 ===")
    train_loader_tmp, test_loader = create_dataloaders(
        args.data_dir, args.batch_size, args.dataset_size, args.img_size, args.num_classes,
        auto_download=args.auto_download,
        kaggle_dataset=args.kaggle_dataset,
        test_size=args.test_size
    )
    class_names = getattr(train_loader_tmp.dataset, "classes", [str(i) for i in range(args.num_classes)])

    model.to(device).eval()
    eval_res = comprehensive_evaluation(model, test_loader, device, class_names)
    print("最终评估结果:")
    print(json.dumps({
        'accuracy':  eval_res['accuracy'],
        'precision': eval_res['precision'],
        'recall':    eval_res['recall'],
        'f1_score':  eval_res['f1_score']
    }, indent=2, ensure_ascii=False))

    # Efficiency report (parameters, memory, disk size).
    _ = report_model_efficiency(model, device, test_loader, args.output_dir)

def plot_confusion_matrices(original_eval, finetuned_eval, class_names, output_dir):
    """Save side-by-side annotated confusion matrices comparing the original
    model against the compressed (fine-tuned) one.

    Each *_eval dict must provide 'confusion_matrix' (list of lists) and
    'accuracy', as produced by comprehensive_evaluation.
    """

    def _draw_panel(ax, eval_res, model_title):
        # One annotated confusion-matrix panel (shared by both models —
        # replaces two copy-pasted blocks).
        cm = np.array(eval_res['confusion_matrix'])
        im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
        ax.figure.colorbar(im, ax=ax)
        ax.set(xticks=np.arange(len(class_names)),
               yticks=np.arange(len(class_names)),
               xticklabels=class_names, yticklabels=class_names,
               title=f"{model_title}\nAccuracy: {eval_res['accuracy']:.3f}",
               ylabel='True label',
               xlabel='Predicted label')

        # Cell annotations: white text on dark cells, black on light ones.
        thresh = cm.max() / 2.
        for i in range(len(class_names)):
            for j in range(len(class_names)):
                ax.text(j, i, format(cm[i][j], 'd'),
                        ha="center", va="center",
                        color="white" if cm[i][j] > thresh else "black")

        plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 8))
    _draw_panel(ax1, original_eval, "Original Model")
    _draw_panel(ax2, finetuned_eval, "Compressed Model")

    plt.tight_layout()
    out_path = os.path.join(output_dir, 'confusion_matrices_comparison.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"混淆矩阵对比已保存到: {out_path}")

if __name__ == "__main__":
    # Script entry point: run the full prepare -> train -> compress -> evaluate pipeline.
    main()