"""
改进的遥感图像分类模型训练和压缩示例 - ResNet50版本
解决过拟合问题，优化压缩策略，提升模型性能
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import json
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import matplotlib
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix
import torchvision.datasets as datasets
from PIL import Image
import random
import zipfile
import tarfile
import shutil

# Optional dependency: kagglehub is only needed for the --auto-download path.
# When it is missing, the module still loads and auto-download raises later.
try:
    import kagglehub
except Exception:
    kagglehub = None
    print("提示：未安装 kagglehub 或导入失败。如需自动下载，请先安装：pip install kagglehub")

def _is_image_file(p):
    return os.path.splitext(p)[1].lower() in {".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff", ".webp"}

def _safe_copy(src, dst):
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    if not os.path.isfile(dst):
        shutil.copy2(src, dst)

def _extract_if_archive(path):
    """若是 zip/tar 则解压到同目录同名文件夹，返回解压后的目录；否则原样返回。"""
    if os.path.isdir(path):
        return path
    base = os.path.dirname(path)
    name = os.path.splitext(os.path.basename(path))[0]
    out_dir = os.path.join(base, name)
    if zipfile.is_zipfile(path):
        os.makedirs(out_dir, exist_ok=True)
        with zipfile.ZipFile(path) as zf:
            zf.extractall(out_dir)
        return out_dir
    if tarfile.is_tarfile(path):
        os.makedirs(out_dir, exist_ok=True)
        with tarfile.open(path) as tf:
            tf.extractall(out_dir)
        return out_dir
    return path

def _find_class_root(search_root):
    """Recursively locate the root of a "class folder" layout.

    A valid root has at least two subdirectories that each directly contain
    image files. Returns the root path, or None when no such layout exists.
    """
    if not os.path.isdir(search_root):
        return None
    subdirs = [entry for entry in os.listdir(search_root)
               if os.path.isdir(os.path.join(search_root, entry))]
    dirs_with_images = 0
    for entry in subdirs:
        entry_path = os.path.join(search_root, entry)
        if any(_is_image_file(f) for f in os.listdir(entry_path)):
            dirs_with_images += 1
    if dirs_with_images >= 2:
        return search_root
    # Depth-first descent: first matching subtree wins.
    for entry in subdirs:
        found = _find_class_root(os.path.join(search_root, entry))
        if found is not None:
            return found
    return None

def _auto_download_kaggle_dataset(kaggle_dataset: str) -> str:
    """Download *kaggle_dataset* via kagglehub and return the local directory,
    unpacking it first when the download is an archive.

    Raises:
        RuntimeError: when the optional kagglehub dependency is unavailable.
    """
    if kagglehub is None:
        raise RuntimeError("kagglehub 未安装或导入失败。请先 pip install kagglehub")
    print(f"开始使用 kagglehub 下载数据集：{kaggle_dataset}")
    local_path = kagglehub.dataset_download(kaggle_dataset)
    print("Kaggle 数据集下载完成，本地路径：", local_path)
    return _extract_if_archive(local_path)

def _materialize_raw_to_datadir(raw_root: str, data_dir: str):
    """Copy each class subfolder found under *raw_root* into data_dir/_raw.

    Only aggregates the class folders — no train/test split is performed.
    Returns the _raw directory path.

    Raises:
        RuntimeError: when no class-folder layout can be located.
    """
    class_root = _find_class_root(raw_root)
    if class_root is None:
        raise RuntimeError(f"未在 {raw_root} 找到类别文件夹结构（至少两个类别子文件夹且有图片）")
    raw_out = os.path.join(data_dir, "_raw")
    os.makedirs(raw_out, exist_ok=True)
    for cls in sorted(os.listdir(class_root)):
        src = os.path.join(class_root, cls)
        if not os.path.isdir(src):
            continue
        image_names = [name for name in os.listdir(src) if _is_image_file(name)]
        if not image_names:
            continue  # skip class folders with no images at all
        dst = os.path.join(raw_out, cls)
        os.makedirs(dst, exist_ok=True)
        for name in image_names:
            _safe_copy(os.path.join(src, name), os.path.join(dst, name))
    print(f"已将原始类别数据复制到：{raw_out}")
    return raw_out

def _split_from_raw(raw_root: str, out_root: str, test_size: float, seed: int = 42):
    """Split raw_root (classA/, classB/, ...) into out_root/train and out_root/test.

    The split is deterministic for a given *seed*. Small classes (< 5 images)
    still contribute at least one test image.
    """
    random.seed(seed)
    train_root = os.path.join(out_root, "train")
    test_root = os.path.join(out_root, "test")
    for root in (train_root, test_root):
        os.makedirs(root, exist_ok=True)

    class_dirs = [d for d in os.listdir(raw_root) if os.path.isdir(os.path.join(raw_root, d))]
    for cls in class_dirs:
        src = os.path.join(raw_root, cls)
        imgs = [f for f in os.listdir(src) if _is_image_file(f)]
        if not imgs:
            print(f"⚠️ 类别 {cls} 无图片，跳过")
            continue
        random.shuffle(imgs)
        if len(imgs) >= 5:
            n_test = max(1, int(len(imgs) * test_size))
        else:
            n_test = max(1, len(imgs) // 5)

        os.makedirs(os.path.join(train_root, cls), exist_ok=True)
        os.makedirs(os.path.join(test_root, cls), exist_ok=True)
        for f in imgs[n_test:]:
            _safe_copy(os.path.join(src, f), os.path.join(train_root, cls, f))
        for f in imgs[:n_test]:
            _safe_copy(os.path.join(src, f), os.path.join(test_root, cls, f))
    print(f"✅ 已生成划分：{out_root}（train/test）")
    return out_root

def _prepare_dataset_entrypoint(data_dir: str, auto_download: bool, kaggle_dataset: str, test_size: float) -> str:
    """
    Return a root directory directly usable by torchvision ImageFolder:
      - if data_dir already contains train/ and test/ -> return it unchanged;
      - else if a recognizable class layout exists inside data_dir -> split it
        into data_dir/train and data_dir/test;
      - else if auto_download=True -> download from Kaggle, stage into
        data_dir/_raw, then split;
      - otherwise raise RuntimeError.
    """
    if os.path.isdir(os.path.join(data_dir, "train")) and os.path.isdir(os.path.join(data_dir, "test")):
        return data_dir

    os.makedirs(data_dir, exist_ok=True)
    local_class_root = _find_class_root(data_dir)
    if local_class_root:
        print(f"在 {data_dir} 找到类别根：{local_class_root}，开始切分...")
        # Stage the discovered class folders into _raw_local, then split that.
        # NOTE(review): if _raw_local survives from an interrupted earlier run,
        # the copy step is skipped and its (possibly partial) contents are
        # split as-is — confirm partial copies cannot occur.
        tmp_raw = os.path.join(data_dir, "_raw_local")
        if not os.path.isdir(tmp_raw):
            for cls in os.listdir(local_class_root):
                src = os.path.join(local_class_root, cls)
                if os.path.isdir(src):
                    imgs = [f for f in os.listdir(src) if _is_image_file(f)]
                    if imgs:
                        dst = os.path.join(tmp_raw, cls)
                        os.makedirs(dst, exist_ok=True)
                        for f in imgs:
                            _safe_copy(os.path.join(src, f), os.path.join(dst, f))
        _split_from_raw(tmp_raw, data_dir, test_size=test_size)
        return data_dir

    if auto_download:
        # Download, aggregate class folders into data_dir/_raw, then split.
        raw_path = _auto_download_kaggle_dataset(kaggle_dataset)
        raw_root = _materialize_raw_to_datadir(raw_path, data_dir)
        _split_from_raw(raw_root, data_dir, test_size=test_size)
        return data_dir

    raise RuntimeError(
        f"在 {data_dir} 未发现 train/test 或可识别的类别结构，且未启用 --auto-download。"
        " 请手动整理类别文件夹，或加 --auto-download 让程序自动下载并准备。"
    )

class EnsureRGB(object):
    """Transform ensuring a 3-channel RGB image.

    Guards against grayscale / RGBA inputs, which would otherwise break the
    3-channel Normalize step downstream.
    """
    def __call__(self, img):
        mode = getattr(img, "mode", None)
        return img if mode == "RGB" else img.convert("RGB")

matplotlib.use('Agg')  # non-interactive backend: safe for headless/server runs
plt.rcParams['font.sans-serif'] = ['DejaVu Sans', 'Arial', 'sans-serif']  # font fallback chain for plot labels

def get_args(argv=None):
    """Parse command-line arguments (with auto-download fallback).

    Args:
        argv: optional explicit argument list; None means sys.argv. Added as a
              backward-compatible parameter so the parser is testable.

    Raises:
        ValueError: if --test-size falls outside the open interval (0, 1).
    """
    parser = argparse.ArgumentParser(description="改进的ResNet50遥感图像分类模型训练和压缩")

    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_dir = os.path.normpath(os.path.join(script_dir, "..", "data_split"))
    # Environment override for the default dataset location.
    env_data_dir = os.environ.get("RS_DATA_ROOT")
    if env_data_dir and os.path.isdir(env_data_dir):
        default_data_dir = env_data_dir

    # Model / data / output
    parser.add_argument("--model", "-m", default="resnet50",
                        choices=["resnet18", "resnet50", "resnet101", "efficientnet_b0"])
    parser.add_argument("--data-dir", "-d", default=default_data_dir,
                        help="数据集根目录（应包含 train/ 与 test/ 子目录或按类的原始文件夹）")
    parser.add_argument("--output-dir", "-o", default="./output")
    parser.add_argument("--num-classes", type=int, default=10)

    # Training hyper-parameters
    parser.add_argument("--epochs", "-e", type=int, default=25)
    parser.add_argument("--batch-size", "-b", type=int, default=16)
    parser.add_argument("--learning-rate", "-lr", type=float, default=1e-3)
    parser.add_argument("--weight-decay", type=float, default=5e-4)
    parser.add_argument("--patience", type=int, default=8)
    parser.add_argument("--dataset-size", type=int, default=5000)
    parser.add_argument("--img-size", type=int, default=224)

    # Mode & pretraining (legacy options kept for compatibility)
    parser.add_argument("--mode", choices=['train', 'compress', 'distill', 'both'], default='both')
    parser.add_argument("--pretrained-path", type=str, default=None)
    # Fix: the old `action="store_true", default=True` combination made this
    # flag impossible to disable. BooleanOptionalAction keeps --use-pretrained
    # working and adds --no-use-pretrained to turn it off.
    parser.add_argument("--use-pretrained", action=argparse.BooleanOptionalAction, default=True)

    # Resume & distillation (original options preserved)
    parser.add_argument("--resume", action="store_true", default=False)
    parser.add_argument("--resume-path", type=str, default=None)
    parser.add_argument("--finetune-epochs", type=int, default=5)
    parser.add_argument("--finetune-lr", type=float, default=1e-4)

    parser.add_argument("--distill", action="store_true", default=False)
    parser.add_argument("--distill-epochs", type=int, default=8)
    parser.add_argument("--distill-temp", type=float, default=4.0)
    parser.add_argument("--distill-alpha", type=float, default=0.5)

    # Auto-download & split options
    parser.add_argument("--auto-download", action="store_true", default=False,
                        help="当 data-dir 不含 train/test 且无类别结构时，自动下载 Kaggle 数据集并切分")
    parser.add_argument("--kaggle-dataset", type=str,
                        default="mahmoudreda55/satellite-image-classification",
                        help="Kaggle 数据集标识（kagglehub 用），例如 'user/dataset-name'")
    parser.add_argument("--test-size", type=float, default=0.2,
                        help="自动划分测试集比例（0~1）")

    args = parser.parse_args(argv)

    if not (0.0 < args.test_size < 1.0):
        raise ValueError(f"--test-size 必须在 (0,1) 区间内，当前为 {args.test_size}")

    # Fallback: a plain "run" with no prepared data still triggers download.
    def _has_train_test(dirpath: str) -> bool:
        return (os.path.isdir(os.path.join(dirpath, "train")) and
                os.path.isdir(os.path.join(dirpath, "test")))

    data_dir_exists = os.path.isdir(args.data_dir)
    data_ready = data_dir_exists and _has_train_test(args.data_dir)
    # Fix: a data dir holding a recognizable class layout (but no split yet)
    # is usable — _prepare_dataset_entrypoint splits it in place. The previous
    # code redirected such dirs to the Kaggle download dir, silently ignoring
    # the user's local data.
    has_local_classes = data_dir_exists and _find_class_root(args.data_dir) is not None
    if not data_ready and not has_local_classes:
        if not data_dir_exists:
            print(f"[提示] 数据目录不存在: {args.data_dir}")
        if not args.auto_download:
            print("[提示] 未检测到可用数据集，已自动开启 --auto-download，并将使用 Kaggle 数据集。")
            args.auto_download = True
        # Redirect to a dedicated directory for auto-downloaded data so it
        # never collides with a manually managed one.
        kaggle_dir = os.path.normpath(os.path.join(script_dir, "..", "data_split_kaggle"))
        if os.path.normpath(args.data_dir) != kaggle_dir:
            print(f"[提示] 将数据目录切换为（用于自动下载与切分）：{kaggle_dir}")
        args.data_dir = kaggle_dir

    return args

def load_resnet_model(model_name, num_classes=10, use_pretrained=True):
    """Build a torchvision backbone with a Dropout(0.5) + Linear classifier head.

    Args:
        model_name: one of "resnet18", "resnet50", "resnet101", "efficientnet_b0".
        num_classes: output dimension of the new classification head.
        use_pretrained: load ImageNet weights when True, random init otherwise.

    Returns:
        The configured nn.Module.

    Raises:
        ValueError: for an unsupported model_name.
    """
    print(f"加载 {model_name} 模型...")

    # Select builder, pretrained-weight enum and display label in one place
    # instead of four near-identical branches.
    if model_name == "resnet18":
        from torchvision.models import resnet18, ResNet18_Weights
        builder, weights, label = resnet18, ResNet18_Weights.IMAGENET1K_V1, "ResNet18"
    elif model_name == "resnet50":
        from torchvision.models import resnet50, ResNet50_Weights
        builder, weights, label = resnet50, ResNet50_Weights.IMAGENET1K_V2, "ResNet50"
    elif model_name == "resnet101":
        from torchvision.models import resnet101, ResNet101_Weights
        builder, weights, label = resnet101, ResNet101_Weights.IMAGENET1K_V2, "ResNet101"
    elif model_name == "efficientnet_b0":
        from torchvision.models import efficientnet_b0, EfficientNet_B0_Weights
        builder, weights, label = efficientnet_b0, EfficientNet_B0_Weights.IMAGENET1K_V1, "EfficientNet-B0"
    else:
        raise ValueError(f"不支持的模型: {model_name}")

    if use_pretrained:
        model = builder(weights=weights)
        print(f"使用ImageNet预训练的{label}")
    else:
        model = builder(weights=None)
        print(f"使用随机初始化的{label}")

    # Replace the classifier head with Dropout + Linear sized for num_classes.
    # EfficientNet exposes it as .classifier, ResNets as .fc.
    if model_name == "efficientnet_b0":
        in_features = model.classifier[1].in_features
        model.classifier = nn.Sequential(nn.Dropout(0.5), nn.Linear(in_features, num_classes))
    else:
        in_features = model.fc.in_features
        model.fc = nn.Sequential(nn.Dropout(0.5), nn.Linear(in_features, num_classes))

    return model

def create_dataloaders(data_dir, batch_size=16, dataset_size=5000, img_size=224, num_classes=10,
                       auto_download=False, kaggle_dataset="mahmoudreda55/satellite-image-classification",
                       test_size=0.2):
    """Build train/test DataLoaders from an ImageFolder layout, with optional
    Kaggle auto-download and automatic train/test splitting."""
    print("准备真实遥感图像分类数据集 (ImageFolder)...")

    # Resolve a usable root: existing train/test first, then a local class
    # layout, and finally a Kaggle download + split when allowed.
    root = _prepare_dataset_entrypoint(
        data_dir=data_dir,
        auto_download=auto_download,
        kaggle_dataset=kaggle_dataset,
        test_size=test_size,
    )

    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]

    # Training pipeline: light geometric + photometric augmentation.
    augment = transforms.Compose([
        EnsureRGB(),
        transforms.RandomResizedCrop(img_size, scale=(0.8, 1.0)),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.1),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05),
        transforms.ToTensor(),
        transforms.Normalize(mean=imagenet_mean, std=imagenet_std),
    ])
    # Evaluation pipeline: deterministic resize + center crop.
    deterministic = transforms.Compose([
        EnsureRGB(),
        transforms.Resize(int(img_size * 1.15)),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=imagenet_mean, std=imagenet_std),
    ])

    train_dataset = datasets.ImageFolder(os.path.join(root, "train"), transform=augment)
    test_dataset = datasets.ImageFolder(os.path.join(root, "test"), transform=deterministic)

    class_names = train_dataset.classes
    print(f"发现的类: {class_names}（共 {len(class_names)} 类）")
    if num_classes != len(class_names):
        print(f"[提示] --num-classes 建议设为 {len(class_names)}")

    workers = 0  # keep at 0 for Windows compatibility
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                              num_workers=workers, pin_memory=True, drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                             num_workers=workers, pin_memory=True)

    print(f"训练集大小: {len(train_dataset)} | 测试集大小: {len(test_dataset)}")
    return train_loader, test_loader

def train_model(model, train_loader, test_loader, device, args):
    """Train the classifier with label smoothing, discriminative LRs,
    OneCycleLR scheduling and gradient clipping; keeps the best-on-test weights.

    Args:
        model: classifier producing logits of shape (batch, num_classes).
        train_loader / test_loader: data iterators of (images, labels).
        device: torch.device to run on.
        args: namespace with epochs, learning_rate, weight_decay, use_pretrained,
              output_dir.

    Returns:
        (model restored to best weights, best test accuracy, history dict).
    """
    print("开始训练ResNet模型...")

    # Label smoothing acts as a mild regularizer against overfitting.
    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    # Discriminative learning rates: pretrained backbone at 0.1x the LR of the
    # freshly initialized classifier head.
    if args.use_pretrained:
        backbone_params, classifier_params = [], []
        for name, param in model.named_parameters():
            if 'fc' in name:
                classifier_params.append(param)
            else:
                backbone_params.append(param)
        optimizer = optim.AdamW([
            {'params': backbone_params, 'lr': args.learning_rate * 0.1},
            {'params': classifier_params, 'lr': args.learning_rate}
        ], weight_decay=args.weight_decay)
    else:
        optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    # OneCycleLR with steps_per_epoch defines a *per-batch* schedule of
    # epochs * steps_per_epoch total steps, so scheduler.step() must run once
    # per batch. (Bug fix: the previous code stepped once per epoch, advancing
    # only `epochs` of the total steps and leaving the LR nearly frozen.)
    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=args.learning_rate,
        epochs=args.epochs,
        steps_per_epoch=len(train_loader),
        pct_start=0.3
    )

    train_losses, train_accuracies, test_accuracies = [], [], []
    best_test_acc = 0.0
    best_model_state = None

    for epoch in range(args.epochs):
        model.train()
        running_loss = 0.0
        correct_train = 0
        total_train = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for images, labels in pbar:
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()

            # Gradient clipping stabilizes the high-LR phase of the cycle.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()
            scheduler.step()  # per-batch update, matching steps_per_epoch above

            # Bookkeeping
            running_loss += loss.item()
            pred = outputs.argmax(dim=1)
            total_train += labels.size(0)
            correct_train += (pred == labels).sum().item()

            current_lr = scheduler.get_last_lr()[0]
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'Acc': f'{100. * correct_train / total_train:.2f}%',
                'LR': f'{current_lr:.6f}'
            })

        # End-of-epoch validation
        avg_train_loss = running_loss / len(train_loader)
        train_acc = correct_train / total_train
        test_acc = evaluate_model(model, test_loader, device)

        train_losses.append(avg_train_loss)
        train_accuracies.append(train_acc)
        test_accuracies.append(test_acc)

        if test_acc > best_test_acc:
            best_test_acc = test_acc
            best_model_state = copy.deepcopy(model.state_dict())

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train Acc: {train_acc:.4f}, '
              f'Test Acc: {test_acc:.4f}, '
              f'Best Test Acc: {best_test_acc:.4f}')

    # Restore the best-performing weights
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试准确率: {best_test_acc:.4f}')

    # Plot learning curves
    plot_training_curves(train_losses, train_accuracies, test_accuracies, args.output_dir)

    return model, best_test_acc, {
        'train_losses': train_losses,
        'train_accuracies': train_accuracies,
        'test_accuracies': test_accuracies,
        'best_test_acc': best_test_acc
    }

def evaluate_model(model, dataloader, device):
    """Return the top-1 accuracy of *model* over *dataloader* (0 when empty)."""
    model.eval()
    correct = total = 0

    with torch.no_grad():
        for batch_images, batch_labels in dataloader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            predicted = model(batch_images).argmax(dim=1)
            correct += (predicted == batch_labels).sum().item()
            total += batch_labels.size(0)

    return correct / total if total > 0 else 0

def distill_train(teacher, student, train_loader, test_loader, device, args):
    """Teacher→student knowledge distillation.

    Per-batch loss:
        alpha * CE(student_logits, labels)
        + (1 - alpha) * T^2 * KL(log_softmax(student/T) || softmax(teacher/T))

    Args:
        teacher: frozen reference model (kept in eval mode).
        student: model being trained.
        args: namespace with distill_epochs, distill_temp, distill_alpha,
              learning_rate, weight_decay.

    Returns:
        (student restored to its best-on-test weights, best test accuracy).
    """
    teacher.eval()

    criterion_ce = nn.CrossEntropyLoss()
    criterion_kd = nn.KLDivLoss(reduction='batchmean')
    optimizer = optim.AdamW(student.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    T = args.distill_temp
    alpha = args.distill_alpha

    best_acc = 0.0
    best_state = None

    for epoch in range(args.distill_epochs):
        # Bug fix: re-enter train mode each epoch. evaluate_model() below
        # flips the student to eval(); without this call, Dropout/BatchNorm
        # stayed disabled for every epoch after the first.
        student.train()
        running_loss, correct, total = 0.0, 0, 0

        for x, y in tqdm(train_loader, desc=f"KD Epoch {epoch+1}/{args.distill_epochs}"):
            x, y = x.to(device), y.to(device)
            with torch.no_grad():
                logits_t = teacher(x)

            logits_s = student(x)

            # Hard-label cross entropy
            loss_ce = criterion_ce(logits_s, y)
            # Soft-label KL divergence, scaled by T^2 to keep gradient
            # magnitudes comparable across temperatures
            p_s = F.log_softmax(logits_s / T, dim=1)
            p_t = F.softmax(logits_t / T, dim=1)
            loss_kd = criterion_kd(p_s, p_t) * (T * T)

            loss = alpha * loss_ce + (1 - alpha) * loss_kd

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item() * y.size(0)
            pred = logits_s.argmax(dim=1)
            correct += (pred == y).sum().item()
            total += y.size(0)

        train_loss = running_loss / total
        train_acc = correct / total
        test_acc  = evaluate_model(student, test_loader, device)

        if test_acc > best_acc:
            best_acc = test_acc
            best_state = copy.deepcopy(student.state_dict())

        print(f"[KD Epoch {epoch+1}] loss={train_loss:.4f} train_acc={train_acc:.4f} test_acc={test_acc:.4f}")

    # Restore the best weights
    if best_state is not None:
        student.load_state_dict(best_state)
    return student, best_acc

def comprehensive_evaluation(model, dataloader, device, class_names):
    """
    Full evaluation: accuracy, macro & weighted P/R/F1, confusion matrix,
    plus efficiency metrics (parameter count, model size, inference latency,
    throughput, peak GPU memory).

    Args:
        model: classifier producing logits of shape (batch, n_classes).
        dataloader: yields (images, labels) batches.
        device: torch.device used for inference.
        class_names: label names used in the per-class report.

    Returns:
        dict of quality metrics, raw predictions, and efficiency numbers.
    """
    model.eval()
    y_true, y_pred = [], []
    total_time = 0.0

    use_cuda = (device.type == 'cuda' and torch.cuda.is_available())
    if use_cuda:
        # Reset so the peak-memory reading below reflects this run only.
        torch.cuda.reset_peak_memory_stats(device)

    with torch.no_grad():
        for images, labels in dataloader:
            images, labels = images.to(device), labels.to(device)

            # Synchronize around the forward pass so wall-clock timing
            # measures completed GPU work, not just kernel launches.
            if use_cuda: torch.cuda.synchronize()
            t0 = time.perf_counter()
            outputs = model(images)
            if use_cuda: torch.cuda.synchronize()
            total_time += (time.perf_counter() - t0)

            _, preds = torch.max(outputs, 1)
            y_true.extend(labels.cpu().numpy().tolist())
            y_pred.extend(preds.cpu().numpy().tolist())

    accuracy = accuracy_score(y_true, y_pred)
    precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(
        y_true, y_pred, average='macro', zero_division=0)
    precision_weighted, recall_weighted, f1_weighted, _ = precision_recall_fscore_support(
        y_true, y_pred, average='weighted', zero_division=0)
    cm = confusion_matrix(y_true, y_pred)

    # Per-class report (printed for manual inspection only)
    from sklearn.metrics import classification_report
    print("\n=== Per-class report ===\n" +
          classification_report(y_true, y_pred, target_names=class_names, zero_division=0))

    # Efficiency metrics
    num_params = sum(p.numel() for p in model.parameters())
    size_mb = get_model_size(model)
    num_images = len(y_true)
    avg_latency_ms = (total_time / max(1, num_images)) * 1000.0
    throughput = num_images / max(1e-9, total_time)
    gpu_mem_mb = torch.cuda.max_memory_allocated(device) / (1024 * 1024) if use_cuda else None

    print("=== Efficiency ===")
    print(f"参数量: {num_params:,}")
    print(f"模型大小: {size_mb:.2f} MB")
    print(f"平均推理时延: {avg_latency_ms:.3f} ms/图")
    print(f"吞吐: {throughput:.2f} img/s")
    if gpu_mem_mb is not None:
        print(f"评估峰值显存: {gpu_mem_mb:.2f} MB")

    return {
        'accuracy': accuracy,
        'precision': precision_macro,
        'recall': recall_macro,
        'f1_score': f1_macro,
        'precision_weighted': precision_weighted,
        'recall_weighted': recall_weighted,
        'f1_weighted': f1_weighted,
        'confusion_matrix': cm.tolist(),
        'y_true': y_true,
        'y_pred': y_pred,
        'num_params': num_params,
        'model_size_mb': size_mb,
        'avg_latency_ms': avg_latency_ms,
        'throughput_img_s': throughput,
        'gpu_peak_mem_mb': gpu_mem_mb
    }

def plot_training_curves(train_losses, train_accuracies, test_accuracies, output_dir):
    """Plot loss, accuracy and overfitting-gap curves; save PNG to output_dir."""
    xs = range(1, len(train_losses) + 1)

    plt.figure(figsize=(15, 5))

    # Panel 1: training loss
    plt.subplot(1, 3, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss', linewidth=2)
    plt.title('Training Loss', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Loss', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 2: train vs test accuracy
    plt.subplot(1, 3, 2)
    plt.plot(xs, train_accuracies, 'b-', label='Training Accuracy', linewidth=2)
    plt.plot(xs, test_accuracies, 'r-', label='Test Accuracy', linewidth=2)
    plt.title('Training and Test Accuracy', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Accuracy', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 3: generalization gap (train acc - test acc)
    plt.subplot(1, 3, 3)
    gap = [tr - te for tr, te in zip(train_accuracies, test_accuracies)]
    plt.plot(xs, gap, 'g-', label='Overfitting Gap', linewidth=2)
    plt.title('Overfitting Analysis', fontsize=14)
    plt.xlabel('Epoch', fontsize=12)
    plt.ylabel('Train Acc - Test Acc', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.axhline(y=0, color='k', linestyle='--', alpha=0.5)

    plt.tight_layout()
    out_path = os.path.join(output_dir, 'training_curves.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {out_path}")

def save_eval_report(eval_res, output_dir, tag="fp32", class_names=None):
    """
    Persist evaluation results as JSON and plot the confusion matrix
    (raw counts and row-normalized variants).

    Args:
        eval_res: dict returned by comprehensive_evaluation.
        output_dir: destination directory (created if missing).
        tag: suffix distinguishing model variants, e.g. 'fp32' / 'student' / 'kd'.
        class_names: axis labels; synthesized as class_i when omitted.
    """
    import numpy as np
    from sklearn.metrics import classification_report

    os.makedirs(output_dir, exist_ok=True)

    y_true = eval_res.get("y_true", [])
    y_pred = eval_res.get("y_pred", [])
    n = len(set(y_true)) if y_true else 0

    # Fallback class names; prefer the confusion-matrix size when present.
    if class_names is None:
        if "confusion_matrix" in eval_res:
            n = len(eval_res["confusion_matrix"])
        class_names = [f"class_{i}" for i in range(n)]

    # Extra: per-class metrics as a JSON-serializable dict.
    per_class = None
    if y_true and y_pred:
        rep = classification_report(y_true, y_pred, target_names=class_names, output_dict=True, zero_division=0)
        per_class = rep

    # Convert numpy scalars/arrays to plain Python so json.dump accepts them.
    # NOTE(review): only top-level values are converted; numpy values nested
    # inside containers would still fail serialization — confirm the inputs.
    def _to_py(o):
        import numpy as np
        if isinstance(o, (np.integer,)): return int(o)
        if isinstance(o, (np.floating,)): return float(o)
        if isinstance(o, (np.ndarray,)): return o.tolist()
        return o

    serializable = {k: _to_py(v) for k, v in eval_res.items()}
    if per_class is not None:
        serializable["per_class_report"] = per_class

    # Write the JSON report
    json_path = os.path.join(output_dir, f"eval_report_{tag}.json")
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(serializable, f, ensure_ascii=False, indent=2)
    print(f"评估报告已保存: {json_path}")

    # Confusion-matrix plots (raw counts & row-normalized)
    cm = np.array(eval_res.get("confusion_matrix", []))
    if cm.size:
        ticks = np.arange(cm.shape[0])

        # Raw counts
        plt.figure(figsize=(6.5, 5.5))
        im = plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
        plt.title(f"Confusion Matrix ({tag})")
        plt.colorbar(im, fraction=0.046, pad=0.04)
        plt.xticks(ticks, class_names, rotation=45, ha="right")
        plt.yticks(ticks, class_names)
        thresh = cm.max() / 2.0 if cm.max() > 0 else 0.5
        for i in range(cm.shape[0]):
            for j in range(cm.shape[1]):
                plt.text(j, i, int(cm[i, j]), ha="center", va="center",
                         color="white" if cm[i, j] > thresh else "black", fontsize=10)
        plt.ylabel("True label"); plt.xlabel("Predicted label")
        plt.tight_layout()
        raw_png = os.path.join(output_dir, f"confusion_matrix_{tag}.png")
        plt.savefig(raw_png, dpi=300, bbox_inches="tight"); plt.close()
        print(f"混淆矩阵（raw）已保存: {raw_png}")

        # Row-normalized (each row sums to ~1; guards against zero rows)
        with np.errstate(divide='ignore', invalid='ignore'):
            cm_norm = cm.astype(np.float64) / (cm.sum(axis=1, keepdims=True) + 1e-12)
            cm_norm = np.nan_to_num(cm_norm)
        plt.figure(figsize=(6.5, 5.5))
        im = plt.imshow(cm_norm, interpolation='nearest', cmap=plt.cm.Blues)
        plt.title(f"Confusion Matrix Normalized ({tag})")
        plt.colorbar(im, fraction=0.046, pad=0.04)
        plt.xticks(ticks, class_names, rotation=45, ha="right")
        plt.yticks(ticks, class_names)
        for i in range(cm_norm.shape[0]):
            for j in range(cm_norm.shape[1]):
                plt.text(j, i, f"{cm_norm[i, j]:.2f}", ha="center", va="center",
                         color="white" if cm_norm[i, j] > 0.5 else "black", fontsize=10)
        plt.ylabel("True label"); plt.xlabel("Predicted label")
        plt.tight_layout()
        norm_png = os.path.join(output_dir, f"confusion_matrix_{tag}_norm.png")
        plt.savefig(norm_png, dpi=300, bbox_inches="tight"); plt.close()
        print(f"混淆矩阵（row-normalized）已保存: {norm_png}")

def plot_confusion_matrix_simple(cm, class_names, output_dir, filename='confusion_matrix.png'):
    """Render a raw-count confusion matrix heatmap and save it under output_dir."""
    cm = np.array(cm)
    plt.figure(figsize=(8, 7))
    heat = plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.colorbar(heat)
    ticks = np.arange(len(class_names))
    plt.xticks(ticks, class_names, rotation=45, ha='right')
    plt.yticks(ticks, class_names)
    # White text on dark cells, black on light ones.
    cutoff = cm.max() / 2.0 if cm.size else 0
    for row in range(len(class_names)):
        for col in range(len(class_names)):
            ink = "white" if cm[row, col] > cutoff else "black"
            plt.text(col, row, format(int(cm[row, col])), ha="center", va="center", color=ink)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    out_path = os.path.join(output_dir, filename)
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"混淆矩阵已保存到: {out_path}")


def visualize_samples(dataloader, output_dir, num_samples=16):
    """Save a 4x4 grid of de-normalized sample images labeled by class name."""
    class_names = getattr(dataloader.dataset, "classes", None)
    if class_names is None:
        class_names = [str(i) for i in range(10)]

    fig, axes = plt.subplots(4, 4, figsize=(16, 16))
    fig.suptitle('样本可视化', fontsize=16)

    # ImageNet statistics used to undo Normalize (hoisted out of the loop).
    mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)

    images, labels = next(iter(dataloader))
    for idx in range(min(num_samples, len(images))):
        ax = axes[idx // 4, idx % 4]
        # De-normalize so the colors render correctly.
        pixels = torch.clamp(images[idx].clone() * std + mean, 0, 1)
        ax.imshow(pixels.permute(1, 2, 0).numpy())
        cls = int(labels[idx])
        ax.set_title(class_names[cls] if cls < len(class_names) else f"Class {cls}", fontsize=12)
        ax.axis('off')

    plt.tight_layout()
    out_path = os.path.join(output_dir, 'data_samples.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据样本已保存到: {out_path}")

def get_model_size(model):
    """Return the in-memory footprint (MB) of a model's parameters and buffers."""
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    total_bytes += sum(b.nelement() * b.element_size() for b in model.buffers())
    return total_bytes / (1024 * 1024)

def main():
    """Entry point: prepare data -> train/finetune -> (optional) knowledge
    distillation -> final evaluation and reports."""
    # 1) Parse arguments & create the output directory
    args = get_args()
    args.output_dir = os.path.abspath(args.output_dir)
    os.makedirs(args.output_dir, exist_ok=True)

    # 2) Device selection
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # 3) Build the model (this model also serves as the distillation teacher)
    model = load_resnet_model(args.model, args.num_classes, args.use_pretrained)
    model.to(device)

    # Optional: load external weights from --pretrained-path
    # (strict=False tolerates layer-name mismatches)
    if getattr(args, "pretrained_path", None):
        if os.path.isfile(args.pretrained_path):
            ckpt = torch.load(args.pretrained_path, map_location=device)
            # Accept either a bare state_dict or a checkpoint wrapping one.
            state_dict = ckpt.get("state_dict", ckpt)
            model.load_state_dict(state_dict, strict=False)
            print(f"已加载预训练权重：{args.pretrained_path}")
        else:
            print(f"[警告] 未找到 --pretrained-path 指定文件：{args.pretrained_path}")

    # 4) Data loaders (supports auto-download and train/test splitting)
    train_loader, test_loader = create_dataloaders(
        args.data_dir,
        batch_size=args.batch_size,
        dataset_size=args.dataset_size,
        img_size=args.img_size,
        num_classes=args.num_classes,
        auto_download=getattr(args, "auto_download", False),
        kaggle_dataset=getattr(args, "kaggle_dataset", "mahmoudreda55/satellite-image-classification"),
        test_size=getattr(args, "test_size", 0.2),
    )

    # 5) Train or finetune
    if args.mode in ['train', 'both']:
        if getattr(args, "resume", False):
            # Resume from --resume-path, then finetune with reduced settings
            if args.resume_path and os.path.isfile(args.resume_path):
                print(f"从 {args.resume_path} 恢复模型权重并微调...")
                checkpoint = torch.load(args.resume_path, map_location=device)
                model.load_state_dict(checkpoint, strict=False)
                # Override with finetune hyper-parameters
                args.epochs = args.finetune_epochs
                args.learning_rate = args.finetune_lr
            else:
                raise FileNotFoundError(f"找不到 --resume-path 指定的文件: {args.resume_path}")
        else:
            print("=== 初始训练阶段 ===")

        model, best_acc, _ = train_model(model, train_loader, test_loader, device, args)
        ckpt_path = os.path.join(args.output_dir, f"{args.model}_trained.pth")
        torch.save(model.state_dict(), ckpt_path)
        print(f"训练完成，最佳测试准确率: {best_acc:.4f}，权重已保存至 {ckpt_path}")

    # 6) Optional knowledge distillation: teacher = current model,
    #    student = a lightweight resnet18
    if args.mode in ['distill', 'both'] or getattr(args, "distill", False):
        print("=== 知识蒸馏（Teacher → Student）===")
        # Prepare the teacher: prefer the just-trained checkpoint
        teacher = load_resnet_model(args.model, args.num_classes, use_pretrained=True)
        trained_ckpt = os.path.join(args.output_dir, f"{args.model}_trained.pth")
        if os.path.isfile(trained_ckpt):
            teacher.load_state_dict(torch.load(trained_ckpt, map_location=device), strict=False)
            print(f"Teacher 权重加载自：{trained_ckpt}")
        else:
            # No training artifact: fall back to the in-memory model
            teacher.load_state_dict(model.state_dict(), strict=False)
            print("[提示] 未找到训练产物，使用当前内存中的模型作为 Teacher")
        teacher.to(device).eval()

        # Student: lightweight model trained from scratch
        student = load_resnet_model('resnet18', args.num_classes, use_pretrained=False).to(device)
        student, kd_best = distill_train(teacher, student, train_loader, test_loader, device, args)

        student_ckpt = os.path.join(args.output_dir, "student_kd.pth")
        torch.save(student.state_dict(), student_ckpt)
        print(f"蒸馏完成，学生模型最佳测试准确率: {kd_best:.4f}，权重已保存至 {student_ckpt}")

    # 7) Final evaluation and reports
    print("=== 最终评估 ===")
    class_names = getattr(test_loader.dataset, 'classes', [str(i) for i in range(args.num_classes)])

    # Evaluate the current main model (FP32)
    eval_main = comprehensive_evaluation(model, test_loader, device, class_names)
    save_eval_report(eval_main, args.output_dir, tag="fp32", class_names=class_names)

    # If distillation ran, evaluate the student model too
    if args.mode in ['distill', 'both'] or getattr(args, "distill", False):
        student_ckpt = os.path.join(args.output_dir, "student_kd.pth")
        if os.path.isfile(student_ckpt):
            student = load_resnet_model('resnet18', args.num_classes, use_pretrained=False)
            student.load_state_dict(torch.load(student_ckpt, map_location=device), strict=False)
            student.to(device).eval()
            eval_student = comprehensive_evaluation(student, test_loader, device, class_names)
            save_eval_report(eval_student, args.output_dir, tag="student", class_names=class_names)
        else:
            print(f"[警告] 未找到学生模型权重 {student_ckpt}，跳过学生评估。")

# Standard script entry point: run the full pipeline only when executed directly.
if __name__ == "__main__":
    main()
