# -*- coding: utf-8 -*-
"""
多卡版分类训练与推理工具
======================

文件：src/trainer.py
作者：zym1105（多卡支持与中文注释改造）
日期：2025-08-19

改动要点
--------
1) 支持多卡：默认使用 DataParallel，自动使用所有可见 GPU（由 CUDA_VISIBLE_DEVICES 控制）。
2) batch_size 解释为“每卡 batch”，程序内部自动乘以可见卡数得到总 batch。
3) 训练/推理对 DataParallel 透明（model = DataParallel(model).to(device)）。
4) 其他逻辑（K 折训练、日志、曲线保存）保持不变。

注意
----
- DataParallel 下 DataLoader 的 batch_size 应为 **总 batch**，因此本文件会将用户传入的 `batch_size`
  （每卡）乘以 `n_gpus`，生成 DataLoader 的 batch_size。
- 如果未来要切换到 DDP（更高效），需要改用 DistributedSampler 与 torchrun 启动。
"""

import os
import json
import numpy as np
import torch
import matplotlib.pyplot as plt

from torch.utils.data import DataLoader, Subset
from torch.optim.lr_scheduler import CosineAnnealingLR
from sklearn.model_selection import KFold
from tqdm import tqdm
from PIL import Image  # noqa: F401  # 仅为确保 PIL 安装齐全
from src.models import load_model
from sklearn.metrics import accuracy_score, roc_auc_score

# -------------------------
# Optional: enable cuDNN autotuning so it benchmarks and caches the fastest
# convolution algorithms (beneficial when input shapes stay constant).
# -------------------------
torch.backends.cudnn.benchmark = True


def _get_device_and_ngpus(device_str: str):
    """根据传入 device 字符串与可用 GPU，返回实际 device 和可见 GPU 数。"""
    if device_str.startswith("cuda") and torch.cuda.is_available():
        n_gpus = torch.cuda.device_count()
        if n_gpus > 0:
            return torch.device("cuda"), n_gpus
    # 回退到 CPU
    return torch.device("cpu"), 0


def save_loss_curve(loss_list, save_path, title="训练损失曲线", xlabel="Epoch", ylabel="Loss"):
    """Render the per-epoch values in *loss_list* as a line chart and save it.

    The x-axis is the 1-based epoch index; the figure is written to
    *save_path* and closed afterwards to free the backend resources.
    """
    n_points = len(loss_list)
    fig = plt.figure()
    axes = fig.gca()
    axes.plot(range(1, n_points + 1), loss_list, marker='o')
    axes.set_title(title)
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.grid()
    fig.tight_layout()
    fig.savefig(save_path)
    plt.close(fig)


def save_log(log_lines, save_path):
    """Persist *log_lines* (a str, or an iterable of str) as UTF-8 text.

    A plain string is written verbatim; an iterable is written one entry per
    line, with trailing whitespace stripped from each entry.
    """
    with open(save_path, 'w', encoding='utf-8') as fh:
        if isinstance(log_lines, str):
            fh.write(log_lines)
            return
        fh.writelines(entry.rstrip() + "\n" for entry in log_lines)


def accuracy(preds, labels):
    """Return the fraction of positions where *preds* equals *labels*."""
    n_correct = (preds == labels).sum()
    return n_correct / len(labels)


def _wrap_dataparallel_if_needed(model: torch.nn.Module, device: torch.device, n_gpus: int):
    """
    如果有多块 GPU，使用 DataParallel 包装模型；否则直接 to(device)。
    返回：包装后的模型。
    """
    model = model.to(device)
    if n_gpus > 1:
        # device_ids 将默认使用所有可见卡（由 CUDA_VISIBLE_DEVICES 控制）
        model = torch.nn.DataParallel(model)
    return model


def train_one_fold(model,
                   train_dataset,
                   epochs,
                   per_gpu_batch_size,
                   lr,
                   wd,
                   device_str: str):
    """
    Train *model* on *train_dataset* for one fold (multi-GPU aware).

    Parameters
    ----------
    model : torch.nn.Module
        Freshly loaded classifier; it is moved to the resolved device and,
        when more than one GPU is visible, wrapped in DataParallel.
    train_dataset : Dataset
        ``__getitem__`` must yield ``(image_tensor, label_int, image_path)``.
    epochs : int
        Number of full passes over the training data.
    per_gpu_batch_size : int
        Batch size *per GPU*; the DataLoader batch is this times the number
        of visible GPUs (or times 1 on CPU).
    lr, wd : float
        AdamW learning rate and weight decay.
    device_str : str
        'cuda' / 'cpu' (usually just 'cuda').

    Returns
    -------
    (model, train_loss_curve, train_acc_curve, train_log)
        The trained (possibly DataParallel-wrapped) model, per-epoch loss and
        accuracy lists, and the textual log lines.
    """
    device, n_gpus = _get_device_and_ngpus(device_str)
    total_batch_size = per_gpu_batch_size * max(1, n_gpus)

    train_loader = DataLoader(
        train_dataset,
        batch_size=total_batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=(device.type == "cuda"),
        drop_last=False,
    )

    # Move/wrap the model BEFORE constructing the optimizer so the optimizer
    # is guaranteed to hold references to the on-device parameters (this is
    # the order recommended by the PyTorch optimizer documentation; the
    # original code created the optimizer first).
    model = _wrap_dataparallel_if_needed(model, device, n_gpus)

    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=wd)
    criterion = torch.nn.CrossEntropyLoss()
    scheduler = CosineAnnealingLR(optimizer, T_max=epochs)

    train_loss_curve, train_acc_curve = [], []
    train_log = []
    print(
        f"🚀 开始训练 | 可见GPU: {n_gpus} | 每卡BS: {per_gpu_batch_size} | 总BS: {total_batch_size} | "
        f"学习率: {lr} | 权重衰减: {wd}"
    )

    for epoch in range(epochs):
        model.train()
        train_loss, train_correct, train_total = 0.0, 0, 0

        # Dataset yields (image, label, path); the path is not needed here.
        for images, labels, _image_paths in tqdm(
                train_loader, desc=f"🚂 训练 Epoch {epoch+1}/{epochs}", leave=False):
            images = images.to(device, non_blocking=True)
            labels = labels.to(device, non_blocking=True)

            optimizer.zero_grad(set_to_none=True)
            outputs = model(images)
            # HuggingFace models return an output object with .logits;
            # plain nn.Modules return the tensor directly.
            logits = outputs.logits if hasattr(outputs, 'logits') else outputs
            loss = criterion(logits, labels)
            preds = logits.argmax(1)

            loss.backward()
            optimizer.step()

            # Sample-weighted accumulation so the epoch average stays exact
            # even when the final batch is smaller than the others.
            batch_size_curr = labels.size(0)
            train_loss += loss.item() * batch_size_curr
            train_correct += (preds == labels).sum().item()
            train_total += batch_size_curr

        avg_train_loss = train_loss / max(1, train_total)
        train_acc = train_correct / max(1, train_total)
        train_loss_curve.append(avg_train_loss)
        train_acc_curve.append(train_acc)

        log_line = (
            f"Epoch {epoch+1}/{epochs} | Loss: {avg_train_loss:.4f} | "
            f"Acc: {train_acc:.4f} | LR: {scheduler.get_last_lr()[0]:.6f}"
        )
        print(log_line)
        train_log.append(log_line)

        scheduler.step()

    return model, train_loss_curve, train_acc_curve, train_log

def inference_model(model, loader, device, num_classes: int):
    """
    Run single-device inference (no DataParallel) and score the fold.

    The dataset behind *loader* must yield ``(image_tensor, label_int,
    image_path)`` per item.

    Returns
    -------
    (probs, labels, path2prob, path2label, acc, auc)
        probs  : (N, num_classes) softmax probabilities
        labels : (N,) ground-truth labels
        path2prob / path2label : per-image-path probability / label lookups
        acc    : accuracy (NaN when the loader yields nothing)
        auc    : one-vs-one multi-class AUC, or None when it cannot be computed
    """
    model.to(device)
    model.eval()

    prob_chunks, label_chunks = [], []
    path2prob, path2label = {}, {}

    with torch.no_grad():
        for images, labels, image_paths in loader:
            images = images.to(device, non_blocking=True)
            labels = labels.to(device, non_blocking=True)

            outputs = model(images)
            logits = outputs.logits if hasattr(outputs, "logits") else outputs
            batch_probs = torch.softmax(logits, dim=1).cpu().numpy()
            batch_labels = labels.cpu().numpy()

            prob_chunks.append(batch_probs)
            label_chunks.append(batch_labels)

            for pth, prob_row, lab in zip(image_paths, batch_probs, batch_labels):
                path2prob[pth] = prob_row.tolist()
                path2label[pth] = int(lab)

    if prob_chunks:
        all_probs = np.concatenate(prob_chunks, axis=0)
        all_labels = np.concatenate(label_chunks, axis=0)
    else:
        all_probs = np.zeros((0, num_classes))
        all_labels = np.zeros((0,), dtype=int)

    # Score the fold: accuracy always, AUC best-effort.
    acc = float("nan")
    auc = None
    if all_probs.shape[0] > 0:
        hard_preds = np.argmax(all_probs, axis=1)
        acc = accuracy_score(all_labels, hard_preds)
        try:
            auc = roc_auc_score(all_labels, all_probs, multi_class="ovo")
        except Exception as e:
            # AUC is undefined e.g. when a class is absent from this fold.
            print(f"⚠️ AUC 计算失败: {e}")

    return all_probs, all_labels, path2prob, path2label, acc, auc


def run_kfold_training(
    dataset,
    models,
    kfold=4,
    batch_size=16,
    lr=1e-5,
    wd=0.01,
    epochs=5,
    device='cuda',
    model_cache_dir='experiments/huggingmodel',
    save_log_dir='./experiments/data_selector_log'
):
    """
    K-fold training + multi-model probability averaging + OOF exports.

    For each fold, every model listed in *models* is trained from scratch and
    evaluated on the fold's validation split; per-model acc/auc are collected
    and all models' validation probabilities are averaged into the
    out-of-fold matrix.

    Parameters
    ----------
    dataset : Dataset
        Must expose ``.labels`` and ``.image_paths`` attributes and yield
        ``(image_tensor, label_int, image_path)`` items.
    models : iterable[str]
        Model type identifiers understood by ``load_model``.
    batch_size : int
        Interpreted as the per-GPU batch size during training.
    device : str
        'cuda' / 'cpu'; forwarded to the training/inference helpers.

    Returns
    -------
    (out_of_fold_probs, all_labels, path2prob, path2label)
    """
    os.makedirs(model_cache_dir, exist_ok=True)
    os.makedirs(save_log_dir, exist_ok=True)

    all_labels = dataset.labels
    all_image_paths = dataset.image_paths
    num_labels = len(set(all_labels))
    n_samples = len(dataset)

    print(f"共 {n_samples} 条数据，{kfold} 折交叉验证，类别数 {num_labels}")

    out_of_fold_probs = np.zeros((n_samples, num_labels), dtype=np.float32)
    path2prob = {}
    path2label = {}

    # Per-model metric history across folds.
    metrics_log = {m: [] for m in models}

    kf = KFold(n_splits=kfold, shuffle=True, random_state=42)

    for fold, (train_idx, val_idx) in enumerate(kf.split(range(n_samples))):
        print("\n==============================")
        print(f"🚩 开始第 {fold+1} 折 (train:{len(train_idx)} val:{len(val_idx)})")

        fold_probs_list = []

        train_subset = Subset(dataset, train_idx)
        val_subset = Subset(dataset, val_idx)

        for model_type in models:
            print(f"\n----  当前模型: {model_type} ----")
            model = load_model(model_type=model_type, num_classes=num_labels)

            # Train this model on the fold's training split.
            model, loss_curve, acc_curve, train_log = train_one_fold(
                model=model,
                train_dataset=train_subset,
                epochs=epochs,
                per_gpu_batch_size=batch_size,
                lr=lr,
                wd=wd,
                device_str=device,
            )

            # --- Save curves and the textual training log ---
            log_dir = os.path.join(save_log_dir, f"{model_type}_fold{fold+1}")
            os.makedirs(log_dir, exist_ok=True)
            save_loss_curve(
                loss_curve, os.path.join(log_dir, "train_loss_curve.png"),
                title=f"{model_type} 第{fold+1}折 训练损失"
            )
            save_loss_curve(
                acc_curve, os.path.join(log_dir, "train_acc_curve.png"),
                title=f"{model_type} 第{fold+1}折 训练准确率", ylabel="Acc"
            )
            save_log(train_log, os.path.join(log_dir, "train_log.txt"))

            # Validation inference + acc/auc for this model on this fold.
            # (DataLoader is the name imported at the top of this file.)
            probs, labels, _, _, acc, auc = inference_model(
                model=model,
                loader=DataLoader(
                    val_subset,
                    batch_size=batch_size,
                    shuffle=False,
                    num_workers=4,
                    pin_memory=True
                ),
                device=device,
                num_classes=num_labels
            )
            fold_probs_list.append(probs)

            metrics_log[model_type].append({
                "fold": fold + 1,
                "acc": acc,
                "auc": auc
            })
            # BUG FIX: the original tested truthiness of `auc`, so a valid
            # AUC of exactly 0.0 was silently dropped from the log line.
            if auc is not None:
                print(f"✅ {model_type} Fold {fold+1}: acc={acc:.4f}, auc={auc:.4f}")
            else:
                print(f"✅ {model_type} Fold {fold+1}: acc={acc:.4f}")

        # Average the probabilities of all models (soft voting).
        fold_probs = np.mean(np.stack(fold_probs_list, axis=0), axis=0)
        out_of_fold_probs[val_idx] = fold_probs

        for local_i, idx in enumerate(val_idx):
            image_path = all_image_paths[idx]
            path2prob[image_path] = fold_probs[local_i].tolist()
            path2label[image_path] = int(all_labels[idx])

        print(f"🎉 第 {fold+1} 折完成！")

    print("\n✅ K 折全部完成，开始写出结果文件 ...")
    with open(os.path.join(save_log_dir, "path2prob.json"), "w", encoding="utf-8") as f:
        json.dump(path2prob, f, ensure_ascii=False, indent=2)
    with open(os.path.join(save_log_dir, "path2label.json"), "w", encoding="utf-8") as f:
        json.dump(path2label, f, ensure_ascii=False, indent=2)

    # === Persist each model's per-fold acc/auc ===
    with open(os.path.join(save_log_dir, "model_metrics.json"), "w", encoding="utf-8") as f:
        json.dump(metrics_log, f, ensure_ascii=False, indent=2)

    return out_of_fold_probs, all_labels, path2prob, path2label