import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import h5py
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import roc_auc_score
import logging
import sys
from trident.slide_encoder_models import ABMILSlideEncoder
from typing import Dict, Union, Tuple, Optional

# Transformer building blocks (used by the TransMIL / Hybrid / MAE models)
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import datetime

# ----------------------------------------------------------------------------
# --- 1. CONFIGURATION ---
# ----------------------------------------------------------------------------

# --- Logging configuration ---
now = datetime.datetime.now()
timestamp = now.strftime("%Y%m%d_%H%M")
# NOTE(review): logging.FileHandler requires the parent "logs/" directory to
# exist when this path is opened -- make sure it is created beforehand.
log_file_path = f"logs/{timestamp}_cross_mil_val_training.log"

# --- Path configuration ---
# Internal cross-validation (CV) data
split_file_path = "/data0/lcy/Patho-Bench/tools/zzylnm_slices_splits2/cohort.tsv"
# split_file_path = "/data0/lcy/Patho-Bench/tools/zzylnm_slices_splits/k=all.tsv"

# feats_path = "/data0/lcy/data/LNM/LNM_slices_conch_v1_processed/20x_512px_0px_overlap/features_conch_v1"
feats_path = "/data0/lcy/data/LNM/LNM_slices_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1"
# Swapped-cohort alternative:
# feats_path = "/data0/lcy/data/LNM/LNM_Zhujiang_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1"
# split_file_path = "/data0/lcy/Patho-Bench/tools/zzylnm_zhujiang_splits/cohort.tsv"
# External test data
# (swapped-cohort alternative is the commented path inside the parentheses)
external_manifest_path = (
    "/data0/lcy/Patho-Bench/tools/zzylnm_zhujiang_splits/cohort.tsv"
    # "/data0/lcy/Patho-Bench/tools/zzylnm_slices_splits/k=all.tsv"
)

# NOTE(review): the first assignment below is immediately overwritten, so the
# "imagenet" variant is dead code -- keep only the intended line.
external_feats_path = "/data0/lcy/data/LNM/LNM_Zhujiang_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1_imagenet"
external_feats_path = "/data0/lcy/data/LNM/LNM_Zhujiang_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1"
# external_feats_path = "/data0/lcy/data/LNM/LNM_slices_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1"

# --- Model & training configuration ---
feature_dim = 1024  # patch-feature dimension (e.g. 1024 for UNI)
task_label_col = "label"  # label column name in the manifest
fold_columns = ["fold_0", "fold_1", "fold_2", "fold_3", "fold_4"]  # CV fold columns
save_dir = "saved_models"  # checkpoint directory

# Training hyperparameters
SEED = 42
# NOTE: num_epochs = 1 makes overfitting unobservable; consider a larger value
# (e.g. 20) to make use of the per-epoch validation implemented below.
num_epochs = 10
batch_size = 8
learning_rate = 1e-4
train_patches_sampled = 1024  # number of patches sampled per slide at train time

# --- Models to run ---
model_types_to_run = [
    "abmil",
    "dsmil",
    "transmil",
    "linearprobe",
    "maemil",
]

# ----------------------------------------------------------------------------
# --- 2. Logging & Seed Helpers ---
# ----------------------------------------------------------------------------


# Called from main()
def setup_logging(log_path):
    """Configure the root logger to write to *log_path* and to stdout.

    Creates the parent directory of *log_path* if necessary, so the script
    works on a fresh checkout where e.g. ``logs/`` does not exist yet.

    Args:
        log_path: Destination log file (opened in "w" mode, truncating).

    Returns:
        The configured root logger.
    """
    # Bug fix: FileHandler raises FileNotFoundError when the parent directory
    # is missing (log_file_path points into "logs/") -- create it up front.
    log_dir = os.path.dirname(log_path)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    # Drop any previously installed handlers so basicConfig takes effect even
    # if logging was already initialized by an earlier run or import.
    logging.getLogger().handlers = []
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=[
            logging.FileHandler(log_path, mode="w"),
            logging.StreamHandler(sys.stdout),
        ],
    )
    logger = logging.getLogger()
    logger.info(f"日志系统已启动。日志将保存到: {log_path}")
    return logger


def set_seed(seed_value: int):
    """Seed NumPy and PyTorch (CPU + all GPUs) RNGs for reproducibility.

    Also switches cuDNN into deterministic mode, trading throughput for
    run-to-run stability.
    """
    for seeder in (np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed_value)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    logging.info(f"随机种子设置为 {seed_value}")


# ----------------------------------------------------------------------------
# --- 3. Data Loading Helper ---
# ----------------------------------------------------------------------------


def load_manifest(
    manifest_path: str, h5_feats_path: str, label_col: str
) -> Optional[pd.DataFrame]:
    """
    Load a TSV (or cached Parquet) manifest and drop samples without features.

    The manifest must contain a ``slide_id`` column and *label_col*. Rows whose
    ``<slide_id>.h5`` file is missing from *h5_feats_path* are filtered out.

    Args:
        manifest_path: Path to the ``.tsv`` manifest. A sibling ``.parquet``
            file with the same stem is used as a faster cache when present.
        h5_feats_path: Directory containing ``<slide_id>.h5`` feature files.
        label_col: Required label column name.

    Returns:
        The filtered DataFrame, or ``None`` on any unrecoverable error
        (missing paths, missing columns, unreadable file, empty result).
    """
    logger = logging.getLogger()
    logger.info(f"正在加载 manifest: {manifest_path}")

    if not os.path.exists(manifest_path):
        logger.error(f"Manifest 文件未找到: {manifest_path}")
        return None

    if not os.path.exists(h5_feats_path):
        logger.error(f"特征路径未找到: {h5_feats_path}")
        return None

    # 1. Load the DataFrame (Parquet cache first, falling back to TSV).
    try:
        parquet_path = manifest_path.replace(".tsv", ".parquet")
        if os.path.exists(parquet_path):
            logger.debug(f"正在从 Parquet 加载: {parquet_path}")
            df = pd.read_parquet(parquet_path)
        else:
            logger.info(f"正在从 TSV 加载 (首次): {manifest_path}")
            df = pd.read_csv(manifest_path, sep="\t")
            # Bug fix: writing the Parquet cache is a pure optimization.
            # Previously a cache-write failure (pyarrow not installed, or a
            # read-only directory) was caught by the outer except and aborted
            # the whole load even though the TSV had been read successfully.
            try:
                logger.info(f"正在保存为 Parquet 以加快未来加载速度: {parquet_path}")
                df.to_parquet(parquet_path)
            except Exception as cache_err:
                logger.warning(f"Parquet 缓存写入失败（已忽略）: {cache_err}")
    except Exception as e:
        logger.error(f"加载 manifest 文件 {manifest_path} 时出错: {e}")
        return None

    if "slide_id" not in df.columns:
        logger.error(f"Manifest 文件 {manifest_path} 中缺少 'slide_id' 列。")
        return None

    if label_col not in df.columns:
        logger.error(f"Manifest 文件 {manifest_path} 中缺少标签列: '{label_col}'。")
        return None

    logger.info(f"Manifest 加载成功。总样本数 (原始): {len(df)}")

    # 2. Collect the stem names (without the ".h5" suffix) of available files.
    # Fix: strip only the trailing ".h5" -- str.replace would also mangle
    # slide ids that happen to contain ".h5" in the middle of the name.
    try:
        available_files_set = {
            f[: -len(".h5")]
            for f in os.listdir(h5_feats_path)
            if f.endswith(".h5")
        }
    except Exception as e:
        logger.error(f"读取特征路径 {h5_feats_path} 时出错: {e}")
        return None

    if not available_files_set:
        logger.error(f"特征路径 {h5_feats_path} 中未找到 .h5 文件。")
        return None

    # 3. Keep only rows whose H5 feature file actually exists.
    original_count = len(df)
    df = df[df["slide_id"].isin(available_files_set)].copy()
    filtered_count = len(df)

    if original_count > filtered_count:
        logger.warning(
            f"过滤了 {original_count - filtered_count} 个样本，因为在 {h5_feats_path} 中找不到对应的 .h5 文件。"
        )

    if filtered_count == 0:
        logger.error("过滤后无可用样本。请检查 slide_id 和 H5 文件名是否匹配。")
        return None

    logger.info(f"Manifest 准备就绪。可用样本数 (过滤后): {filtered_count}")
    return df


# ----------------------------------------------------------------------------
# --- 4. MODEL DEFINITIONS ---
# ----------------------------------------------------------------------------
# (All model definitions kept consistent with the originally provided code.)


# 1. Baseline model (ABMIL)
class BinaryClassificationModel(nn.Module):
    """ABMIL slide encoder followed by a two-layer MLP binary head."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_heads=1,
        head_dim=512,
        dropout=0.0,
        gated=True,
        hidden_dim=256,
    ):
        super().__init__()
        # Attention-based MIL pooling: patch features -> one slide embedding.
        self.feature_encoder = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=n_heads,
            head_dim=head_dim,
            dropout=dropout,
            gated=gated,
        )
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x, return_raw_attention=False):
        """Return [B] logits; optionally also the raw attention weights."""
        if return_raw_attention:
            slide_embedding, attention = self.feature_encoder(
                x, return_raw_attention=True
            )
            return self.classifier(slide_embedding).squeeze(1), attention
        slide_embedding = self.feature_encoder(x)
        return self.classifier(slide_embedding).squeeze(1)


# 2. DSMILModel
class DSMILModel(nn.Module):
    """Dual-stream MIL: the max-pooled instance logit is averaged with an
    ABMIL-aggregated bag logit to produce the final slide score."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        hidden_dim=256,
        n_heads=1,
        head_dim=512,
        dropout=0.0,
        gated=True,
    ):
        super().__init__()

        def _mlp_head():
            # Both classification heads share this two-layer shape.
            return nn.Sequential(
                nn.Linear(input_feature_dim, hidden_dim),
                nn.ReLU(),
                nn.Linear(hidden_dim, 1),
            )

        self.instance_classifier = _mlp_head()
        self.bag_aggregator = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=n_heads,
            head_dim=head_dim,
            dropout=dropout,
            gated=gated,
        )
        self.bag_classifier = _mlp_head()

    def forward(self, x_dict):
        """Return [B] logits averaged over the instance and bag streams."""
        patch_feats = x_dict["features"]  # [B, N, D]
        # Stream 1: strongest single-instance evidence.
        per_patch_logits = self.instance_classifier(patch_feats)  # [B, N, 1]
        top_logit = per_patch_logits.max(dim=1).values  # [B, 1]
        # Stream 2: attention-pooled bag-level evidence.
        bag_logit = self.bag_classifier(self.bag_aggregator(x_dict))  # [B, 1]
        return ((top_logit + bag_logit) / 2).squeeze(1)


# 3. TransMILModel
class TransMILModel(nn.Module):
    """Transformer encoder over patch tokens, mean pooling, then an MLP head."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.1,
    ):
        super().__init__()
        self.input_feature_dim = input_feature_dim
        self.transformer_encoder = TransformerEncoder(
            TransformerEncoderLayer(
                d_model=input_feature_dim,
                nhead=n_head,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
                batch_first=True,
            ),
            num_layers=num_encoder_layers,
        )
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """Return [B] logits for a batch of bags of patch features."""
        tokens = x_dict["features"]  # [B, N, D]
        contextualized = self.transformer_encoder(tokens)  # [B, N, D]
        pooled = contextualized.mean(dim=1)  # [B, D]
        return self.classifier(pooled).squeeze(1)


# 4. HybridAttnMILModel (v2)
class HybridAttnMILModel(nn.Module):
    """Hybrid MIL head: a per-patch sigmoid gate blends each raw patch feature
    with its Transformer-contextualized counterpart, and gated ABMIL attention
    pools the blended patches into one bag embedding for binary classification.
    """

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        transformer_ff_dim=2048,
        abmil_n_heads=1,
        abmil_head_dim=512,
        hidden_dim=256,
        dropout=0.1,
    ):
        super().__init__()
        # Pre-norm (norm_first=True) Transformer supplies patch-to-patch context.
        encoder_layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=transformer_ff_dim,
            dropout=dropout,
            batch_first=True,
            norm_first=True,
        )
        self.context_encoder = TransformerEncoder(
            encoder_layer,
            num_layers=num_encoder_layers,
            norm=nn.LayerNorm(input_feature_dim),
        )
        # Per-patch scalar gate in (0, 1): how much contextualized signal to mix in.
        self.gate_net = nn.Sequential(
            nn.Linear(input_feature_dim, 64),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(64, 1),
            nn.Sigmoid(),
        )
        self.attention_aggregator = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=abmil_n_heads,
            head_dim=abmil_head_dim,
            dropout=dropout,
            gated=True,
        )
        # Normalize the pooled bag embedding before the classifier.
        self.bag_norm = nn.LayerNorm(input_feature_dim)
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """Return [B] logits for a batch of bags of patch features."""
        features = x_dict["features"]  # [B, N, D]
        context_features = self.context_encoder(features)  # [B, N, D]
        # The gate is computed from the *raw* features: gate==0 keeps the raw
        # patch, gate==1 replaces it with the fully contextualized one.
        gate = self.gate_net(features)  # [B, N, 1]
        fused_features = (1 - gate) * features + gate * context_features  # [B, N, D]
        bag_feature = self.attention_aggregator({"features": fused_features})  # [B, D]
        bag_feature = self.bag_norm(bag_feature)
        logits = self.classifier(bag_feature)
        return logits.squeeze(1)


# 5. LinearProbeModel
class LinearProbeModel(nn.Module):
    """Linear probe baseline: mean-pool patch features, one linear layer."""

    def __init__(self, input_feature_dim=feature_dim):
        super().__init__()
        self.input_feature_dim = input_feature_dim
        self.classifier = nn.Linear(self.input_feature_dim, 1)

    def forward(self, x_dict: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Return [B] logits from the mean-pooled bag representation."""
        pooled = x_dict["features"].mean(dim=1)  # [B, N, D] -> [B, D]
        return self.classifier(pooled).squeeze(1)


# 6. MAEMILModel
class MAEMILModel(nn.Module):
    """Transformer MIL with random patch masking during training.

    NOTE(review): despite the "MAE" name there is no reconstruction objective
    in this class -- the masking acts purely as patch dropout: at train time
    only a random (1 - mask_ratio) fraction of patches is encoded.
    """

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.1,
        mask_ratio=0.75,  # fraction of patches hidden during training
    ):
        super().__init__()
        self.input_feature_dim = input_feature_dim
        self.mask_ratio = mask_ratio
        encoder_layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer_encoder = TransformerEncoder(
            encoder_layer, num_layers=num_encoder_layers
        )
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """Return [B] logits; patches are masked only when self.training is True."""
        features = x_dict["features"]
        B, N, D = features.shape

        if self.training:
            num_visible = int(N * (1 - self.mask_ratio))
            # Always keep at least one visible patch per bag.
            if num_visible < 1:
                num_visible = 1
            # Random per-bag permutation via argsort of uniform noise; the
            # first num_visible shuffled indices select the visible patches.
            noise = torch.rand(B, N, device=features.device)
            ids_shuffle = torch.argsort(noise, dim=1)
            ids_visible = ids_shuffle[:, :num_visible]
            features_visible = torch.gather(
                features, dim=1, index=ids_visible.unsqueeze(-1).expand(-1, -1, D)
            )
            context_features = self.transformer_encoder(features_visible)
        else:
            # Evaluation: encode every patch.
            context_features = self.transformer_encoder(features)

        bag_feature = torch.mean(context_features, dim=1)
        logits = self.classifier(bag_feature)
        return logits.squeeze(1)


# --- Helper: model factory ---
def get_model(model_type: str, input_dim: int) -> nn.Module:
    """Instantiate the MIL model identified by *model_type*.

    Args:
        model_type: One of 'abmil', 'dsmil', 'transmil', 'hybrid_mil',
            'linearprobe', 'maemil'.
        input_dim: Patch-feature dimensionality passed as input_feature_dim.

    Raises:
        ValueError: if *model_type* is not a known model name.
    """
    registry = {
        "abmil": BinaryClassificationModel,
        "dsmil": DSMILModel,
        "transmil": TransMILModel,
        "hybrid_mil": HybridAttnMILModel,
        "linearprobe": LinearProbeModel,
        "maemil": MAEMILModel,
    }
    try:
        model_cls = registry[model_type]
    except KeyError:
        raise ValueError(f"未知的 model_type: {model_type}") from None
    return model_cls(input_feature_dim=input_dim)


# ----------------------------------------------------------------------------
# --- 5. DATASET DEFINITIONS ---
# ----------------------------------------------------------------------------


class H5Dataset(Dataset):
    """
    Dataset for training and internal validation.

    - 'train' split: randomly samples ``num_patches_to_sample`` patches.
    - any other split (e.g. 'test'): returns every patch of the slide.
    """

    def __init__(self, feats_path, df, split, split_col, num_patches_to_sample=1024):
        self.df = df[df[split_col] == split]
        self.feats_path = feats_path
        self.num_patches_to_sample = num_patches_to_sample
        self.split = split
        self.feature_dim = feature_dim  # module-level feature dimension

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        record = self.df.iloc[idx]
        h5_file = os.path.join(self.feats_path, record["slide_id"] + ".h5")
        with h5py.File(h5_file, "r") as h5:
            patch_feats = torch.from_numpy(h5["features"][:])  # all patches [N_all, D]

        if self.split == "train":
            n_avail = patch_feats.shape[0]
            n_want = self.num_patches_to_sample
            if n_avail >= n_want:
                # Enough patches: sample without replacement.
                pick = torch.randperm(n_avail)[:n_want]
            else:
                # Too few patches: sample with replacement.
                pick = torch.randint(
                    n_avail,
                    (n_want,),
                )
            patch_feats = patch_feats[pick]  # [n_want, D]

        # For 'val'/'test' splits patch_feats stays [N_all, D].
        target = torch.tensor(record[task_label_col], dtype=torch.float32)
        return patch_feats, target


class ExternalH5Dataset(Dataset):
    """
    Dataset for the external test cohort.

    Always returns every patch of a slide (no sampling).
    """

    def __init__(self, feats_path, df, label_col="label"):
        self.df = df
        self.feats_path = feats_path
        self.label_col = label_col

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        record = self.df.iloc[idx]
        h5_file = os.path.join(self.feats_path, record["slide_id"] + ".h5")
        with h5py.File(h5_file, "r") as h5:
            patch_feats = torch.from_numpy(h5["features"][:])  # [N_all, D]
        target = torch.tensor(record[self.label_col], dtype=torch.float32)
        return patch_feats, target


# ----------------------------------------------------------------------------
# --- 6. CORE TRAINING & EVALUATION FUNCTIONS ---
# ----------------------------------------------------------------------------


def train_one_epoch(
    model: nn.Module,
    loader: DataLoader,
    optimizer: optim.Optimizer,
    criterion: nn.Module,
    device: torch.device,
) -> float:
    """
    Run one training epoch over *loader*.

    Returns:
        Mean training loss across batches (float).
    """
    model.train()  # train mode matters for MAEMIL's masking branch
    running_loss = 0.0
    for batch_feats, batch_labels in loader:
        # batch_feats: [B, N_sampled, D]; batch_labels: [B]
        inputs = {"features": batch_feats.to(device)}
        targets = batch_labels.to(device)

        optimizer.zero_grad()

        logits = model(inputs)  # [B]
        batch_loss = criterion(logits, targets)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()

    return running_loss / len(loader)


def evaluate_model(
    model: nn.Module,
    loader: DataLoader,
    device: torch.device,
    criterion: Optional[nn.Module] = None,
) -> Tuple[float, float, float]:
    """
    Evaluate *model* on a validation or test loader.

    The loader is expected to use batch_size=1 (whole-slide bags).

    Returns:
        (AUC, accuracy, average loss). AUC is NaN when it cannot be computed;
        the average loss is 0.0 when *criterion* is None.
    """
    model.eval()  # eval mode disables dropout and MAEMIL masking
    labels_all, logits_all = [], []
    n_correct = 0
    n_seen = 0
    loss_sum = 0.0

    with torch.no_grad():
        for slide_feats, slide_labels in loader:
            # slide_feats: [1, N_all, D]; slide_labels: [1]
            targets = slide_labels.to(device)
            logits = model({"features": slide_feats.to(device)})  # [1]

            if criterion:
                loss_sum += criterion(logits, targets).item()

            # Sigmoid probabilities for thresholded accuracy; raw logits are
            # kept for AUC (rank-equivalent to probabilities).
            hard_preds = (torch.sigmoid(logits) > 0.5).float()
            n_correct += (hard_preds == targets).sum().item()
            n_seen += targets.size(0)

            logits_all.append(logits.cpu().numpy())
            labels_all.append(targets.cpu().numpy())

    # Aggregate metrics over all slides.
    logits_all = np.concatenate(logits_all)
    labels_all = np.concatenate(labels_all)

    auc = np.nan
    if len(np.unique(labels_all)) > 1:
        try:
            auc = roc_auc_score(labels_all, logits_all)
        except Exception as e:
            logging.getLogger().warning(f"计算 AUC 时出错: {e}")
    else:
        logging.getLogger().warning("评估集中仅包含一个类别。无法计算 AUC。")

    accuracy = n_correct / n_seen if n_seen > 0 else 0.0
    mean_loss = loss_sum / len(loader) if criterion and len(loader) > 0 else 0.0

    return auc, accuracy, mean_loss


# ----------------------------------------------------------------------------
# --- 7. MAIN EXECUTION ---
# ----------------------------------------------------------------------------


def main():
    """Run 5-fold CV training for every configured MIL model, then evaluate
    each fold's best checkpoint on the external test set.
    """
    # 7.1 --- Initialization ---
    logger = setup_logging(log_file_path)
    set_seed(SEED)
    os.makedirs(save_dir, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")
    logger.info(f"特征路径 (CV): {feats_path}")
    logger.info(f"特征路径 (Ext): {external_feats_path}")
    logger.info(f"Batch Size: {batch_size}, Epochs: {num_epochs}, LR: {learning_rate}")
    logger.info(f"训练采样 Patch 数: {train_patches_sampled}")

    # 7.2 --- Load data ---
    df = load_manifest(split_file_path, feats_path, task_label_col)
    if df is None:
        logger.error("无法加载主 manifest，训练中止。")
        return

    ext_df = load_manifest(external_manifest_path, external_feats_path, task_label_col)
    ext_loader = None
    if ext_df is not None:
        logger.info(f"外部测试集已加载: {len(ext_df)} 个样本。")
        ext_dataset = ExternalH5Dataset(external_feats_path, ext_df, task_label_col)
        ext_loader = DataLoader(ext_dataset, batch_size=1, shuffle=False)
    else:
        logger.warning("无法加载外部测试集。每个 epoch 的外部验证将被跳过。")

    # 7.3 --- Cross-validation and training loop ---
    all_model_results_cv = {}  # per-model CV summary
    all_model_results_ext = {}  # per-model final external-test summary

    # --- Outer loop: one pass per model type ---
    for model_type in model_types_to_run:
        logger.info(f"\n{'='*60}")
        logger.info(f"------ 开始处理模型: {model_type.upper()} ------")
        logger.info(f"{'='*60}")

        all_fold_best_aucs = []  # best internal-val AUC of each fold
        all_fold_best_accs = []  # accuracy at each fold's best-AUC epoch

        # --- Inner loop: 5-fold cross-validation ---
        for fold_idx, current_split_col in enumerate(fold_columns):
            logger.info(f"\n{'='*50}")
            logger.info(
                f"--- 模型: {model_type.upper()} | 开始第 {fold_idx + 1}/{len(fold_columns)} 折 (使用列: {current_split_col}) ---"
            )
            logger.info(f"{'='*50}")

            # 1. Fresh model, optimizer and loss for this fold
            model = get_model(model_type, feature_dim).to(device)
            optimizer = optim.Adam(model.parameters(), lr=learning_rate)
            criterion = nn.BCEWithLogitsLoss()

            # 2. DataLoaders
            train_dataset = H5Dataset(
                feats_path, df, "train", current_split_col, train_patches_sampled
            )
            # The 'test' split label makes H5Dataset load *all* patches.
            val_dataset = H5Dataset(
                feats_path,
                df,
                "test",
                current_split_col,
            )
            # NOTE(review): this worker_init_fn seeds every worker with the
            # same constant (worker id is ignored), so multi-worker sampling
            # streams are identical across workers -- confirm this is intended.
            train_loader = DataLoader(
                train_dataset,
                batch_size=batch_size,
                shuffle=True,
                worker_init_fn=lambda _: np.random.seed(SEED),
            )
            # Validation must use batch_size=1 (variable patch counts per slide).
            val_loader = DataLoader(
                val_dataset,
                batch_size=1,
                shuffle=False,
                worker_init_fn=lambda _: np.random.seed(SEED),
            )
            logger.info(
                f"Fold {fold_idx + 1}: 训练样本 = {len(train_dataset)}, 验证样本 = {len(val_dataset)}"
            )

            # 3. Training loop with per-epoch validation
            best_val_auc = -1.0  # best internal-val AUC seen in this fold
            best_val_acc = np.nan  # accuracy at the best-AUC epoch
            best_epoch = -1
            model_save_path = os.path.join(
                save_dir, model_type, f"model_{current_split_col}_best.pth"
            )
            os.makedirs(os.path.dirname(model_save_path), exist_ok=True)

            for epoch in range(num_epochs):
                # Train
                train_loss = train_one_epoch(
                    model, train_loader, optimizer, criterion, device
                )

                # Internal validation
                val_auc, val_acc, val_loss = evaluate_model(
                    model, val_loader, device, criterion
                )

                # External validation every epoch (monitoring only)
                ext_auc_epoch, ext_acc_epoch = np.nan, np.nan
                if ext_loader:
                    # The loss is irrelevant here, hence criterion=None.
                    ext_auc_epoch, ext_acc_epoch, _ = evaluate_model(
                        model, ext_loader, device, criterion=None
                    )

                logger.info(
                    f"Epoch {epoch+1}/{num_epochs} | "
                    f"TrainLoss: {train_loss:.4f} | "
                    f"ValLoss: {val_loss:.4f} | "
                    f"ValAUC: {val_auc:.4f} | "
                    f"ValAcc: {val_acc:.4f} | "
                    f"ExtAUC: {ext_auc_epoch:.4f} | "
                    f"ExtAcc: {ext_acc_epoch:.4f}"
                )

                # Checkpoint whenever the internal-val AUC improves
                if val_auc > best_val_auc:
                    best_val_auc = val_auc
                    best_val_acc = val_acc
                    best_epoch = epoch + 1
                    torch.save(model.state_dict(), model_save_path)
                    logger.info(
                        f"    -> 新的最佳验证 AUC: {best_val_auc:.4f}。模型已保存到: {model_save_path}"
                    )

            logger.info(f"--- Fold {fold_idx + 1} 结束 ---")
            logger.info(f"最佳内部验证 AUC: {best_val_auc:.4f} (在 Epoch {best_epoch})")
            all_fold_best_aucs.append(best_val_auc)
            # Bug fix: record exactly ONE accuracy per fold -- the one from the
            # best-AUC epoch. Previously an accuracy was appended on *every*
            # AUC improvement inside the epoch loop, so folds with many
            # improvements dominated the mean/std accuracy reported below.
            all_fold_best_accs.append(best_val_acc)

        # --- CV summary for the current model ---
        logger.info(f"\n{'='*50}")
        logger.info(f"--- {model_type.upper()} 5-折交叉验证 (内部) 总结 ---")
        logger.info(f"{'='*50}")

        mean_auc = np.nanmean(all_fold_best_aucs)
        std_auc = np.nanstd(all_fold_best_aucs)
        mean_accuracy = np.nanmean(all_fold_best_accs)
        std_accuracy = np.nanstd(all_fold_best_accs)

        logger.info(f"平均 最佳Val-AUC: {mean_auc:.4f} \u00b1 {std_auc:.4f}")
        logger.info(f"平均 最佳Val-Acc: {mean_accuracy:.4f} \u00b1 {std_accuracy:.4f}")

        all_model_results_cv[model_type] = {
            "mean_auc": mean_auc,
            "std_auc": std_auc,
            "mean_accuracy": mean_accuracy,
            "std_accuracy": std_accuracy,
        }

    # 7.4 --- Final internal CV summary ---
    logger.info(f"\n{'='*60}")
    logger.info("------ 最终所有模型 5-折 CV (内部验证) 对比 ------")
    logger.info(f"{'='*60}")
    for model_type, results in all_model_results_cv.items():
        logger.info(f"模型: {model_type.upper()}")
        logger.info(
            f"  平均 最佳Val-AUC: {results['mean_auc']:.4f} \u00b1 {results['std_auc']:.4f}"
        )
        logger.info(
            f"  平均 最佳Val-Acc: {results['mean_accuracy']:.4f} \u00b1 {results['std_accuracy']:.4f}"
        )

    # 7.5 --- Final external test (using the saved best checkpoints) ---
    if ext_loader is None:
        logger.info("\n跳过最终的外部测试，因为外部数据集未加载。")
        logger.info(f"所有训练完成。日志已保存到 {log_file_path}")
        return

    logger.info(f"\n{'='*60}")
    logger.info("------ 开始最终外部测试 (加载每个折的最佳模型) ------")
    logger.info(f"{'='*60}")

    for model_type in model_types_to_run:
        logger.info(f"\n{'-'*50}")
        logger.info(f"外部测试: {model_type.upper()}")
        logger.info(f"{'-'*50}")

        fold_aucs, fold_accs = [], []

        for fold_col in fold_columns:
            model_path = os.path.join(
                save_dir, model_type, f"model_{fold_col}_best.pth"
            )
            if not os.path.exists(model_path):
                logger.warning(f"模型文件不存在，跳过: {model_path}")
                continue

            try:
                # 1. Rebuild the model and load the fold's best weights
                model = get_model(model_type, feature_dim).to(device)
                model.load_state_dict(torch.load(model_path, map_location=device))

                # 2. Evaluate on the external cohort (loss not needed)
                auc, acc, _ = evaluate_model(model, ext_loader, device, criterion=None)

                logger.info(
                    f"  {fold_col} (最佳模型) -> Ext AUC: {auc:.4f}, Ext Acc: {acc:.4f}"
                )
                fold_aucs.append(auc)
                fold_accs.append(acc)

            except Exception as e:
                logger.error(f"加载或评估模型 {model_path} 时出错: {e}")

        # External-test summary for this model
        if fold_aucs:
            mean_auc = np.nanmean(fold_aucs)
            std_auc = np.nanstd(fold_aucs)
            mean_acc = np.nanmean(fold_accs)
            std_acc = np.nanstd(fold_accs)

            all_model_results_ext[model_type] = {
                "mean_auc": mean_auc,
                "std_auc": std_auc,
                "mean_acc": mean_acc,
                "std_acc": std_acc,
            }
            logger.info(
                f"  [汇总] {model_type.upper()} 外部测试 AUC: {mean_auc:.4f} ± {std_auc:.4f}"
            )
            logger.info(
                f"  [汇总] {model_type.upper()} 外部测试 Acc: {mean_acc:.4f} ± {std_acc:.4f}"
            )

    # 7.6 --- Final external-test summary across all models ---
    if all_model_results_ext:
        logger.info(f"\n{'='*60}")
        logger.info("------ 外部测试最终结果汇总 (使用最佳模型) ------")
        logger.info(f"{'='*60}")
        for model_type, res in all_model_results_ext.items():
            logger.info(
                f"{model_type.upper():12} | AUC: {res['mean_auc']:.4f} ± {res['std_auc']:.4f} | "
                f"Acc: {res['mean_acc']:.4f} ± {res['std_acc']:.4f}"
            )

    logger.info(f"\n所有训练和评估完成。日志已保存到 {log_file_path}")


# Standard script entry point: run training only when executed directly.
if __name__ == "__main__":
    main()
