import os
import sys
import datetime
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import h5py
from tqdm import tqdm

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F

from sklearn.metrics import roc_auc_score, accuracy_score

# ==============================================================================
# --- [1. 配置] ---
# ==============================================================================

CONFIG = {
    # --- Run settings ---
    "gpu_id": "2",
    "seed": 42,
    "run_mode": "full",  # 'full', 'train_only', 'test_only'
    "num_workers": 4,
    # --- Paths and labels ---
    "log_dir": "logs",
    "ml_save_dir": "saved_models/saved_multimodal_models_v2",  # [MODIFIED] V2
    "error_log_dir": "error_logs_v2",  # [MODIFIED] V2
    "fold_columns": ["fold_0", "fold_1", "fold_2", "fold_3", "fold_4"],
    "task_label_col": "label",
    # --- Train/validation data (k=all) ---
    "train_val_split_file": "/data0/lcy/Patho-Bench/tools/zzylnm_slices_splits/splits/k=all.tsv",
    "train_val_mod1_feats_path": "/data0/lcy/data/LNM/LNM_slices_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1",  # WSI
    "train_val_mod2_feats_root": "/data0/lcy/data/LNM/vista3d_nfy/prediction",  # VISTA
    "train_val_mod2_lookup_col": "case_id",
    "train_val_mod2_endswith": "_0000",
    # --- External test data (zhujiang cohort) ---
    "external_test_df_path": "/data0/lcy/Patho-Bench/tools/zzylnm_zhujiang_splits/cohort.tsv",
    "external_test_label_col": "label",
    "external_test_mod1_feats_path": "/data0/lcy/data/LNM/LNM_Zhujiang_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1",
    "external_test_mod2_feats_root": "/data0/lcy/data/LNM/vista3d/prediction",
    "external_test_mod2_lookup_col": "matched_vista_key",
    "external_test_mod2_endswith": "output",
    "external_test_model_path": "saved_models/saved_multimodal_models_v2/multimodal_model_fold_1_best.pth",  # [MODIFIED] V2 (fold_1_best.pth)
    # --- Training hyperparameters ---
    "num_epochs": 20,  # [MODIFIED] more epochs to pair with early stopping
    "batch_size": 1,  # MIL mode, batch_size=1
    "learning_rate": 1e-4,
    "weight_decay": 1e-5,
    "patience": 5,  # [NEW] early-stopping patience (epochs without AUC gain)
    # --- Model hyperparameters (CrossAttentionMAEMIL) ---
    "model_params": {
        "d_mod1": 1024,  # WSI feature dim (1024 for UNI, 512 for Conch)
        "d_mod2": 3,  # VISTA per-slice stats: (mean, max, std)
        "embed_dim": 128,
        "num_layers": 2,
        "num_heads": 8,
        "ff_dim": 256,
        "classifier_hidden_dim": 128,
        "dropout": 0.3,
        "mask_ratio": 0.75,
        "apply_instance_norm": True,  # [NEW] apply per-bag normalization in the Dataset
    },
}

# ==============================================================================
# --- [2. 辅助函数 (日志, 种子, VISTA) ] ---
# ==============================================================================


def setup_logging(log_dir):
    """Configure root logging to a timestamped file plus stdout.

    Args:
        log_dir: Directory for the log file (created if missing).

    Returns:
        The configured root logger.
    """
    os.makedirs(log_dir, exist_ok=True)

    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M")
    log_path = os.path.join(log_dir, f"{stamp}_cross_maemil_v2_training.log")

    # Drop any handlers installed by previous runs so basicConfig applies.
    root = logging.getLogger()
    root.handlers = []

    file_handler = logging.FileHandler(log_path, mode="w")
    console_handler = logging.StreamHandler(sys.stdout)
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=[file_handler, console_handler],
    )
    root.info("日志系统已启动。")
    return root


def set_seed(seed_value):
    """Seed every RNG (numpy, torch CPU/CUDA) for reproducible runs."""
    for seeder in (np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed_value)
    # Trade cuDNN autotuning speed for deterministic kernels.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def build_vista3d_map(vista_root_dir, use_endswith, logger):
    """Scan the VISTA 3D prediction root and build a slide_id -> h5-path map.

    Each immediate sub-directory whose name ends with ``use_endswith`` is one
    case; the suffix is stripped to obtain the slide id and the first
    ``*_pred.h5`` file inside the directory is recorded.

    Args:
        vista_root_dir: Root directory containing per-case sub-directories.
        use_endswith: Directory-name suffix marking a valid case directory.
        logger: Logger used for progress / warning messages.

    Returns:
        dict mapping slide_id (str) to the Path of its ``*_pred.h5`` file.
        Empty when the root directory does not exist.
    """
    logger.info(f"开始构建模态2 (3D VISTA) 路径映射... 根目录: {vista_root_dir}")
    mod2_path_map = {}
    root_dir = Path(vista_root_dir)

    if not root_dir.exists():
        logger.error(f"模态2 (3D VISTA) 根目录不存在: {vista_root_dir}")
        return mod2_path_map

    for case_dir in root_dir.iterdir():
        if not case_dir.is_dir():
            continue

        if not case_dir.name.endswith(use_endswith):
            continue

        # [FIX] Strip only the trailing suffix. The previous
        # ``name.split(use_endswith)[0]`` cut at the FIRST occurrence of the
        # suffix, so a name like "a_0000_b_0000" mapped to "a" instead of
        # "a_0000_b". (An empty suffix now keeps the full name instead of
        # raising ValueError from str.split("").)
        if use_endswith:
            slide_id = case_dir.name[: -len(use_endswith)]
        else:
            slide_id = case_dir.name

        pred_files = list(case_dir.glob("*_pred.h5"))

        if not pred_files:
            continue

        if len(pred_files) > 1:
            logger.warning(
                f"在 {case_dir} 中有多个 *_pred.h5 文件, 仅使用第一个: {pred_files[0]}"
            )

        mod2_path_map[slide_id] = pred_files[0]

    logger.info(
        f"模态2 (3D VISTA) 映射构建完毕。共找到 {len(mod2_path_map)} 个对应的 .h5 文件。"
    )
    return mod2_path_map


# ==============================================================================
# --- [3. 数据处理 (Dataset, Collate, Loader) ] ---
# ==============================================================================


class MultiModalH5Dataset(Dataset):
    """MIL dataset yielding (WSI bag, VISTA bag, label) for one slide.

    Modality 1 (WSI) is read from ``<mod1_feats_path>/<slide_id>.h5`` and
    modality 2 (3D VISTA) via the pre-built ``mod2_path_map``. Any load
    failure degrades to a (1, d) zero bag instead of raising, so iteration
    never crashes on a missing/corrupt file.
    """

    def __init__(
        self,
        df,
        split,
        mod1_feats_path,
        mod2_path_map,
        d_mod1,  # modality-1 feature dim (from config)
        d_mod2,  # modality-2 feature dim (from config)
        label_col,  # name of the label column
        mod2_lookup_col,  # column whose value keys into mod2_path_map
        apply_instance_norm=False,  # [NEW] per-bag feature normalization
        split_col=None,
    ):
        self.split = split

        # External-test mode: use the whole DataFrame (no split column).
        if split_col is None and split == "external_test":
            self.df = df.copy()
            if label_col not in df.columns:
                raise ValueError(f"external_label_col '{label_col}' 不在 DataFrame 中")
        else:
            # Cross-validation mode: keep only rows of the requested split.
            self.df = df[df[split_col] == split]

        self.label_col_to_use = label_col
        self.mod1_feats_path = mod1_feats_path
        self.mod2_path_map = mod2_path_map
        self.d_mod1 = d_mod1
        self.d_mod2 = d_mod2
        self.mod2_key_column_name = mod2_lookup_col
        self.apply_instance_norm = apply_instance_norm

        if mod2_lookup_col not in self.df.columns:
            raise ValueError(f"mod2_lookup_col '{mod2_lookup_col}' 不在 DataFrame 中。")

        # NOTE(review): this generator is never used in the class body —
        # presumably a leftover from an earlier sampling implementation.
        self.generator = torch.Generator().manual_seed(CONFIG["seed"])

    def __len__(self):
        """Number of slides (rows) in the selected split."""
        return len(self.df)

    def _apply_norm(self, features):
        # [NEW] Instance-level normalization (per-bag z-score along dim 0).
        # Helps reduce domain shift between training and external cohorts.
        # Skipped for single-instance bags (std is degenerate there).
        if self.apply_instance_norm and features.shape[0] > 1:
            features = (features - features.mean(dim=0)) / (features.std(dim=0) + 1e-6)
        return features

    def _load_mod1_features(self, slide_id):
        """Load WSI patch features (modality 1) as the full (N, d_mod1) bag.

        Returns a (1, d_mod1) zero bag when the file is missing, unreadable,
        or contains no rows.
        """
        try:
            with h5py.File(
                os.path.join(self.mod1_feats_path, slide_id + ".h5"), "r"
            ) as f:
                features = torch.from_numpy(f["features"][:])  # (N, D1)

            if features.shape[0] == 0:
                # Empty bag stored in the file: fall back to a zero bag.
                return torch.zeros(1, self.d_mod1).float()

            # [NEW] Optional per-bag normalization.
            features = self._apply_norm(features)

            return features.float()

        except Exception as e:
            # Deliberate best-effort: any read error yields a zero bag so the
            # DataLoader keeps going (warning logging intentionally disabled).
            return torch.zeros(1, self.d_mod1).float()

    def _load_mod2_features(self, mod2_key):
        """Load 3D VISTA features (modality 2) as a (D_slices, 3) bag.

        Each slice is summarized by (mean, max, std) over its 512x512 plane.
        Returns a (1, d_mod2) zero bag when the key has no mapped file, the
        stored volume has an unexpected shape, or reading fails.
        """
        mod2_path = self.mod2_path_map.get(mod2_key)

        if not mod2_path:
            # No h5 file was mapped for this lookup key.
            return torch.zeros(1, self.d_mod2).float()

        try:
            with h5py.File(mod2_path, "r") as f:
                # squeeze(0) drops a leading singleton axis — assumes the
                # stored layout is (1, 512, 512, D_slices); TODO confirm
                # against the prediction writer.
                data = f["prediction"][:].squeeze(0)  # -> (512, 512, D_slices)

                if data.ndim != 3 or data.shape[0] != 512 or data.shape[1] != 512:
                    # Unexpected volume shape: fall back to a zero bag.
                    return torch.zeros(1, self.d_mod2).float()

                # D_slices is the last axis; aggregate over the spatial plane.
                slice_means = np.mean(data, axis=(0, 1))  # (D_slices,)
                slice_maxs = np.max(data, axis=(0, 1))  # (D_slices,)
                slice_stds = np.std(data, axis=(0, 1))  # (D_slices,)

                # Stack into per-slice feature vectors -> (D_slices, 3)
                features = np.stack([slice_means, slice_maxs, slice_stds], axis=-1)
                features = torch.from_numpy(features)

            if features.shape[0] == 0:
                # Aggregation produced no slices: fall back to a zero bag.
                return torch.zeros(1, self.d_mod2).float()

            # [NEW] Optional per-bag normalization.
            features = self._apply_norm(features)

            return features.float()

        except Exception as e:
            # Deliberate best-effort: any read error yields a zero bag.
            return torch.zeros(1, self.d_mod2).float()

    def __getitem__(self, idx):
        """Return (mod1_bag, mod2_bag, label) for the idx-th row."""
        row = self.df.iloc[idx]
        slide_id = row["slide_id"]
        mod2_key_to_load = row[self.mod2_key_column_name]

        features_mod1 = self._load_mod1_features(slide_id)
        features_mod2 = self._load_mod2_features(mod2_key_to_load)

        label_val = row[self.label_col_to_use]
        label = torch.tensor(label_val, dtype=torch.float32)

        return features_mod1, features_mod2, label


def custom_collate_fn(batch):
    """Collate variable-size MIL bags: keep features as sequences, stack labels.

    Args:
        batch: list of (mod1_features, mod2_features, label) tuples.

    Returns:
        (tuple of mod1 tensors, tuple of mod2 tensors, stacked label tensor).
        With batch_size=1 (MIL mode) this is a simple pass-through.
    """
    feats_mod1 = tuple(sample[0] for sample in batch)
    feats_mod2 = tuple(sample[1] for sample in batch)
    labels = torch.stack(tuple(sample[2] for sample in batch))
    return feats_mod1, feats_mod2, labels


def load_and_filter_data(config, logger):
    """Load the train/val manifest and keep only rows with both modalities.

    Steps:
      1. Load the split table (cached as Parquet after the first TSV read).
      2. Drop slides lacking a modality-1 (WSI) feature ``.h5`` file.
      3. Build the modality-2 (3D VISTA) slide_id -> h5 map and drop rows
         without a match.
      4. Log the final per-case label distribution.

    Args:
        config: Global CONFIG dict (paths / column names are read from it).
        logger: Logger for progress messages.

    Returns:
        Tuple of (filtered DataFrame, modality-2 path map dict).
    """
    logger.info("--- 开始加载和过滤 训练/验证 数据 ---")
    split_file_path = config["train_val_split_file"]
    parquet_path = split_file_path.replace(".tsv", ".parquet")

    if os.path.exists(parquet_path):
        logger.info(f"正在从 Parquet 文件加载: {parquet_path}")
        df = pd.read_parquet(parquet_path)
    else:
        logger.info(f"首次加载 TSV 文件: {split_file_path}")
        df = pd.read_csv(split_file_path, sep="\t")
        logger.info(f"正在保存为 Parquet 格式: {parquet_path}")
        df.to_parquet(parquet_path)

    # 1. Filter on modality 1 (WSI features).
    feats_path_mod1 = config["train_val_mod1_feats_path"]
    # [FIX] Strip only the trailing ".h5" extension; the previous
    # str.replace(".h5", "") also removed ".h5" occurring mid-name.
    available_files_mod1_set = {
        f[: -len(".h5")] for f in os.listdir(feats_path_mod1) if f.endswith(".h5")
    }

    initial_count = len(df["slide_id"].unique())
    df = df[df["slide_id"].isin(available_files_mod1_set)].copy()
    filtered_count_1 = len(df["slide_id"].unique())
    logger.info(
        f"模态1 (WSI) 过滤：从 {initial_count} 减少到 {filtered_count_1} 个 (基于 {feats_path_mod1})"
    )

    # 2. Build the modality-2 (3D VISTA) path map.
    vista_3d_map = build_vista3d_map(
        config["train_val_mod2_feats_root"], config["train_val_mod2_endswith"], logger
    )

    # 3. Filter on modality 2 (3D VISTA).
    available_files_mod2_set = set(vista_3d_map.keys())
    mod2_lookup_col = config["train_val_mod2_lookup_col"]

    df = df[df[mod2_lookup_col].isin(available_files_mod2_set)].copy()
    final_count = len(df[mod2_lookup_col].unique())
    logger.info(
        f"多模态过滤：从 {filtered_count_1} 减少到 {final_count} 个 (基于 模态2 映射)"
    )

    # Log the case-level label distribution.
    task_label_col = config["task_label_col"]
    df_counts = (
        df.drop_duplicates(subset=["case_id"])[task_label_col]
        .value_counts()
        .reset_index()
    )
    df_counts.columns = [task_label_col, "Count"]
    logger.info(f"最终 Case 标签分布:\n{df_counts}")

    return df, vista_3d_map


# ==============================================================================
# --- [4. 模型架构 (CrossAttentionMAEMIL V2) ] ---
# ==============================================================================


class CrossAttentionBlock(nn.Module):
    """
    一个对称的交叉注意力模块。
    A attends to B, and B attends to A.
    """

    def __init__(self, embed_dim, num_heads, ff_dim, dropout=0.1):
        super().__init__()
        # --- A attends to B ---
        self.attn_a_to_b = nn.MultiheadAttention(
            embed_dim, num_heads, dropout=dropout, batch_first=True
        )
        self.norm1_a = nn.LayerNorm(embed_dim)
        self.ff_a = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(ff_dim, embed_dim),
            nn.Dropout(dropout),
        )
        self.norm2_a = nn.LayerNorm(embed_dim)

        # --- B attends to A ---
        self.attn_b_to_a = nn.MultiheadAttention(
            embed_dim, num_heads, dropout=dropout, batch_first=True
        )
        self.norm1_b = nn.LayerNorm(embed_dim)
        self.ff_b = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(ff_dim, embed_dim),
            nn.Dropout(dropout),
        )
        self.norm2_b = nn.LayerNorm(embed_dim)

    def forward(self, tokens_a, tokens_b):
        # 1. A attends to B (A=Query, B=Key/Value)
        attn_output_a, _ = self.attn_a_to_b(
            query=tokens_a, key=tokens_b, value=tokens_b
        )
        tokens_a = self.norm1_a(tokens_a + attn_output_a)
        tokens_a = self.norm2_a(tokens_a + self.ff_a(tokens_a))

        # 2. B attends to A (B=Query, A=Key/Value)
        attn_output_b, _ = self.attn_b_to_a(
            query=tokens_b, key=tokens_a, value=tokens_a
        )
        tokens_b = self.norm1_b(tokens_b + attn_output_b)
        tokens_b = self.norm2_b(tokens_b + self.ff_b(tokens_b))

        return tokens_a, tokens_b


class CrossAttentionMAEMIL(nn.Module):
    """Multimodal MAE-MIL model (V2: attention pooling instead of mean).

    Per-bag pipeline: linear embedding of each modality -> optional
    MAE-style random token masking (training only) -> stacked symmetric
    cross-attention fusion -> attention-weighted pooling over all fused
    tokens -> MLP head producing a single logit.
    """

    def __init__(self, model_config):
        super().__init__()

        # Unpack hyperparameters from the config dict.
        d_mod1 = model_config["d_mod1"]
        d_mod2 = model_config["d_mod2"]
        embed_dim = model_config["embed_dim"]
        num_layers = model_config["num_layers"]
        num_heads = model_config["num_heads"]
        ff_dim = model_config["ff_dim"]
        classifier_hidden_dim = model_config["classifier_hidden_dim"]
        dropout = model_config["dropout"]
        # Fraction of tokens hidden from the fusion layers while training.
        self.mask_ratio = model_config["mask_ratio"]

        # 1. Per-modality embedding into the shared token space.
        self.embed_mod1 = nn.Linear(d_mod1, embed_dim)
        self.embed_mod2 = nn.Linear(d_mod2, embed_dim)

        # 2. Stacked symmetric cross-attention fusion layers.
        self.fusion_layers = nn.ModuleList(
            [
                CrossAttentionBlock(embed_dim, num_heads, ff_dim, dropout)
                for _ in range(num_layers)
            ]
        )

        # 3. [NEW] ABMIL-style attention scorer: one scalar score per token
        # (replaces the earlier flat torch.mean pooling).
        self.attention_pool = nn.Sequential(
            nn.Linear(embed_dim, embed_dim // 2),
            nn.ReLU(),
            nn.Linear(embed_dim // 2, 1),
        )

        # 4. Classification head on the pooled bag vector.
        self.classifier_head = nn.Sequential(
            nn.LayerNorm(embed_dim),
            nn.Linear(embed_dim, classifier_hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(classifier_hidden_dim, 1),
        )

    def _apply_mae_mask(self, tokens, d_embed):
        """Randomly keep a visible subset of tokens (MAE-style).

        Args:
            tokens: (1, N, D) token tensor (batch is always 1 here).
            d_embed: embedding dimension D.

        Returns:
            (1, N_visible, D) tensor of the randomly chosen visible tokens.
        """
        batch, n_tokens, _ = tokens.shape

        keep = int(n_tokens * (1 - self.mask_ratio))
        keep = max(1, min(keep, n_tokens))  # clamp to [1, N]

        # Rank tokens by i.i.d. uniform noise; the lowest `keep` ranks form
        # the visible set.
        noise = torch.rand(batch, n_tokens, device=tokens.device)
        ids_shuffle = torch.argsort(noise, dim=1)
        ids_visible = ids_shuffle[:, :keep]

        return torch.gather(
            tokens, dim=1, index=ids_visible.unsqueeze(-1).expand(-1, -1, d_embed)
        )

    def forward(self, x_mod1, x_mod2):
        """Score one bag.

        Args:
            x_mod1: (N, d_mod1) WSI patch features.
            x_mod2: (D, d_mod2) VISTA slice features.

        Returns:
            (1,) unnormalized logit for the positive class.
        """
        # 1. Embed each modality and add a batch axis for attention.
        stream_1 = self.embed_mod1(x_mod1).unsqueeze(0)  # (1, N, E)
        stream_2 = self.embed_mod2(x_mod2).unsqueeze(0)  # (1, D, E)
        d_embed = stream_1.shape[-1]

        # 2. Training only: hide a random share of tokens per modality.
        # Singleton bags are left intact.
        if self.training:
            if stream_1.shape[1] > 1:
                stream_1 = self._apply_mae_mask(stream_1, d_embed)
            if stream_2.shape[1] > 1:
                stream_2 = self._apply_mae_mask(stream_2, d_embed)

        # Deep fusion of the (possibly masked) token streams.
        for fusion in self.fusion_layers:
            stream_1, stream_2 = fusion(stream_1, stream_2)

        # 3. Attention pooling over the concatenated token set.
        fused = torch.cat([stream_1.squeeze(0), stream_2.squeeze(0)], dim=0)
        scores = F.softmax(self.attention_pool(fused), dim=0)  # (T, 1)
        bag_vector = torch.sum(fused * scores, dim=0)  # (E,)

        # 4. Classify the pooled bag representation.
        return self.classifier_head(bag_vector)  # (1,)


# ==============================================================================
# --- [5. 核心逻辑 (训练, 测试) ] ---
# ==============================================================================


def run_evaluation(model, data_loader, device, desc):
    """Evaluate a model over a loader of MIL bags.

    Bags that failed to load (zero instances along dim 0) receive a neutral
    probability of 0.5 instead of a forward pass. AUC is NaN when the labels
    contain only one class.

    Args:
        model: module called as model(mod1_bag, mod2_bag) -> logit.
        data_loader: yields (mod1_bag_seq, mod2_bag_seq, labels).
        device: torch device for inference.
        desc: progress-bar description.

    Returns:
        (auc, accuracy, y_true array, predicted-probability array).
    """
    model.eval()
    probs = []
    truths = []

    progress = tqdm(data_loader, desc=desc, leave=False)
    with torch.no_grad():
        for bag_list_1, bag_list_2, labels in progress:
            truths.extend(labels.cpu().numpy())  # (B,)

            for bag_1, bag_2 in zip(bag_list_1, bag_list_2):
                bag_1 = bag_1.to(device)
                bag_2 = bag_2.to(device)

                # Neutral prediction for unreadable / empty bags.
                if bag_1.shape[0] == 0 or bag_2.shape[0] == 0:
                    probs.append(0.5)
                    continue

                logit = model(bag_1, bag_2)  # (1,)
                probs.append(torch.sigmoid(logit).cpu().item())

    y_true = np.array(truths)
    y_pred_proba = np.array(probs)

    auc = np.nan
    if len(np.unique(y_true)) > 1:
        auc = roc_auc_score(y_true, y_pred_proba)

    acc = accuracy_score(y_true, (y_pred_proba > 0.5).astype(int))

    return auc, acc, y_true, y_pred_proba


def train_one_fold(
    config, logger, df, vista_3d_map, current_split_col, fold_idx, device
):
    """Train and evaluate one fold of the 5-fold cross-validation.

    [MODIFIED] V2: adds per-epoch evaluation with early stopping on the
    fold's test AUC; the best checkpoint is saved to ``ml_save_dir``.

    NOTE(review): the fold's *test* split drives both early stopping and
    model selection, so per-fold AUC is optimistically biased; a separate
    validation split would be cleaner.

    Args:
        config: global CONFIG dict.
        logger: shared logger.
        df: filtered manifest containing the fold split columns.
        vista_3d_map: slide_id -> VISTA h5 path map.
        current_split_col: split column name for this fold.
        fold_idx: 0-based fold index (logging / checkpoint naming).
        device: torch device to train on.

    Returns:
        (best_test_auc, best_test_acc); NaN when no epoch ever improved.
    """
    logger.info(
        f"\n--- (MAEMIL V2) 开始第 {fold_idx + 1}/{len(config['fold_columns'])} 折 ({current_split_col}) ---"
    )

    # 1. Build datasets and loaders.
    apply_norm = config["model_params"].get("apply_instance_norm", False)

    train_dataset = MultiModalH5Dataset(
        df=df,
        split="train",
        split_col=current_split_col,
        mod1_feats_path=config["train_val_mod1_feats_path"],
        mod2_path_map=vista_3d_map,
        d_mod1=config["model_params"]["d_mod1"],
        d_mod2=config["model_params"]["d_mod2"],
        label_col=config["task_label_col"],
        mod2_lookup_col=config["train_val_mod2_lookup_col"],
        apply_instance_norm=apply_norm,
    )
    test_dataset = MultiModalH5Dataset(
        df=df,
        split="test",
        split_col=current_split_col,
        mod1_feats_path=config["train_val_mod1_feats_path"],
        mod2_path_map=vista_3d_map,
        d_mod1=config["model_params"]["d_mod1"],
        d_mod2=config["model_params"]["d_mod2"],
        label_col=config["task_label_col"],
        mod2_lookup_col=config["train_val_mod2_lookup_col"],
        apply_instance_norm=apply_norm,  # also applied on the test split
    )

    train_loader = DataLoader(
        train_dataset,
        batch_size=config["batch_size"],
        shuffle=True,
        num_workers=config["num_workers"],
        pin_memory=True,
        collate_fn=custom_collate_fn,
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=config["batch_size"],
        shuffle=False,
        num_workers=config["num_workers"],
        pin_memory=True,
        collate_fn=custom_collate_fn,
    )

    logger.info(
        f"Fold {fold_idx + 1}: 训练样本 = {len(train_dataset)}, 测试样本 = {len(test_dataset)}"
    )

    # 2. Model, loss, and optimizer.
    model = CrossAttentionMAEMIL(config["model_params"]).to(device)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(
        model.parameters(),
        lr=config["learning_rate"],
        weight_decay=config["weight_decay"],
    )

    # 3. [NEW] Early-stopping bookkeeping.
    best_test_auc = 0.0
    epochs_no_improve = 0
    patience = config.get("patience", 5)  # defaults to 5
    best_model_path = os.path.join(
        config["ml_save_dir"], f"multimodal_model_fold_{fold_idx + 1}_best.pth"
    )

    num_epochs = config["num_epochs"]
    final_fold_results = {"auc": np.nan, "acc": np.nan}

    for epoch in range(num_epochs):
        model.train()
        train_pbar = tqdm(
            train_loader,
            desc=f"Fold {fold_idx+1} Epoch {epoch + 1}/{num_epochs} [Train]",
            leave=False,
        )

        total_loss = 0.0
        for mod1_batch_list, mod2_batch_list, labels in train_pbar:
            labels = labels.to(device).float().unsqueeze(1)  # (B, 1)
            current_batch_size = labels.shape[0]

            optimizer.zero_grad()

            batch_logits = []
            valid_labels = []

            # Bags are variable-size, so each sample is forwarded
            # individually and the logits are stacked afterwards.
            for i in range(current_batch_size):
                x_mod1 = mod1_batch_list[i].to(device)
                x_mod2 = mod2_batch_list[i].to(device)

                # [NEW] Skip the sample when either modality bag is empty.
                if x_mod1.shape[0] == 0 or x_mod2.shape[0] == 0:
                    logger.warning(
                        f"跳过空 bag (Mod1: {x_mod1.shape[0]}, Mod2: {x_mod2.shape[0]})"
                    )
                    continue

                logit = model(x_mod1, x_mod2)
                batch_logits.append(logit)
                valid_labels.append(labels[i])

            if not batch_logits:  # the whole batch was skipped
                continue

            logits = torch.stack(batch_logits)  # (B_valid, 1)
            valid_labels_tensor = torch.stack(valid_labels)  # (B_valid, 1)

            loss = criterion(logits, valid_labels_tensor)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            train_pbar.set_postfix(loss=loss.item())

        avg_train_loss = total_loss / len(train_loader)

        # --- [MODIFIED] Evaluate on the fold's test split every epoch ---
        eval_desc = f"Fold {fold_idx+1} Epoch {epoch + 1}/{num_epochs} [Test]"
        test_auc, test_acc, _, _ = run_evaluation(model, test_loader, device, eval_desc)

        logger.info(
            f"Epoch {epoch + 1}/{num_epochs} | "
            f"Train Loss: {avg_train_loss:.4f} | "
            f"Test AUC: {test_auc:.4f} | "
            f"Test Acc: {test_acc:.4f}"
        )

        # --- [NEW] Early-stopping logic ---
        # A NaN AUC (single-class test split) never counts as an
        # improvement, since NaN comparisons are always False.
        if test_auc > best_test_auc:
            logger.info(
                f"  -> Test AUC 提升 ({best_test_auc:.4f} -> {test_auc:.4f}). 保存模型..."
            )
            best_test_auc = test_auc
            final_fold_results["auc"] = test_auc
            final_fold_results["acc"] = test_acc
            epochs_no_improve = 0
            torch.save(model.state_dict(), best_model_path)
            logger.info(f"   模型已保存至: {best_model_path}")
        else:
            epochs_no_improve += 1
            logger.info(
                f"  -> Test AUC 未提升 ({test_auc:.4f}). "
                f"Patience: {epochs_no_improve}/{patience}"
            )

        if epochs_no_improve >= patience:
            logger.info(f"--- Early stopping 在 Epoch {epoch + 1} 触发 ---")
            break

    logger.info(f"--- Fold {fold_idx + 1} (MAEMIL V2) 结束 ---")
    logger.info(f"   最佳 Test AUC: {final_fold_results['auc']:.4f}")
    logger.info(f"   最佳 Test Acc: {final_fold_results['acc']:.4f}")

    return final_fold_results["auc"], final_fold_results["acc"]


def run_external_test(config, logger, device):
    """Load a saved checkpoint and evaluate it on the external cohort.

    [MODIFIED] V2: evaluation is delegated to ``run_evaluation``.

    Pipeline: load the external manifest, filter rows to those with both a
    WSI feature file and a prefix-matched VISTA prediction, run inference,
    save misclassified slides to ``error_log_dir``, and log AUC/accuracy.

    Args:
        config: CONFIG-like dict (the caller injects the per-fold
            ``external_test_model_path``).
        logger: shared logger.
        device: torch device for inference.
    """
    logger.info(f"\n{'='*60}")
    logger.info("--- 开始 MAEMIL V2 外部数据集测试 ---")
    logger.info(f"{'='*60}")

    model_path = config["external_test_model_path"]
    external_df_path = config["external_test_df_path"]
    external_label_col = config["external_test_label_col"]
    external_mod1_feats_path = config["external_test_mod1_feats_path"]
    external_mod2_feats_root = config["external_test_mod2_feats_root"]

    try:
        # 1. Check that the checkpoint exists.
        if not os.path.exists(model_path):
            logger.error(f"找不到模型文件: {model_path}")
            return

        # 2. Load the external manifest.
        try:
            external_df = pd.read_csv(external_df_path, sep="\t")
            logger.info(
                f"成功加载外部 manifest: {external_df_path} (共 {len(external_df)} 行)"
            )
        except FileNotFoundError:
            logger.error(f"找不到外部 manifest 文件: {external_df_path}")
            return

        # 3. Filter on modality 1 (WSI features present on disk).
        # NOTE(review): str.replace strips ".h5" anywhere in the name; a
        # trailing-suffix strip would be safer.
        ext_available_files_mod1 = [
            f.replace(".h5", "")
            for f in os.listdir(external_mod1_feats_path)
            if f.endswith(".h5")
        ]
        ext_available_files_mod1_set = set(ext_available_files_mod1)

        original_count_1 = len(external_df)
        external_df = external_df[
            external_df["slide_id"].isin(ext_available_files_mod1_set)
        ].copy()
        filtered_count_1 = len(external_df)
        logger.info(
            f"过滤外部 DF (模态1 - WSI)：原始 {original_count_1} 个, 剩余 {filtered_count_1} 个"
        )

        # 4. Build the modality-2 (3D VISTA) map and filter on it.
        external_vista_3d_map = build_vista3d_map(
            external_mod2_feats_root, config["external_test_mod2_endswith"], logger
        )

        available_prefixes = list(external_vista_3d_map.keys())
        original_count_2 = len(external_df)

        if not available_prefixes:
            logger.error("模态2 (VISTA) 映射为空, 无法进行前缀匹配。")
            return

        logger.info(
            f"将使用 {len(available_prefixes)} 个 VISTA 键作为前缀进行弱匹配..."
        )

        # Weak matching: a case matches the first VISTA key that is a
        # prefix of its case_id (first hit wins, in map-iteration order).
        def find_prefix_match(case_id, prefixes):
            for prefix in prefixes:
                if case_id.startswith(prefix):
                    return prefix
            return np.nan

        lookup_col_name = config["external_test_mod2_lookup_col"]
        external_df[lookup_col_name] = external_df["case_id"].apply(
            lambda cid: find_prefix_match(cid, available_prefixes)
        )

        # Drop cases without any VISTA prefix match.
        external_df = external_df.dropna(subset=[lookup_col_name]).copy()
        filtered_count_2 = len(external_df)

        logger.info(
            f"过滤外部 DF (模态2 - VISTA)：从 {original_count_2} 个, 剩余 {filtered_count_2} 个"
        )

        if filtered_count_2 == 0:
            logger.error("过滤后没有剩余的样本可供测试。")
            return

        # 5. Build the dataset and loader.
        apply_norm = config["model_params"].get("apply_instance_norm", False)
        test_dataset = MultiModalH5Dataset(
            df=external_df,
            split="external_test",
            split_col=None,
            mod1_feats_path=external_mod1_feats_path,
            mod2_path_map=external_vista_3d_map,
            d_mod1=config["model_params"]["d_mod1"],
            d_mod2=config["model_params"]["d_mod2"],
            label_col=external_label_col,
            mod2_lookup_col=lookup_col_name,
            apply_instance_norm=apply_norm,  # [NEW] same normalization as training
        )

        test_loader = DataLoader(
            test_dataset,
            batch_size=config["batch_size"],
            shuffle=False,
            num_workers=config["num_workers"],
            pin_memory=True,
            collate_fn=custom_collate_fn,
        )

        # Kept in loader order (shuffle=False) so predictions align.
        slide_ids_in_order = test_dataset.df["slide_id"].tolist()
        logger.info(f"准备好 {len(test_dataset)} 个外部测试样本。")

        # 6. Load the checkpoint.
        logger.info(f"加载模型: {model_path}")
        model = CrossAttentionMAEMIL(config["model_params"]).to(device)
        model.load_state_dict(torch.load(model_path, map_location=device))

        model_name_simple = os.path.basename(model_path).replace(".pth", "")

        # 7. [MODIFIED] Evaluate via the shared helper.
        logger.info("开始评估...")
        auc, acc, y_external, y_pred_proba = run_evaluation(
            model, test_loader, device, "External Test"
        )

        y_pred_class = (y_pred_proba > 0.5).astype(int)

        # 8. Save the misclassified samples for inspection.
        try:
            incorrect_indices = np.where(y_external != y_pred_class)[0]
            logger.info(
                f"在 {len(y_external)} 个样本中发现 {len(incorrect_indices)} 个错误预测。"
            )

            if len(incorrect_indices) > 0:
                error_data = []
                for idx in incorrect_indices:
                    error_data.append(
                        {
                            "slide_id": slide_ids_in_order[idx],
                            "true_label": int(y_external[idx]),
                            "predicted_label": int(y_pred_class[idx]),
                            "predicted_proba_class_1": float(y_pred_proba[idx]),
                        }
                    )

                error_df = pd.DataFrame(error_data)
                error_filename = f"errors_{model_name_simple}_external_test.csv"
                error_save_path = os.path.join(config["error_log_dir"], error_filename)
                error_df.to_csv(error_save_path, index=False)
                logger.info(f"错误预测样本已保存到: {error_save_path}")

        except Exception as e:
            logger.error(f"保存错误预测样本时失败: {e}")

        # 9. Log the final metrics.
        logger.info(f"--- 外部 测试结果 (模型: {model_name_simple}) ---")
        logger.info(f" 外部测试 AUC: {auc:.4f}")
        logger.info(f" 外部测试 Accuracy: {acc:.4f}")
        logger.info(f"{'='*60}")

    except Exception as e:
        logger.error(f"运行 外部测试时发生严重错误: {e}", exc_info=True)


def check_external_paths(config, logger):
    """Verify every filesystem path needed for external testing.

    Logs each missing path plus a summary banner, and returns True only
    when all four required paths exist.
    """
    required = (
        ("模型", config["external_test_model_path"]),
        ("DF", config["external_test_df_path"]),
        ("Mod1 (WSI)", config["external_test_mod1_feats_path"]),
        ("Mod2 (VISTA)", config["external_test_mod2_feats_root"]),
    )

    missing = [(name, path) for name, path in required if not os.path.exists(path)]

    for name, path in missing:
        logger.error(f"外部测试路径检查失败: {name} 路径不存在: {path}")

    if missing:
        logger.error("=" * 60)
        logger.error("外部测试失败：一个或多个路径配置不正确。")
        logger.error("请修改 CONFIG 中的 'external_test_...' 变量。")
        logger.error("=" * 60)

    return not missing


# ==============================================================================
# --- [6. 主执行函数 ] ---
# ==============================================================================


def main():
    """Entry point: set up logging/seed/device, then run CV and/or testing.

    Behavior is driven by ``CONFIG['run_mode']``:
      * 'full'       - 5-fold cross-validation, then external testing of
                       every saved fold checkpoint.
      * 'train_only' - cross-validation only.
      * 'test_only'  - external testing of existing checkpoints only.
    """
    # 1. Initialization.
    logger = setup_logging(CONFIG["log_dir"])
    logger.info(f"配置加载。运行模式: {CONFIG['run_mode']}")

    # NOTE(review): CUDA_VISIBLE_DEVICES is set after `import torch`; this
    # is effective only because no CUDA call happens before this line —
    # confirm nothing touches CUDA earlier at import time.
    os.environ["CUDA_VISIBLE_DEVICES"] = CONFIG["gpu_id"]
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")

    set_seed(CONFIG["seed"])
    logger.info(f"随机种子设置为 {CONFIG['seed']}")

    os.makedirs(CONFIG["ml_save_dir"], exist_ok=True)
    os.makedirs(CONFIG["error_log_dir"], exist_ok=True)

    # 2. Cross-validation training (when requested).
    if CONFIG["run_mode"] in ["full", "train_only"]:
        logger.info(f"\n{'='*60}")
        logger.info("--- 开始 5-折交叉验证 (多模态 MAEMIL V2) ---")
        logger.info(f"{'='*60}")

        # Load and filter the training/validation manifest.
        df, vista_3d_map = load_and_filter_data(CONFIG, logger)

        all_fold_aucs_mil = []
        all_fold_accuracies_mil = []

        for fold_idx, current_split_col in enumerate(CONFIG["fold_columns"]):
            test_auc, test_acc = train_one_fold(
                CONFIG, logger, df, vista_3d_map, current_split_col, fold_idx, device
            )
            all_fold_aucs_mil.append(test_auc)
            all_fold_accuracies_mil.append(test_acc)

        # Cross-validation summary (NaN folds are ignored by nanmean/nanstd).
        logger.info(f"\n{'='*50}")
        logger.info("--- 多模态 MAEMIL V2 5-折交叉验证总结 ---")
        logger.info(f"{'='*50}")
        mean_auc_mil = np.nanmean(all_fold_aucs_mil)
        std_auc_mil = np.nanstd(all_fold_aucs_mil)
        mean_acc_mil = np.nanmean(all_fold_accuracies_mil)
        std_acc_mil = np.nanstd(all_fold_accuracies_mil)

        logger.info(f"模型: MultiModalMAEMIL_V2 (WSI + 3D VISTA)")
        logger.info(f"  -> 平均 Best AUC: {mean_auc_mil:.4f} \u00b1 {std_auc_mil:.4f}")
        logger.info(
            f"  -> 平均 Best Accuracy: {mean_acc_mil:.4f} \u00b1 {std_acc_mil:.4f}"
        )
        logger.info(f"  -> (个体): {np.round(all_fold_aucs_mil, 4)}")

    # 3. External testing (when requested).
    if CONFIG["run_mode"] in ["full", "test_only"]:

        # [NEW] Strategy: test every saved fold checkpoint individually.
        logger.info(f"\n{'='*60}")
        logger.info("--- (V2) 启动外部测试 (5-Folds Ensemble) ---")

        all_external_model_paths = []
        for fold_idx in range(len(CONFIG["fold_columns"])):
            model_path = os.path.join(
                CONFIG["ml_save_dir"], f"multimodal_model_fold_{fold_idx + 1}_best.pth"
            )
            if os.path.exists(model_path):
                all_external_model_paths.append(model_path)
            else:
                logger.warning(f"未找到模型: {model_path}, 跳过...")

        if not all_external_model_paths:
            logger.error(
                "在 'ml_save_dir' 中找不到任何 *_best.pth 模型! 无法执行外部测试。"
            )

        else:
            logger.info(f"将对 {len(all_external_model_paths)} 个模型进行外部测试...")

            # (Shallow copy used only for the one-time path check.)
            temp_config = CONFIG.copy()
            temp_config["external_test_model_path"] = all_external_model_paths[0]

            if check_external_paths(temp_config, logger):
                # Run the external test once per fold checkpoint.
                for model_path in all_external_model_paths:
                    test_config = CONFIG.copy()
                    test_config["external_test_model_path"] = model_path
                    run_external_test(test_config, logger, device)
            else:
                logger.info("跳过外部测试, 因为路径检查失败。")

    elif CONFIG["run_mode"] == "train_only":
        logger.info("跳过外部测试 (run_mode='train_only')")

    if CONFIG["run_mode"] not in ["full", "train_only", "test_only"]:
        logger.error(
            f"无效的 'run_mode': {CONFIG['run_mode']}. 请选择 'full', 'train_only', 或 'test_only'."
        )

    logger.info("--- 所有分析完成 ---")


# Script entry point.
if __name__ == "__main__":
    main()
