import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import h5py
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import roc_auc_score
import logging
import sys
from trident.slide_encoder_models import ABMILSlideEncoder
from typing import Dict, Union, Tuple  # Add this import for type hinting

# *** 新增：为 TransMIL 和 HybridAttnMIL 导入 Transformer 模块 ***
from torch.nn import TransformerEncoder, TransformerEncoderLayer

import datetime
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # Restrict this process to GPU 1

# --- Logging configuration ---
now = datetime.datetime.now()
# Timestamp like "20251027_1942": %Y=year, %m=month, %d=day, %H=hour, %M=minute
timestamp = now.strftime("%Y%m%d_%H%M")

log_file_path = f"logs/{timestamp}_cross_mil_val_training_splits2.log"
# FileHandler raises FileNotFoundError if the parent directory is missing,
# so ensure "logs/" exists before installing handlers.
os.makedirs("logs", exist_ok=True)
logging.getLogger().handlers = []  # Drop any handlers installed by imported libraries
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[
        logging.FileHandler(log_file_path, mode="w"),
        logging.StreamHandler(sys.stdout),
    ],
)
logger = logging.getLogger()
logger.info("日志系统已启动。")
# --- End of logging configuration ---
import pandas as pd
import os

# Load labels and fold splits from the local TSV manifest.
# split_file_path = "/data0/lcy/Patho-Bench/tools/zzylnm_slices_splits2/cohort.tsv"
split_file_path = "/data0/lcy/Patho-Bench/tools/zzylnm_slices_splits/k=all.tsv"

parquet_path = split_file_path.replace(".tsv", ".parquet")

# Use the Parquet cache only when it is at least as new as the source TSV;
# otherwise a stale cache would silently shadow an updated split file.
_cache_is_fresh = os.path.exists(parquet_path) and (
    not os.path.exists(split_file_path)
    or os.path.getmtime(parquet_path) >= os.path.getmtime(split_file_path)
)

if _cache_is_fresh:
    print(f"正在从更快的 Parquet 文件加载: {parquet_path}")
    df = pd.read_parquet(parquet_path)
else:
    print(f"首次加载 TSV 文件（可能需要一些时间）: {split_file_path}")
    df = pd.read_csv(split_file_path, sep="\t")
    print(f"正在保存为 Parquet 格式以加快未来加载速度: {parquet_path}")
    df.to_parquet(parquet_path)

# 1. Collect the base names (without ".h5") of every available feature file.
# feats_path = "/data0/lcy/data/LNM/LNM_slices_conch_v1_processed/20x_512px_0px_overlap/features_conch_v1"
feats_path = "/data0/lcy/data/LNM/LNM_slices_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1"

available_files = [
    f.replace(".h5", "") for f in os.listdir(feats_path) if f.endswith(".h5")
]
available_files_set = set(available_files)

# 2. Check how many slide IDs in df have no matching feature file.
df_ids = set(df["slide_id"].unique())
missing_ids = df_ids - available_files_set

if missing_ids:
    print(
        f"警告：在 df 中找到了 {len(missing_ids)} 个 slide_id，但在 feats_path 中没有对应的 .h5 文件。"
    )
    print("将从 df 中过滤掉这些缺失的 ID。")
    # print(f"缺失的 ID 示例: {list(missing_ids)[:5]}")  # Uncomment to inspect examples

# 3. Keep only rows whose H5 feature file actually exists.
df = df[df["slide_id"].isin(available_files_set)].copy()

# The label column in this manifest is 'label'.
task_label_col = "label"

# Inspect the label distribution. The former bare `df_counts` expression was
# notebook residue (a no-op in a script), so print it explicitly instead.
df_counts = df[task_label_col].value_counts().reset_index()
df_counts.columns = [task_label_col, "Count"]
print(df_counts)

# --- Configuration ---
task_label_col = "label"
feature_dim = 1024
# Column names of the five cross-validation folds.
fold_columns = [f"fold_{i}" for i in range(5)]

# Directory where trained model weights are written.
save_dir = "saved_models"
os.makedirs(save_dir, exist_ok=True)
# ---------------------

# Make runs reproducible across numpy / torch / cuDNN.
SEED = 42
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
logger.info(f"随机种子设置为 {SEED}")


# --- 模型定义 ---


# 1. Baseline model (ABMIL)
class BinaryClassificationModel(nn.Module):
    """ABMIL attention pooling followed by a small MLP head for binary classification."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_heads=1,
        head_dim=512,
        dropout=0.0,
        gated=True,
        hidden_dim=256,
    ):
        super().__init__()
        # Attention-based aggregator: {'features': [B, N, D]} -> [B, D].
        self.feature_encoder = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=n_heads,
            head_dim=head_dim,
            dropout=dropout,
            gated=gated,
        )
        # Two-layer MLP mapping the pooled slide embedding to a single logit.
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x, return_raw_attention=False):
        """x is a dict {'features': tensor}; ABMILSlideEncoder expects that format.

        Returns logits [B], plus the raw attention map when requested.
        """
        if return_raw_attention:
            pooled, attn = self.feature_encoder(x, return_raw_attention=True)
            return self.classifier(pooled).squeeze(1), attn

        pooled = self.feature_encoder(x)  # aggregated slide embedding [B, D]
        return self.classifier(pooled).squeeze(1)  # [B]


# *** 2. DSMILModel ***
class DSMILModel(nn.Module):
    """Dual-stream MIL: a max-pooled instance stream averaged with an ABMIL bag stream."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        hidden_dim=256,
        n_heads=1,
        head_dim=512,
        dropout=0.0,
        gated=True,
    ):
        super().__init__()
        # 1. Instance-level stream: scores every patch independently.
        self.instance_classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

        # 2. Bag-level stream: same attention aggregator as ABMIL...
        self.bag_aggregator = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=n_heads,
            head_dim=head_dim,
            dropout=dropout,
            gated=gated,
        )
        # ...followed by its own MLP head.
        self.bag_classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict: {'features': [B, N, D]} -> logits [B].

        B=1 at evaluation (variable N); B=batch_size at training (sampled N).
        """
        patch_feats = x_dict["features"]

        # Instance stream: per-patch logits [B, N, 1]; keep the strongest patch.
        per_patch_logits = self.instance_classifier(patch_feats)
        top_patch_logit = per_patch_logits.max(dim=1).values  # [B, 1]

        # Bag stream: attention-pooled embedding [B, D] -> logit [B, 1].
        pooled = self.bag_aggregator(x_dict)
        bag_logit = self.bag_classifier(pooled)

        # Combine the two streams by simple averaging.
        return ((top_patch_logit + bag_logit) / 2).squeeze(1)  # [B]


# *** 3. TransMILModel ***
class TransMILModel(nn.Module):
    """Transformer encoder over patches, mean-pooled and classified with an MLP."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.1,
    ):
        super().__init__()
        self.input_feature_dim = input_feature_dim

        # batch_first=True keeps tensors in [B, N, D] layout throughout.
        self.transformer_encoder = TransformerEncoder(
            TransformerEncoderLayer(
                d_model=input_feature_dim,
                nhead=n_head,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
                batch_first=True,
            ),
            num_layers=num_encoder_layers,
        )

        # MLP head: pooled slide embedding -> single logit.
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict: {'features': [B, N, D]} -> logits [B]."""
        # 1. Contextualize patches with self-attention: [B, N, D].
        contextualized = self.transformer_encoder(x_dict["features"])

        # 2. Aggregate by mean pooling over the patch axis: [B, D].
        slide_embedding = contextualized.mean(dim=1)

        # 3. Classify and drop the trailing singleton dim: [B].
        return self.classifier(slide_embedding).squeeze(1)


# *** 4. HybridAttnMILModel (hybrid MIL) v2 ***
class HybridAttnMILModel(nn.Module):
    """Transformer context encoder fused with the raw features through a learned
    per-patch gate, then ABMIL attention pooling and an MLP classifier."""

    def __init__(
        self,
        input_feature_dim=512,
        # Transformer settings
        n_head=8,
        num_encoder_layers=2,
        transformer_ff_dim=2048,
        # ABMIL settings
        abmil_n_heads=1,
        abmil_head_dim=512,
        # Classifier settings
        hidden_dim=256,
        dropout=0.1,
    ):
        super().__init__()

        # 1. Context encoder: pre-norm Transformer with a final LayerNorm.
        self.context_encoder = TransformerEncoder(
            TransformerEncoderLayer(
                d_model=input_feature_dim,
                nhead=n_head,
                dim_feedforward=transformer_ff_dim,
                dropout=dropout,
                batch_first=True,
                norm_first=True,
            ),
            num_layers=num_encoder_layers,
            norm=nn.LayerNorm(input_feature_dim),
        )

        # 2. Per-patch gate in [0, 1]: how much context each patch absorbs.
        self.gate_net = nn.Sequential(
            nn.Linear(input_feature_dim, 64),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(64, 1),
            nn.Sigmoid(),
        )

        # 3. Attention aggregator (ABMIL).
        self.attention_aggregator = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=abmil_n_heads,
            head_dim=abmil_head_dim,
            dropout=dropout,
            gated=True,
        )

        # 4. Classification head.
        self.bag_norm = nn.LayerNorm(input_feature_dim)
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict: {'features': [B, N, D]} -> logits [B]."""
        raw = x_dict["features"]  # [B, N, D]

        # Context-enhanced view of every patch: [B, N, D].
        contextual = self.context_encoder(raw)

        # Gate predicted from the RAW features: [B, N, 1].
        mix = self.gate_net(raw)

        # Convex blend: keep the original features, adaptively inject context.
        blended = (1 - mix) * raw + mix * contextual  # [B, N, D]

        # ABMIL pooling over patches: [B, D].
        pooled = self.attention_aggregator({"features": blended})

        # Normalize and classify: [B].
        return self.classifier(self.bag_norm(pooled)).squeeze(1)


# --- Linear probe ---
# *** 5. LinearProbeModel ***
class LinearProbeModel(nn.Module):
    """Mean-pool the patch features and apply a single linear layer.

    :param input_feature_dim: dimensionality of the input patch features
    """

    def __init__(self, input_feature_dim=feature_dim):
        super().__init__()
        self.input_feature_dim = input_feature_dim
        # Single affine map from the pooled slide embedding to one logit.
        self.classifier = nn.Linear(self.input_feature_dim, 1)

    def forward(self, x_dict: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Mean-pool over patches, then classify.

        :param x_dict: dict with 'features' of shape [B, N, D]
        :return: logits of shape [B]
        """
        # 1. Aggregation: average over the patch axis, [B, N, D] -> [B, D].
        pooled = x_dict["features"].mean(dim=1)

        # 2. Classification: [B, 1], squeezed to match the [B] labels.
        return self.classifier(pooled).squeeze(1)


# *** 6. MAEMILModel (MAE-inspired MIL classifier) ***
class MAEMILModel(nn.Module):
    """TransMIL-style classifier that randomly masks out patches during training.

    :param mask_ratio: fraction of patches to mask (drop) while training.
    """

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.1,
        mask_ratio=0.75,
    ):
        super().__init__()
        self.input_feature_dim = input_feature_dim
        self.mask_ratio = mask_ratio

        # Transformer encoder, same layout as TransMIL (batch_first, [B, N, D]).
        self.transformer_encoder = TransformerEncoder(
            TransformerEncoderLayer(
                d_model=input_feature_dim,
                nhead=n_head,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
                batch_first=True,
            ),
            num_layers=num_encoder_layers,
        )

        # MLP classification head, same as TransMIL.
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict: {'features': [B, N, D]} -> logits [B]."""
        patches = x_dict["features"]
        B, N, D = patches.shape

        if self.training:
            # --- Training: encode only a random subset of the patches. ---
            # Number of visible patches; always keep at least one.
            keep = max(1, int(N * (1 - self.mask_ratio)))

            # Random per-sample ordering of patch indices via sorted noise.
            scores = torch.rand(B, N, device=patches.device)  # [B, N]
            order = torch.argsort(scores, dim=1)               # [B, N]
            kept_idx = order[:, :keep]                         # [B, keep]

            # Gather the visible patches; expand broadcasts the index over D.
            visible = torch.gather(
                patches, dim=1, index=kept_idx.unsqueeze(-1).expand(-1, -1, D)
            )  # [B, keep, D]
            encoded = self.transformer_encoder(visible)
        else:
            # --- Evaluation: no masking, encode every patch. ---
            encoded = self.transformer_encoder(patches)  # [B, N, D]

        # Aggregate whatever was encoded (keep or N patches) by mean pooling.
        slide_embedding = encoded.mean(dim=1)  # [B, D]

        # Classify and return [B].
        return self.classifier(slide_embedding).squeeze(1)


# --- 模型定义结束 ---


# Pick the compute device: CUDA when available (only GPU 1 is visible), else CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
logger.info(f"使用设备: {device}")


# Dataset used for the training/validation folds.
class H5Dataset(Dataset):
    """Per-slide patch features loaded from .h5 files.

    For the 'train' split, a fixed number of patches is sampled per slide so
    bags can be batched; for other splits ('test'), all patches are returned,
    which is why the test DataLoader must use batch_size=1 (N varies per slide).
    """

    def __init__(self, feats_path, df, split, split_col, num_features=512):
        # Keep only rows belonging to the requested split of this fold column.
        self.df = df[df[split_col] == split]
        self.feats_path = feats_path
        self.num_features = num_features  # number of patches sampled at train time
        self.split = split
        # Use the globally configured feature dimension instead of a hard-coded value.
        self.feature_dim = feature_dim

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        with h5py.File(
            os.path.join(self.feats_path, row["slide_id"] + ".h5"), "r"
        ) as f:
            features = torch.from_numpy(f["features"][:])  # all patches [N_all, D]

        if self.split == "train":
            # Patch sampling for training.
            # NOTE(review): the generator is reseeded with the same SEED on every
            # call, so each slide receives the SAME sample in every epoch —
            # deterministic, but with no epoch-to-epoch variety. Confirm intended.
            num_available = features.shape[0]
            if num_available >= self.num_features:
                # Enough patches: sample without replacement.
                indices = torch.randperm(
                    num_available, generator=torch.Generator().manual_seed(SEED)
                )[: self.num_features]
            else:
                # Too few patches: sample with replacement.
                indices = torch.randint(
                    num_available,
                    (self.num_features,),
                    generator=torch.Generator().manual_seed(SEED),
                )
            features = features[indices]  # [num_features, D] after sampling

        # For 'test', features stay [N_all, D].

        label = torch.tensor(row[task_label_col], dtype=torch.float32)
        return features, label


# Training hyper-parameters.
batch_size = 8
num_epochs = 1
learning_rate = 4e-4  # previously 1e-4; single shared learning rate for all models

logger.info(f"特征路径: {feats_path}")
logger.info(f"Batch Size: {batch_size}, Epochs: {num_epochs}, LR: {learning_rate}")

# Models to run in this session; edit this list to enable/disable candidates.
# Full set: ["abmil", "dsmil", "transmil", "hybrid_mil", "linearprobe", "maemil", "maemil_registers"]
model_types_to_run = [
    "abmil",
    # "dsmil",
    # "transmil",
    # "linearprobe",
    "maemil",
]
all_model_results = {}  # final metrics collected per model type

# Abort early if the label/split DataFrame failed to load above.
if "df" not in locals():
    logger.error("=" * 60)
    logger.error("错误: DataFrame 'df' 未定义。训练中止。")
    logger.error("=" * 60)
else:
    # --- Outer loop: iterate over every requested model type ---
    for model_type in model_types_to_run:
        logger.info(f"\n{'='*60}")
        logger.info(f"------ 开始训练模型: {model_type.upper()} ------")
        logger.info(f"{'='*60}")

        # Reset the per-fold results for the current model type.
        all_fold_aucs = []
        all_fold_accuracies = []

        # --- Inner loop: 5-fold cross-validation ---
        for fold_idx, current_split_col in enumerate(fold_columns):
            logger.info(f"\n{'='*50}")
            logger.info(
                f"--- 模型: {model_type.upper()} | 开始第 {fold_idx + 1}/{len(fold_columns)} 折 (使用列: {current_split_col}) ---"
            )
            logger.info(f"{'='*50}")

            # 1. Re-initialize the model and optimizer for this fold.
            if model_type == "abmil":
                model = BinaryClassificationModel().to(device)
            elif model_type == "dsmil":
                model = DSMILModel().to(device)
            elif model_type == "transmil":
                model = TransMILModel().to(device)
            elif model_type == "hybrid_mil":
                model = HybridAttnMILModel(input_feature_dim=feature_dim).to(device)
            elif model_type == "linearprobe":
                model = LinearProbeModel(input_feature_dim=feature_dim).to(device)
            elif model_type == "maemil":
                model = MAEMILModel(input_feature_dim=feature_dim).to(device)

            else:
                logger.error(f"未知的 model_type: {model_type}。跳过此模型。")
                break  # leave the CV loop, continue with the next model_type

            optimizer = optim.Adam(model.parameters(), lr=learning_rate)
            criterion = nn.BCEWithLogitsLoss()

            # 2. Build the DataLoaders.
            # Note: H5Dataset samples patches for 'train' and loads all patches for 'test'.
            train_loader = DataLoader(
                H5Dataset(
                    feats_path,
                    df,
                    "train",
                    split_col=current_split_col,
                    # num_features=512,
                    num_features=1024,
                ),
                batch_size=batch_size,
                shuffle=True,
                worker_init_fn=lambda _: np.random.seed(SEED),
            )
            # Test batch_size must be 1 because the patch count (N) differs per bag.
            test_loader = DataLoader(
                H5Dataset(
                    feats_path,
                    df,
                    "test",
                    split_col=current_split_col,
                    # num_features=512,
                    num_features=1024,
                ),
                batch_size=1,
                shuffle=False,
                worker_init_fn=lambda _: np.random.seed(SEED),
            )
            logger.info(
                f"Fold {fold_idx + 1}: Train samples = {len(train_loader.dataset)} (Batch Size={batch_size}), Test samples = {len(test_loader.dataset)} (Batch Size=1)"
            )

            # 3. Training loop.
            for epoch in range(num_epochs):
                model.train()  # IMPORTANT: training mode (MAEMIL's masking depends on it)
                total_loss = 0.0
                for features_batch, labels_batch in train_loader:
                    # features_batch: [B, N_sampled, D]; labels_batch: [B]

                    # All models expect a dict input: {'features': tensor}.
                    features_dict = {"features": features_batch.to(device)}
                    labels = labels_batch.to(device)

                    optimizer.zero_grad()
                    outputs = model(features_dict)  # outputs: [B]
                    loss = criterion(outputs, labels)
                    loss.backward()
                    optimizer.step()
                    total_loss += loss.item()

                logger.info(
                    f"Fold {fold_idx + 1}, Epoch {epoch+1}/{num_epochs}, Loss: {total_loss/len(train_loader):.4f}"
                )

            # 4. Evaluation.
            model.eval()  # IMPORTANT: eval mode disables MAEMIL's masking
            all_labels, all_outputs = [], []
            correct = 0
            total = 0

            with torch.no_grad():
                for features_slide, labels_slide in test_loader:
                    # features_slide: [1, N_all, D]; labels_slide: [1]

                    # Same dict input format as during training.
                    features_dict = {"features": features_slide.to(device)}
                    labels = labels_slide.to(device)

                    outputs = model(features_dict)  # outputs: [1]

                    predicted = (
                        torch.sigmoid(outputs) > 0.5
                    ).float()  # threshold the sigmoid probability at 0.5
                    correct += (predicted == labels).sum().item()
                    total += labels.size(0)
                    all_outputs.append(outputs.cpu().numpy())
                    all_labels.append(labels.cpu().numpy())

            # 5. Compute metrics.
            all_outputs = np.concatenate(all_outputs)
            all_labels = np.concatenate(all_labels)

            auc = np.nan
            if len(np.unique(all_labels)) > 1:
                auc = roc_auc_score(all_labels, all_outputs)
            else:
                logger.warning(
                    f"Fold {fold_idx + 1} Warning: Test set only contains one class. AUC cannot be calculated."
                )

            accuracy = correct / total

            logger.info(f"--- Fold {fold_idx + 1} 结果 ({model_type.upper()}) ---")
            logger.info(f"Test AUC: {auc:.4f}")
            logger.info(f"Test Accuracy: {accuracy:.4f}")

            all_fold_aucs.append(auc)
            all_fold_accuracies.append(accuracy)

            # 6. Save the fold checkpoint (path includes the model_type).
            model_save_dir = os.path.join(save_dir, model_type)
            os.makedirs(model_save_dir, exist_ok=True)
            model_save_path = os.path.join(
                model_save_dir, f"model_{current_split_col}.pth"
            )
            torch.save(model.state_dict(), model_save_path)
            logger.info(f"Model for fold {fold_idx + 1} saved to: {model_save_path}")

        # --- Cross-validation summary for the current model ---
        logger.info(f"\n{'='*50}")
        logger.info(f"--- {model_type.upper()} 5-折交叉验证总结 ---")
        logger.info(f"{'='*50}")

        # nan-aware statistics: folds whose AUC could not be computed are skipped.
        mean_auc = np.nanmean(all_fold_aucs)
        std_auc = np.nanstd(all_fold_aucs)
        mean_accuracy = np.nanmean(all_fold_accuracies)
        std_accuracy = np.nanstd(all_fold_accuracies)

        logger.info(f"平均 AUC: {mean_auc:.4f} \u00b1 {std_auc:.4f}")
        logger.info(f"平均 Accuracy: {mean_accuracy:.4f} \u00b1 {std_accuracy:.4f}")

        # Keep the results for the final cross-model comparison.
        all_model_results[model_type] = {
            "mean_auc": mean_auc,
            "std_auc": std_auc,
            "mean_accuracy": mean_accuracy,
            "std_accuracy": std_accuracy,
        }

    # --- Final summary across all models ---
    logger.info(f"\n{'='*60}")
    logger.info("------ 最终所有模型 5-折交叉验证对比 ------")
    logger.info(f"{'='*60}")
    for model_type, results in all_model_results.items():
        logger.info(f"模型: {model_type.upper()}")
        logger.info(
            f"  平均 AUC: {results['mean_auc']:.4f} \u00b1 {results['std_auc']:.4f}"
        )
        logger.info(
            f"  平均 Accuracy: {results['mean_accuracy']:.4f} \u00b1 {results['std_accuracy']:.4f}"
        )

    logger.info(f"\n所有训练完成。日志已保存到 {log_file_path}")

# ==============================
# === External Test ===
# ==============================

# *** External-test data paths ***
external_manifest_path = "/data0/lcy/Patho-Bench/tools/zzylnm_zhujiang_splits/cohort.tsv"  # point this at your external-test label file
external_feats_path = "/data0/lcy/data/LNM/LNM_Zhujiang_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1"

# Skip the whole external evaluation when the feature directory is absent.
if not os.path.exists(external_feats_path):
    logger.error(f"外部测试特征路径不存在: {external_feats_path}")
    external_test_enabled = False
else:
    external_test_enabled = True

if external_test_enabled:
    logger.info("\n" + "=" * 60)
    logger.info("--- 开始外部测试 ---")
    logger.info("=" * 60)

    # Load the external-test manifest (TSV or Parquet).
    try:
        if external_manifest_path.endswith(".parquet"):
            ext_df = pd.read_parquet(external_manifest_path)
        else:
            ext_df = pd.read_csv(external_manifest_path, sep="\t")
        logger.info(f"外部测试样本数 (原始): {len(ext_df)}")
    except Exception as e:
        # Disable the external test instead of crashing the whole script.
        logger.error(f"加载外部 manifest 时出错: {e}")
        external_test_enabled = False

if external_test_enabled:
    # Keep only slide_ids that actually have an .h5 feature file.
    ext_available_files = {
        f.replace(".h5", "")
        for f in os.listdir(external_feats_path)
        if f.endswith(".h5")
    }
    ext_df = ext_df[ext_df["slide_id"].isin(ext_available_files)].copy()
    logger.info(f"外部测试样本数 (过滤后): {len(ext_df)}")

    if len(ext_df) == 0:
        logger.warning("外部测试集中无有效样本，跳过外部测试。")
        external_test_enabled = False


# External-test dataset: no patch sampling, every patch of a slide is kept.
class ExternalH5Dataset(Dataset):
    """Yields (features [N, D], label) pairs for each slide of the external cohort."""

    def __init__(self, feats_path, df, label_col="label"):
        self.df = df
        self.feats_path = feats_path
        self.label_col = label_col

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        h5_file = os.path.join(self.feats_path, row["slide_id"] + ".h5")
        with h5py.File(h5_file, "r") as handle:
            patch_features = torch.from_numpy(handle["features"][:])  # [N, D]
        target = torch.tensor(row[self.label_col], dtype=torch.float32)
        return patch_features, target


# External-test evaluation of one saved fold checkpoint.
def run_external_test(model_type, model_class, model_path, feats_path, df, device):
    """Load a saved fold checkpoint and evaluate it on the external cohort.

    :param model_type: key identifying the architecture (e.g. "abmil", "maemil")
    :param model_class: class to instantiate for that architecture
    :param model_path: path to the .pth state_dict saved during CV training
    :param feats_path: directory holding the external cohort's .h5 feature files
    :param df: manifest DataFrame with 'slide_id' and 'label' columns
    :param device: torch device to run inference on
    :return: (auc, accuracy); auc is np.nan when only one class is present,
             and (None, None) when the model could not be loaded.
    """
    logger.info(f"加载模型: {model_path}")

    # Architectures grouped by the constructor signature they need; this keeps
    # the dispatch in one place instead of six identical elif branches.
    default_ctor_types = {"abmil", "dsmil", "transmil"}
    dim_ctor_types = {"hybrid_mil", "linearprobe", "maemil"}

    try:
        if model_type in default_ctor_types:
            model = model_class()
        elif model_type in dim_ctor_types:
            model = model_class(input_feature_dim=feature_dim)
        else:
            raise ValueError(f"未知模型类型: {model_type}")

        model = model.to(device)
        # NOTE(review): torch.load on an untrusted checkpoint can execute
        # arbitrary code; these checkpoints are produced locally by this script.
        state_dict = torch.load(model_path, map_location=device)
        model.load_state_dict(state_dict)
        logger.info("模型权重加载成功。")
    except Exception as e:
        logger.error(f"加载模型失败: {e}")
        return None, None

    model.eval()
    ext_dataset = ExternalH5Dataset(feats_path, df)
    # batch_size must stay 1: each slide has a different number of patches.
    ext_loader = DataLoader(ext_dataset, batch_size=1, shuffle=False)

    all_labels, all_outputs = [], []
    correct, total = 0, 0

    with torch.no_grad():
        for features, labels in ext_loader:
            features = features.to(device)  # [1, N, D]
            labels = labels.to(device)  # [1]

            features_dict = {"features": features}
            outputs = model(features_dict)  # [1]

            predicted = (torch.sigmoid(outputs) > 0.5).float()
            correct += (predicted == labels).sum().item()
            total += labels.size(0)

            all_outputs.append(outputs.cpu().numpy())
            all_labels.append(labels.cpu().numpy())

    all_outputs = np.concatenate(all_outputs)
    all_labels = np.concatenate(all_labels)

    # AUC is undefined when the external set contains a single class.
    auc = np.nan
    if len(np.unique(all_labels)) > 1:
        auc = roc_auc_score(all_labels, all_outputs)
    else:
        logger.warning("外部测试集仅包含单一类别，无法计算 AUC。")

    accuracy = correct / total if total > 0 else 0.0

    return auc, accuracy


# Run the external test for every trained model type and every fold checkpoint.
if external_test_enabled:
    external_results = {}

    # Model-type -> class dispatch, built once instead of once per fold.
    model_class_map = {
        "abmil": BinaryClassificationModel,
        "dsmil": DSMILModel,
        "transmil": TransMILModel,
        "hybrid_mil": HybridAttnMILModel,
        "linearprobe": LinearProbeModel,
        "maemil": MAEMILModel,
    }

    for model_type in model_types_to_run:
        logger.info(f"\n{'-'*50}")
        logger.info(f"外部测试: {model_type.upper()}")
        logger.info(f"{'-'*50}")

        fold_aucs, fold_accs = [], []

        for fold_col in fold_columns:
            model_save_path = os.path.join(
                save_dir, model_type, f"model_{fold_col}.pth"
            )
            if not os.path.exists(model_save_path):
                logger.warning(f"模型文件不存在，跳过: {model_save_path}")
                continue

            model_class = model_class_map.get(model_type)
            if model_class is None:
                logger.error(f"未找到模型类: {model_type}")
                continue

            auc, acc = run_external_test(
                model_type=model_type,
                model_class=model_class,
                model_path=model_save_path,
                feats_path=external_feats_path,
                df=ext_df,
                device=device,
            )

            if auc is not None:
                logger.info(f"  {fold_col} -> AUC: {auc:.4f}, Acc: {acc:.4f}")
                fold_aucs.append(auc)
                fold_accs.append(acc)

        if fold_aucs:
            # Use nan-aware statistics, consistent with the internal CV summary:
            # run_external_test returns np.nan when AUC is undefined for a fold,
            # and a plain np.mean would propagate that nan into the summary.
            mean_auc = np.nanmean(fold_aucs)
            std_auc = np.nanstd(fold_aucs)
            mean_acc = np.nanmean(fold_accs)
            std_acc = np.nanstd(fold_accs)

            external_results[model_type] = {
                "mean_auc": mean_auc,
                "std_auc": std_auc,
                "mean_acc": mean_acc,
                "std_acc": std_acc,
            }

            logger.info(
                f"  [汇总] {model_type.upper()} 外部测试 AUC: {mean_auc:.4f} ± {std_auc:.4f}"
            )
            logger.info(
                f"  [汇总] {model_type.upper()} 外部测试 Acc: {mean_acc:.4f} ± {std_acc:.4f}"
            )

    # Final external-test summary across all model types.
    if external_results:
        logger.info(f"\n{'='*60}")
        logger.info("------ 外部测试最终结果汇总 ------")
        logger.info(f"{'='*60}")
        for model_type, res in external_results.items():
            logger.info(
                f"{model_type.upper():12} | AUC: {res['mean_auc']:.4f} ± {res['std_auc']:.4f} | "
                f"Acc: {res['mean_acc']:.4f} ± {res['std_acc']:.4f}"
            )
