import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import h5py
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import roc_auc_score, roc_curve
import logging
import sys
import matplotlib.pyplot as plt
from trident.slide_encoder_models import ABMILSlideEncoder

from torch.nn import TransformerEncoder, TransformerEncoderLayer


# --- Configuration ---
task_label_col = "label"  # name of the label column in the manifest DataFrames
feature_dim = 1024        # dimensionality of the pre-extracted patch features
fold_columns = ["fold_0", "fold_1", "fold_2", "fold_3", "fold_4"]


# --- Logging setup ---
log_file_path = "external_test3.log"
# Clear any handlers installed earlier so basicConfig below takes effect.
logging.getLogger().handlers = []
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[
        logging.FileHandler(log_file_path, mode="w"),
        logging.StreamHandler(sys.stdout),
    ],
)
logger = logging.getLogger()
logger.info("日志系统已启动。")
# --- End of logging setup ---
logger.info("\n--- 开始运行外部测试示例 ---")


# Select the compute device (hard-coded to GPU index 3 when CUDA is available).
device = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")
logger.info(f"使用设备: {device}")


# 1. Baseline model (ABMIL)
class BinaryClassificationModel(nn.Module):
    """ABMIL-based binary classifier over bags of patch features.

    Pools per-patch features into one slide-level embedding with
    ``ABMILSlideEncoder`` and maps it to a single logit via a small MLP.
    """

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_heads=1,
        head_dim=512,
        dropout=0.0,
        gated=True,
        hidden_dim=256,
    ):
        super().__init__()
        # Attention-based aggregator: {'features': [B, N, D]} -> [B, D].
        self.feature_encoder = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=n_heads,
            head_dim=head_dim,
            dropout=dropout,
            gated=gated,
        )
        # Two-layer MLP head producing one logit per slide.
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x, return_raw_attention=False):
        """Compute slide-level logits.

        :param x: dict of the form ``{'features': tensor}``, the input
            format expected by ``ABMILSlideEncoder``.
        :param return_raw_attention: when True, also return the raw
            attention weights from the aggregator.
        :return: logits of shape ``[B]``, or ``(logits, attention)``.
        """
        if not return_raw_attention:
            pooled = self.feature_encoder(x)
            return self.classifier(pooled).squeeze(1)

        pooled, attention = self.feature_encoder(x, return_raw_attention=True)
        return self.classifier(pooled).squeeze(1), attention


# *** 2. DSMILModel ***
class DSMILModel(nn.Module):
    """Dual-stream MIL classifier (DSMIL-style).

    Combines an instance-level stream (max over per-patch logits) with a
    bag-level stream (ABMIL pooling followed by an MLP head); the final
    score is the mean of the two stream logits.
    """

    def __init__(
        self,
        input_feature_dim=feature_dim,
        hidden_dim=256,
        n_heads=1,
        head_dim=512,
        dropout=0.0,
        gated=True,
    ):
        super().__init__()
        # Instance stream: scores every patch independently.
        self.instance_classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

        # Bag stream: the same ABMIL aggregator as the baseline model ...
        self.bag_aggregator = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=n_heads,
            head_dim=head_dim,
            dropout=dropout,
            gated=gated,
        )
        # ... followed by an MLP head on the pooled slide feature.
        self.bag_classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict: {'features': [B, N, D]} -> logits of shape [B]."""
        patch_feats = x_dict["features"]

        # Instance stream: keep the highest-scoring ("critical") patch.
        per_patch_logits = self.instance_classifier(patch_feats)  # [B, N, 1]
        critical_logit = per_patch_logits.max(dim=1).values       # [B, 1]

        # Bag stream: attention-pooled slide embedding -> one logit.
        pooled = self.bag_aggregator(x_dict)                      # [B, D]
        bag_logit = self.bag_classifier(pooled)                   # [B, 1]

        # Average the two streams and drop the trailing dimension.
        return ((critical_logit + bag_logit) / 2).squeeze(1)      # [B]


# *** 3. TransMILModel ***
class TransMILModel(nn.Module):
    """Transformer-based MIL classifier (TransMIL-style).

    Runs a TransformerEncoder over the patch sequence, mean-pools the
    contextualized tokens, and classifies the pooled bag feature.
    """

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.1,
    ):
        super().__init__()
        self.input_feature_dim = input_feature_dim

        # batch_first=True so all tensors are laid out as [B, N, D].
        layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer_encoder = TransformerEncoder(
            layer, num_layers=num_encoder_layers
        )

        # MLP head: pooled bag feature -> single logit.
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict: {'features': [B, N, D]} -> logits of shape [B]."""
        tokens = x_dict["features"]

        # 1. Contextualize patches with self-attention.
        contextualized = self.transformer_encoder(tokens)  # [B, N, D]

        # 2. Aggregate via simple mean pooling over the patch axis.
        pooled = contextualized.mean(dim=1)                # [B, D]

        # 3. Classify the pooled bag feature.
        return self.classifier(pooled).squeeze(1)          # [B]


# *** 4. HybridAttnMILModel (novel MIL) v2 ***
class HybridAttnMILModel(nn.Module):
    """Hybrid context-gated MIL model (v2).

    A Pre-Norm Transformer produces context-enhanced patch features; a
    per-patch sigmoid gate (predicted from the raw features) decides how
    much context to blend into each patch; the fused bag is pooled with
    ABMIL and classified by an MLP head.
    """

    def __init__(
        self,
        input_feature_dim=512,
        # Transformer settings
        n_head=8,
        num_encoder_layers=2,
        transformer_ff_dim=2048,
        # ABMIL settings
        abmil_n_heads=1,
        abmil_head_dim=512,
        # Classifier settings
        hidden_dim=256,
        dropout=0.1,
    ):
        super().__init__()

        # 1. Context encoder: Pre-Norm Transformer with a final LayerNorm.
        layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=transformer_ff_dim,
            dropout=dropout,
            batch_first=True,
            norm_first=True,
        )
        self.context_encoder = TransformerEncoder(
            layer,
            num_layers=num_encoder_layers,
            norm=nn.LayerNorm(input_feature_dim),
        )

        # 2. Per-patch gate: maps each raw feature to a weight in [0, 1].
        self.gate_net = nn.Sequential(
            nn.Linear(input_feature_dim, 64),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(64, 1),
            nn.Sigmoid(),
        )

        # 3. ABMIL attention pooling over the fused patch features.
        self.attention_aggregator = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=abmil_n_heads,
            head_dim=abmil_head_dim,
            dropout=dropout,
            gated=True,
        )

        # 4. Normalized bag feature -> MLP classifier head.
        self.bag_norm = nn.LayerNorm(input_feature_dim)
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict: {'features': [B, N, D]} -> logits of shape [B]."""
        raw = x_dict["features"]                                  # [B, N, D]

        # Context-enhanced patch features.
        contextual = self.context_encoder(raw)                    # [B, N, D]

        # Gate is predicted from the ORIGINAL features, not the context.
        blend = self.gate_net(raw)                                # [B, N, 1]

        # Keep the raw feature as the backbone; inject context adaptively.
        fused = (1 - blend) * raw + blend * contextual            # [B, N, D]

        # ABMIL pooling, then LayerNorm + MLP classification.
        pooled = self.attention_aggregator({"features": fused})   # [B, D]
        return self.classifier(self.bag_norm(pooled)).squeeze(1)  # [B]


# *** 6. MAEMILModel (MAE-inspired MIL classifier) ***
class MAEMILModel(nn.Module):
    """MAE-inspired MIL classifier.

    During training a random subset of patches is dropped (masked) before
    the Transformer; at evaluation time all patches are used. The encoder
    output is mean-pooled and classified, as in TransMIL.
    """

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.1,
        mask_ratio=0.75,
    ):
        """
        :param mask_ratio: fraction of patches to mask (drop) during training.
        """
        super().__init__()
        self.input_feature_dim = input_feature_dim
        self.mask_ratio = mask_ratio

        # Transformer encoder (same configuration as TransMIL).
        layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer_encoder = TransformerEncoder(
            layer, num_layers=num_encoder_layers
        )

        # MLP classifier head (same as TransMIL).
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict: {'features': [B, N, D]} -> logits of shape [B]."""
        feats = x_dict["features"]
        B, N, D = feats.shape

        if self.training:
            # --- Training: keep a random (1 - mask_ratio) fraction,
            #     with at least one patch visible. ---
            keep = max(int(N * (1 - self.mask_ratio)), 1)

            # Per-sample random ordering: argsort of uniform noise [B, N].
            noise = torch.rand(B, N, device=feats.device)
            keep_ids = torch.argsort(noise, dim=1)[:, :keep]  # [B, keep]

            # Gather the visible patches; the index is expanded over D so
            # it broadcasts across the feature dimension.
            visible = torch.gather(
                feats, dim=1, index=keep_ids.unsqueeze(-1).expand(-1, -1, D)
            )  # [B, keep, D]

            encoded = self.transformer_encoder(visible)
        else:
            # --- Evaluation: no masking, encode the full bag. ---
            encoded = self.transformer_encoder(feats)  # [B, N, D]

        # Mean-pool over however many tokens were encoded.
        pooled = encoded.mean(dim=1)                   # [B, D]

        return self.classifier(pooled).squeeze(1)      # [B]


# *** 7. MAEMILModelWithRegisters ***
class MAEMILModelWithRegisters(nn.Module):
    """MAE-style MIL classifier with learnable register tokens.

    Auxiliary "register" tokens are appended to the patch sequence before
    the Transformer. During training the patches are randomly masked, but
    the registers are always kept. Only the patch outputs (never the
    registers) are mean-pooled for classification.
    """

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.1,
        mask_ratio=0.5,
        num_registers=4,
    ):
        """
        :param mask_ratio: fraction of patches masked during training.
        :param num_registers: number of learnable register ("scratch") tokens.
        """
        super().__init__()
        self.input_feature_dim = input_feature_dim
        self.mask_ratio = mask_ratio
        self.num_registers = num_registers

        # 1. Learnable registers, shape [1, num_registers, D];
        #    nn.Parameter makes them part of the model's trainable state.
        self.registers = nn.Parameter(
            torch.randn(1, self.num_registers, self.input_feature_dim)
        )

        # 2. Transformer encoder (same configuration as TransMIL).
        layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer_encoder = TransformerEncoder(
            layer, num_layers=num_encoder_layers
        )

        # 3. MLP classifier head (same as TransMIL).
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict: {'features': [B, N, D]} -> logits of shape [B]."""
        feats = x_dict["features"]
        B, N, D = feats.shape

        # Broadcast registers across the batch; .expand avoids copying.
        regs = self.registers.expand(B, -1, -1)  # [B, num_registers, D]

        if self.training:
            # --- Training: mask patches, but never the registers. ---

            # Keep at least one patch visible.
            keep = max(int(N * (1 - self.mask_ratio)), 1)

            # Randomly pick the visible patches (argsort of uniform noise).
            noise = torch.rand(B, N, device=feats.device)
            keep_ids = torch.argsort(noise, dim=1)[:, :keep]
            visible = torch.gather(
                feats, dim=1, index=keep_ids.unsqueeze(-1).expand(-1, -1, D)
            )  # [B, keep, D]

            # Sequence layout: [visible patches, registers].
            tokens = torch.cat([visible, regs], dim=1)  # [B, keep + R, D]
            n_patch_tokens = keep
        else:
            # --- Evaluation: all patches plus registers. ---
            tokens = torch.cat([feats, regs], dim=1)    # [B, N + R, D]
            n_patch_tokens = N

        # Encode the combined sequence.
        encoded = self.transformer_encoder(tokens)

        # Pool ONLY the patch outputs. Patch tokens come first by
        # construction, so slice them off the front of the sequence.
        pooled = encoded[:, :n_patch_tokens, :].mean(dim=1)  # [B, D]

        return self.classifier(pooled).squeeze(1)            # [B]


# *** Dataset used for external testing ***
class ExternalH5Dataset(Dataset):
    """Dataset for external testing.

    Instead of raising on missing or corrupt feature files, ``__getitem__``
    always returns a dict whose ``status`` field ('success'/'failed') tells
    the collate function whether the sample loaded; failures carry a
    ``reason`` string for later reporting.
    """

    def __init__(self, feats_path, df, task_label_col):
        self.df = df
        self.feats_path = feats_path
        self.task_label_col = task_label_col

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        record = self.df.iloc[idx]
        slide_id = record["slide_id"]
        filepath = os.path.join(self.feats_path, slide_id + ".h5")
        label = torch.tensor(record[self.task_label_col], dtype=torch.float32)

        def _failure(reason):
            # Failure payload consumed by safe_collate.
            return {
                "status": "failed",
                "slide_id": slide_id,
                "label": label.item(),
                "reason": reason,
            }

        # 1. Missing file -> report instead of crashing.
        if not os.path.exists(filepath):
            logger.warning(f"文件未找到: {filepath}")
            return _failure("file_not_found")

        # 2. Unreadable/corrupt file -> report instead of crashing.
        try:
            with h5py.File(filepath, "r") as f:
                features = torch.from_numpy(f["features"][:])
        except Exception as e:
            logger.error(f"加载H5文件失败 (文件可能已损坏): {filepath}")
            logger.error(f"错误: {e}")
            return _failure("file_corrupt")

        # 3. Successful load.
        return {
            "status": "success",
            "features": features,
            "label": label,
            "slide_id": slide_id,
        }
# Dataset used for training/validation
class H5Dataset(Dataset):
    """Training/validation dataset over per-slide HDF5 feature files.

    Keeps only the rows of ``df`` belonging to the requested split. For
    the 'train' split, a fixed-size random subset of patches is sampled
    per fetch so the model sees varying patches across epochs; all other
    splits return every patch unchanged.
    """

    def __init__(self, feats_path, df, split, split_col, num_features=512):
        # :param feats_path: directory containing '<slide_id>.h5' files.
        # :param df: manifest DataFrame with 'slide_id', label and split columns.
        # :param split: split name to select (e.g. 'train', 'test').
        # :param split_col: column of ``df`` holding the split assignment.
        # :param num_features: number of patches sampled per slide at train time.
        self.df = df[df[split_col] == split]
        self.feats_path = feats_path
        self.num_features = num_features
        self.split = split
        # Use the globally configured feature dimension (avoids hard-coding).
        self.feature_dim = feature_dim

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        with h5py.File(
            os.path.join(self.feats_path, row["slide_id"] + ".h5"), "r"
        ) as f:
            features = torch.from_numpy(f["features"][:])  # all patches [N_all, D]

        if self.split == "train":
            num_available = features.shape[0]
            # BUG FIX: the previous code built a fresh Generator seeded with 42
            # on every call, so the *same* index pattern was drawn for every
            # slide and every epoch — the "random" subsampling was frozen.
            # Use the default RNG (seeded per-worker by the DataLoader) so the
            # sampled patches actually vary across epochs.
            if num_available >= self.num_features:
                # Enough patches: sample without replacement.
                indices = torch.randperm(num_available)[: self.num_features]
            else:
                # Too few patches: sample with replacement up to num_features.
                indices = torch.randint(num_available, (self.num_features,))
            features = features[indices]  # sampled [num_features, D]

        # For 'test'/validation splits, features stay [N_all, D].

        label = torch.tensor(row[task_label_col], dtype=torch.float32)
        return features, label


# *** Failure-tolerant collate function ***
def safe_collate(batch):
    """Collate function that tolerates failed sample loads.

    Splits the batch into successfully loaded samples (combined with the
    default collate) and the failure records produced by
    ``ExternalH5Dataset``.

    Returns a 4-tuple ``(features, labels, slide_ids, failed_samples)``;
    the first three are ``None`` when no sample in the batch loaded.
    """
    ok, failures = [], []
    for sample in batch:
        if sample is None:  # defensive: should not normally happen
            continue
        if sample["status"] == "success":
            # Only the fields that need tensor collation go into `ok`.
            ok.append((sample["features"], sample["label"], sample["slide_id"]))
        else:
            # Keep the failure metadata for later reporting.
            failures.append(
                {
                    "slide_id": sample["slide_id"],
                    "label": sample["label"],
                    "reason": sample["reason"],
                }
            )

    if not ok:
        # Entire batch failed: nothing to collate.
        return None, None, None, failures

    # Default collate handles the (features, label, slide_id) tuples.
    feats, labels, slide_ids = torch.utils.data.dataloader.default_collate(ok)
    return feats, labels, slide_ids, failures


# *** External evaluation function ***
def test_external_dataset(
    model_path,
    external_df,
    feats_path,
    device,
    task_label_col,
    plot_save_path=None,
    mismatch_save_path=None,
    missing_files_save_path=None,  # CSV path for the missing/corrupt-files report
):
    """
    Load a saved model and evaluate it on an external dataset.

    Parameters:
    - model_path (str): path to the saved ``state_dict`` (.pth file).
    - external_df (pd.DataFrame): manifest with 'slide_id' and the label column.
    - feats_path (str): directory holding the per-slide .h5 feature files.
    - device (torch.device): device used for inference.
    - task_label_col (str): name of the label column in ``external_df``.
    - plot_save_path (str, optional): where to save the ROC curve figure.
    - mismatch_save_path (str, optional): CSV path for misclassified samples.
    - missing_files_save_path (str, optional): CSV path for missing/corrupt files.

    Returns:
    - (auc, accuracy); ``auc`` is NaN when only one class is present or no
      samples load. Returns None early if the weights fail to load.
    """
    logger.info(f"\n{'='*50}")
    logger.info(f"--- 开始外部测试 ---")
    logger.info(f"模型: {model_path}")
    logger.info(f"特征路径: {feats_path}")
    logger.info(f"测试样本数 (总计): {len(external_df)}")

    # 1. Instantiate the model architecture.
    #    NOTE(review): this must match the architecture the checkpoint was
    #    trained with — here a MAEMILModel over 512-dim features.
    # model = BinaryClassificationModel().to(device)
    # model = DSMILModel().to(device)
    # model = TransMILModel().to(device)
    model = MAEMILModel(input_feature_dim=512).to(device)
    # model = MAEMILModelWithRegisters().to(device)

    # 2. Load the saved weights; abort the evaluation on failure.
    try:
        model.load_state_dict(torch.load(model_path, map_location=device))
        logger.info("模型权重加载成功。")
    except Exception as e:
        logger.error(f"加载模型权重失败: {e}")
        return

    # 3. Build the external DataLoader.
    external_dataset = ExternalH5Dataset(feats_path, external_df, task_label_col)
    external_loader = DataLoader(
        external_dataset,
        batch_size=1,  # one slide per batch (variable patch counts)
        shuffle=False,
        collate_fn=safe_collate,  # separates failed loads from good samples
    )

    # 4. Evaluation loop.
    model.eval()
    all_labels, all_outputs = [], []
    correct = 0
    total = 0
    mismatched_samples = []  # records of misclassified slides
    failed_load_samples = []  # slides whose feature file was missing/corrupt

    with torch.no_grad():
        # safe_collate yields 4 values: features, labels, slide_ids, failures.
        for (
            batch_features,
            batch_labels,
            batch_slide_ids,
            failed_batch,
        ) in external_loader:

            # Accumulate samples that failed to load.
            if failed_batch:
                failed_load_samples.extend(failed_batch)

            # Skip batches in which every sample failed to load.
            if batch_features is None or batch_labels is None:
                logger.warning("跳过一个 'None' 批次 (所有样本文件均缺失或损坏)")
                continue

            features = {"features": batch_features.to(device)}  # dict input expected by the models
            labels = batch_labels.to(device)
            slide_id = batch_slide_ids[0]  # valid because batch_size == 1

            outputs = model(features)

            # Threshold raw logits at 0 (equivalent to sigmoid > 0.5).
            predicted = (outputs > 0).float()
            correct += (predicted == labels).sum().item()
            total += labels.size(0)

            all_outputs.append(outputs.cpu().numpy())
            all_labels.append(labels.cpu().numpy())

            # Record misclassified slides for the mismatch report.
            label_val = labels.item()
            pred_val = predicted.item()
            if label_val != pred_val:
                mismatched_samples.append(
                    {
                        "slide_id": slide_id,
                        "true_label": int(label_val),
                        "predicted_label": int(pred_val),
                        "model_output_logit": outputs.item(),
                    }
                )

    # 5. Compute and report metrics.

    # Persist the failed-load report, if requested.
    if missing_files_save_path:
        if failed_load_samples:
            try:
                failed_df = pd.DataFrame(failed_load_samples)
                failed_df.to_csv(missing_files_save_path, index=False)
                logger.info(
                    f"保存了 {len(failed_load_samples)} 个加载失败的样本信息到: {missing_files_save_path}"
                )
            except Exception as e:
                logger.error(f"保存加载失败样本 CSV 失败: {e}")
        else:
            logger.info("没有发现加载失败的样本。")

    # Bail out if every file was missing/corrupt.
    if total == 0:
        logger.error("没有加载任何有效的样本。无法计算指标。")
        logger.error(f"{'='*50}\n")
        return np.nan, 0.0

    all_outputs = np.concatenate(all_outputs)
    all_labels = np.concatenate(all_labels)

    auc = np.nan
    if len(np.unique(all_labels)) > 1:
        # AUC is computed on raw logits (rank-equivalent to probabilities).
        auc = roc_auc_score(all_labels, all_outputs)

        # Plot and save the ROC curve.
        if plot_save_path:
            try:
                fpr, tpr, _ = roc_curve(all_labels, all_outputs)
                plt.figure()
                plt.plot(
                    fpr,
                    tpr,
                    color="darkorange",
                    lw=2,
                    label=f"ROC curve (AUC = {auc:.4f})",
                )
                plt.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
                plt.xlim([0.0, 1.0])
                plt.ylim([0.0, 1.05])
                plt.xlabel("False Positive Rate")
                plt.ylabel("True Positive Rate")
                plt.title(f"ROC Curve - External Test")
                plt.legend(loc="lower right")
                plt.savefig(plot_save_path)
                plt.close()
                logger.info(f"AUC 曲线图已保存至: {plot_save_path}")
            except Exception as e:
                logger.error(f"保存 AUC 曲线图失败: {e}")

    else:
        logger.warning("外部测试集只包含一个类别。无法计算 AUC。")

    accuracy = correct / total

    logger.info("--- 外部测试结果 ---")
    logger.info(f"有效样本数 (成功加载并评估): {total}")
    logger.info(f"Test AUC: {auc:.4f}")
    logger.info(f"Test Accuracy: {accuracy:.4f}")

    # Persist the misclassified-samples report, if requested.
    if mismatch_save_path:
        if mismatched_samples:
            try:
                mismatch_df = pd.DataFrame(mismatched_samples)
                mismatch_df.to_csv(mismatch_save_path, index=False)
                logger.info(
                    f"保存了 {len(mismatched_samples)} 个误分类样本到: {mismatch_save_path}"
                )
            except Exception as e:
                logger.error(f"保存误分类样本 CSV 失败: {e}")
        else:
            logger.info("没有发现误分类样本。")

    logger.info(f"{'='*50}\n")

    return auc, accuracy


# 1. External test-set configuration
# external_feats_path = "/data0/lcy/data/LNM/LNM_Zhujiang_conchv1_processed/20x_512px_0px_overlap/features_conch_v1"
external_feats_path = "/data0/lcy/data/LNM/LNM_Zhujiang_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1"
external_df_path = "/data0/lcy/Patho-Bench/tools/zzylnm_zhujiang_splits/cohort.tsv"
external_label_col = "label"  # must match the label column name in the external TSV

save_dir = "/data0/lcy/trident/tools/saved_models/maemil"
# 2. Pick a saved model to test (here: the model trained on 'fold_0')
model_to_test_path = os.path.join(save_dir, "model_fold_0.pth")

# Output path for the ROC/AUC plot
plot_path = os.path.join(save_dir, "external_test_auc_curve.png")

# Output path for the misclassified-samples CSV
mismatch_path = os.path.join(save_dir, "external_test_mismatches.csv")

# Output path for the missing/corrupt-files CSV
missing_files_path = os.path.join(save_dir, "external_test_missing_files.csv")


# 3. Load the external manifest DataFrame
try:
    external_df = pd.read_csv(external_df_path, sep="\t")
    logger.info(f"成功加载外部 manifest: {external_df_path}")

    # 4. Sanity-check that the model file and feature directory exist
    if not os.path.exists(model_to_test_path):
        logger.error(f"找不到模型文件: {model_to_test_path}")
    elif not os.path.exists(external_feats_path):
        logger.error(f"找不到外部特征路径: {external_feats_path}")
    else:
        # 5. Run the external evaluation
        test_external_dataset(
            model_path=model_to_test_path,
            external_df=external_df,
            feats_path=external_feats_path,
            device=device,
            task_label_col=external_label_col,
            plot_save_path=plot_path,
            mismatch_save_path=mismatch_path,
            missing_files_save_path=missing_files_path,  # missing/corrupt-files report
        )

except FileNotFoundError:
    logger.error(f"找不到外部 manifest 文件: {external_df_path}")
except Exception as e:
    logger.error(f"加载外部 manifest 时出错: {e}")
