# --- 导入标准库 ---
import os
import sys
import logging
import datetime
import h5py

# --- 导入科学计算库 ---
import numpy as np
import pandas as pd
import scipy.stats as st
from tqdm import tqdm

# --- 导入 机器学习 (sklearn) 库 ---
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
import joblib

# --- [NEW] Import the imbalanced-learn library ---
# Required for resampling techniques such as SMOTE.
# Install it with: pip install imbalanced-learn
try:
    from imblearn.pipeline import Pipeline as ImbPipeline
    from imblearn.over_sampling import SMOTE, RandomOverSampler
    from imblearn.under_sampling import RandomUnderSampler
    from imblearn.combine import SMOTEENN
except ImportError:
    print("错误：未找到 'imbalanced-learn' 库。")
    print("请使用以下命令安装: pip install imbalanced-learn")
    sys.exit(1)


# ----------------------------------------------------------------------------
# --- 1. Centralized configuration (CONFIG) ---
# ----------------------------------------------------------------------------
# Edit all paths and parameters here.

CONFIG = {
    "gpu_id": "3",
    "seed": 42,
    "run_mode": "test_only",  # [new] run mode: 'full', 'train_only', 'test_only'
    "feature_dim": 1024,  # 1024 for UNI, 512 for Conch
    "log_dir": "logs",
    "ml_save_dir": "saved_ml_models",
    "error_log_dir": "error_logs",
    "fold_columns": ["fold_0", "fold_1", "fold_2", "fold_3", "fold_4"],
    "task_label_col": "label",
    "aggregation_method": "mean",  # 'mean' or 'max' 'quantile'
    # --- [new] Resampling configuration ---
    # Mitigates class imbalance; applied inside each ImbPipeline during fit only.
    "resampling": {
        "enabled": True,  # set to True to enable
        # Options: "SMOTE", "RandomOverSampler", "RandomUnderSampler", "SMOTEENN", "None"
        "method": "SMOTE",
        "random_state": 42,
    },
    # --- [new] Noise-augmentation configuration ---
    # Adds Gaussian-noise copies of training samples in feature space.
    "noise_augmentation": {
        "enabled": False,  # set to True to enable (can be combined with resampling)
        "factor": 0.01,  # noise level, as a fraction of each feature's std dev
        "copies": 1,  # number of noisy copies created per original sample
    },
    # --- Internal cross-validation dataset ---
    "main_data": {
        "name": "ZZYLNM_Internal",
        "tsv_path": "/data0/lcy/Patho-Bench/tools/zzylnm_slices_splits/k=all.tsv",
        "feats_path": "/data0/lcy/data/LNM/LNM_slices_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1",
        # Conch example (uncomment the two lines below to switch)
        # "tsv_path": "/data0/lcy/Patho-Bench/tools/zzylnm_slices_splits2/cohort.tsv",
        # "feats_path": "/data0/lcy/data/LNM/LNM_slices_conch_v1_processed/20x_512px_0px_overlap/features_conch_v1",
        "label_col": "label",
    },
    # --- External test dataset ---
    "external_test": {
        "enabled": True,  # set to False to skip external testing
        # 'model_to_test_path' is only used when 'run_mode' == 'full'
        "model_to_test_path": "saved_ml_models/LogisticRegression_fold_1.joblib",  # model to load
        "name": "ZZYLNM_Zhujiang",
        "tsv_path": "/data0/lcy/Patho-Bench/tools/zzylnm_zhujiang_splits/cohort.tsv",
        "feats_path": "/data0/lcy/data/LNM/LNM_Zhujiang_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1",
        # "feats_path": "/data0/lcy/data/LNM/LNM_Zhujiang_uni_v1_processed/10x_512px_0px_overlap/features_uni_v1_imagenet",
        # Conch example (uncomment the two lines below to switch)
        # "tsv_path": "/data0/lcy/Patho-Bench/tools/zzylnm_zhujiang_splits/cohort.tsv",
        # "feats_path": "/data0/lcy/data/LNM/LNM_Zhujiang_conchv1_processed/20x_512px_0px_overlap/features_conch_v1",
        "label_col": "label",
    },
}

# ----------------------------------------------------------------------------
# --- 2. Logging and environment setup ---
# ----------------------------------------------------------------------------

# Module-level root logger shared by every function in this file;
# handlers/format are installed by setup_logging().
logger = logging.getLogger()


def setup_logging(log_dir: str) -> None:
    """Configure the global root logger to write to a timestamped file and stdout.

    Args:
        log_dir: Directory in which the log file is created (made if missing).
    """
    os.makedirs(log_dir, exist_ok=True)

    now = datetime.datetime.now()
    timestamp = now.strftime("%Y%m%d_%H%M")
    log_file_path = os.path.join(log_dir, f"{timestamp}_cross_ml_val_training.log")

    # Clear any previously-installed handlers so repeated calls (or prior
    # basicConfig invocations) do not produce duplicated log lines.
    logging.getLogger().handlers = []
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        # Bug fix: was "%Y-m-d %H:%M:%S" — the missing '%' before 'm' and 'd'
        # printed the literal characters "m"/"d" instead of month/day.
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=[
            logging.FileHandler(log_file_path, mode="w"),
            logging.StreamHandler(sys.stdout),
        ],
    )
    logger.info("日志系统已启动。")
    logger.info(f"日志文件保存在: {log_file_path}")


def setup_environment(config: dict) -> None:
    """Pin GPU visibility and seed NumPy's global RNG from the config."""
    gpu_id = config["gpu_id"]
    seed = config["seed"]
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    np.random.seed(seed)
    logger.info(f"GPU 可见性设置为: {gpu_id}")
    logger.info(f"随机种子设置为: {seed}")


# ----------------------------------------------------------------------------
# --- 3. Data-loading & feature-engineering helpers ---
# ----------------------------------------------------------------------------


def load_and_filter_manifest(tsv_path: str, h5_feats_path: str) -> pd.DataFrame:
    """Load a TSV/Parquet manifest and keep only rows with an existing .h5 feature file.

    A Parquet sibling of the TSV is preferred (and created on first TSV load)
    to speed up subsequent runs.

    Args:
        tsv_path: Tab-separated manifest; must contain a 'slide_id' column.
        h5_feats_path: Directory holding per-slide '<slide_id>.h5' feature files.

    Returns:
        The manifest filtered to slides whose .h5 file exists (may be empty).

    Raises:
        FileNotFoundError: If the feature directory is missing, or neither the
            TSV nor its Parquet sibling exists.
    """
    logger.info(f"--- 开始加载数据 ---")
    logger.info(f"Manifest (TSV/Parquet) 路径: {tsv_path}")
    logger.info(f"特征 (H5) 路径: {h5_feats_path}")

    # NOTE(review): str.replace substitutes every ".tsv" occurrence; if the path
    # does not end in ".tsv" the parquet path equals tsv_path — confirm inputs.
    parquet_path = tsv_path.replace(".tsv", ".parquet")

    if not os.path.exists(h5_feats_path):
        logger.error(f"特征路径不存在: {h5_feats_path}")
        raise FileNotFoundError(f"特征路径不存在: {h5_feats_path}")

    if not os.path.exists(tsv_path) and not os.path.exists(parquet_path):
        logger.error(f"Manifest 文件不存在: {tsv_path} 或 {parquet_path}")
        raise FileNotFoundError(f"Manifest 文件不存在: {tsv_path}")

    # 1. Load the DataFrame (Parquet cache first, TSV otherwise)
    if os.path.exists(parquet_path):
        logger.info(f"正在从更快的 Parquet 文件加载: {parquet_path}")
        df = pd.read_parquet(parquet_path)
    else:
        logger.info(f"首次加载 TSV 文件（可能需要一些时间）: {tsv_path}")
        df = pd.read_csv(tsv_path, sep="\t")
        try:
            # Best-effort cache: failure to write Parquet is non-fatal.
            logger.info(f"正在保存为 Parquet 格式以加快未来加载速度: {parquet_path}")
            df.to_parquet(parquet_path)
        except Exception as e:
            logger.warning(f"无法保存 Parquet 文件: {e}")

    logger.info(
        f"原始 Manifest 加载了 {len(df)} 行, {df['slide_id'].nunique()} 个唯一 slide_id。"
    )

    # 2. Collect base names (without ".h5") of all available feature files
    available_files = [
        f.replace(".h5", "") for f in os.listdir(h5_feats_path) if f.endswith(".h5")
    ]
    available_files_set = set(available_files)
    logger.info(
        f"在 {h5_feats_path} 中找到 {len(available_files_set)} 个 .h5 特征文件。"
    )

    # 3. Report manifest IDs that have no corresponding feature file
    df_ids = set(df["slide_id"].unique())
    missing_ids = df_ids - available_files_set

    if missing_ids:
        logger.warning(
            f"警告：在 Manifest 中找到了 {len(missing_ids)} 个 slide_id，但在 feats_path 中没有对应的 .h5 文件。"
        )
        logger.warning("将从 Manifest 中过滤掉这些缺失的 ID。")
        # logger.warning(f"缺失的 ID 示例: {list(missing_ids)[:5]}")

    # 4. Keep only rows whose .h5 file actually exists
    original_count = len(df)
    df = df[df["slide_id"].isin(available_files_set)].copy()
    filtered_count = len(df)

    logger.info(f"过滤 Manifest：原始 {original_count} 行, 剩余 {filtered_count} 行。")

    if filtered_count == 0:
        # Logged but not raised: callers check df.empty themselves.
        logger.error("过滤后没有剩余的样本。请检查路径是否正确。")

    return df


def create_aggregated_features(
    df: pd.DataFrame, feats_path: str, label_col: str, agg_method: str = "mean"
) -> tuple[dict, dict]:
    """Aggregate per-patch H5 features into one slide-level vector per slide.

    Args:
        df: Manifest with 'slide_id' and `label_col` columns.
        feats_path: Directory of '<slide_id>.h5' files; each is assumed to hold
            a 'features' dataset of shape (n_patches, CONFIG['feature_dim']).
        label_col: Column in `df` holding the slide label.
        agg_method: 'mean', 'max', or 'quantile' pooling over patches.

    Returns:
        (feature_map, label_map): slide_id -> aggregated vector, slide_id -> label.
        Slides that fail to load, are empty, or have a wrong feature dim are skipped.

    Raises:
        ValueError: If `agg_method` is not one of the supported methods.
    """
    logger.info(f"--- (ML) 开始创建聚合特征 ({agg_method} pooling) ---")
    feature_map = {}
    label_map = {}

    # Deduplicate so each slide's H5 file is read only once
    unique_slides = df[["slide_id", label_col]].drop_duplicates().set_index("slide_id")

    for slide_id, row in tqdm(
        unique_slides.iterrows(), total=len(unique_slides), desc="聚合特征"
    ):
        file_path = os.path.join(feats_path, slide_id + ".h5")
        try:
            with h5py.File(file_path, "r") as f:
                features = f["features"][:]

            if features.shape[0] == 0:
                logger.warning(f"文件 {file_path} 不包含特征，跳过。")
                continue

            # Guard against mixing embeddings from different extractors
            if features.shape[1] != CONFIG["feature_dim"]:
                logger.warning(
                    f"文件 {file_path} 特征维度 ({features.shape[1]}) 与 CONFIG ({CONFIG['feature_dim']}) 不符，跳过。"
                )
                continue

            if agg_method == "mean":
                agg_feat = np.mean(features, axis=0)
            elif agg_method == "max":
                agg_feat = np.max(features, axis=0)
            # --- Quantile pooling ---
            elif agg_method == "quantile":
                # Percentiles along the patch axis (axis=0); [25, 50, 75, 95]
                # is more outlier-robust than including min/max.
                # NOTE(review): this yields a 4*feature_dim vector — confirm
                # downstream consumers accept the larger dimensionality.
                percentiles = [25, 50, 75, 95]

                # np.percentile returns (len(percentiles), num_features),
                # e.g. (4, 1024)
                quantile_feats = np.percentile(features, q=percentiles, axis=0)

                # Flatten to a single 1-D vector (4 * num_features,)
                agg_feat = quantile_feats.flatten()
            else:
                logger.error(f"未知的聚合方法: {agg_method}")
                raise ValueError(f"未知的聚合方法: {agg_method}")

            feature_map[slide_id] = agg_feat
            label_map[slide_id] = row[label_col]
        except Exception as e:
            # Best-effort: a single corrupt file should not abort the whole run.
            logger.warning(f"无法处理文件 {file_path}: {e}")

    logger.info(f"聚合特征创建完毕。共处理 {len(feature_map)} 个滑片。")
    return feature_map, label_map


# --- Noise-augmentation helper ---
def augment_with_noise(
    X_train: np.ndarray, y_train: np.ndarray, config: dict
) -> tuple[np.ndarray, np.ndarray]:
    """Augment the training set by appending Gaussian-noise copies of each sample.

    Args:
        X_train: Training features, shape (n_samples, n_features).
        y_train: Training labels, shape (n_samples,).
        config: The CONFIG['noise_augmentation'] sub-dict with keys
            'enabled', 'factor' (noise scale as a fraction of each feature's
            std dev) and 'copies' (noisy copies per original sample).

    Returns:
        (X, y) — the original arrays unchanged when disabled or copies == 0,
        otherwise the originals concatenated with the noisy copies.
    """
    if not config.get("enabled", False) or config.get("copies", 0) == 0:
        return X_train, y_train

    copies = config.get("copies", 1)
    factor = config.get("factor", 0.01)

    X_augmented = [X_train]
    y_augmented = [y_train]

    logger.info(f"--- (ML) 正在应用噪声增强 (Copies: {copies}, Factor: {factor}) ---")

    # The per-feature noise scale is loop-invariant: compute it once instead of
    # recomputing np.std for every copy (the original recomputed it per iteration).
    std_devs = np.std(X_train, axis=0)
    noise_scale = std_devs * factor

    for _ in range(copies):
        # Unit Gaussian noise scaled per feature; labels are unchanged.
        noise = np.random.normal(0, 1, X_train.shape) * noise_scale
        X_augmented.append(X_train + noise)
        y_augmented.append(y_train)

    X_train_final = np.concatenate(X_augmented, axis=0)
    y_train_final = np.concatenate(y_augmented, axis=0)

    logger.info(f"噪声增强后: 原始样本 {len(X_train)}, 增强后样本 {len(X_train_final)}")

    return X_train_final, y_train_final


# --- Resampling helper ---
def get_resampler(config: dict) -> (object, str):
    """Build the imblearn resampler requested by the config.

    Returns:
        (resampler, display_name); (None, "No Resampling") when resampling is
        disabled, the method is "None", or the method name is unrecognized.
    """
    method = config.get("method", "None")
    seed = config.get("random_state", 42)

    if not config.get("enabled", False) or method == "None":
        return None, "No Resampling"

    # k_neighbors=3 is safer than SMOTE's default of 5 when the minority
    # class has very few samples.
    safe_k_neighbors = 3

    factories = {
        "SMOTE": lambda: SMOTE(random_state=seed, k_neighbors=safe_k_neighbors),
        "RandomOverSampler": lambda: RandomOverSampler(random_state=seed),
        "RandomUnderSampler": lambda: RandomUnderSampler(random_state=seed),
        "SMOTEENN": lambda: SMOTEENN(
            random_state=seed,
            smote=SMOTE(random_state=seed, k_neighbors=safe_k_neighbors),
        ),
    }

    build = factories.get(method)
    if build is None:
        logger.warning(f"未知的重采样方法: {method}. 将不使用重采样。")
        return None, "No Resampling"

    return build(), method


def get_ml_models(seed: int, resampling_config: dict) -> dict:
    """Build the dictionary of candidate ImbPipeline models to train.

    Each pipeline is (optional StandardScaler) -> (optional resampler) -> clf.
    ImbPipeline applies the sampler step during fit only and skips it at
    predict()/predict_proba() time. Scale-sensitive models (logistic
    regression, SVM, KNN) get a StandardScaler; tree ensembles do not.

    Args:
        seed: Random seed forwarded to every stochastic estimator.
        resampling_config: The CONFIG['resampling'] sub-dict.

    Returns:
        Mapping of display name -> unfitted ImbPipeline.
    """
    resampler, resampler_name = get_resampler(resampling_config)
    logger.info(f"为 ML 流水线配置重采样方法: {resampler_name}")

    def _pipeline(clf, use_scaler: bool) -> ImbPipeline:
        # Assemble (scaler?) -> (sampler?) -> clf, omitting absent steps.
        # As in the original, the single resampler instance is shared by
        # all pipelines.
        steps = []
        if use_scaler:
            steps.append(("scaler", StandardScaler()))
        if resampler:
            steps.append(("sampler", resampler))
        steps.append(("clf", clf))
        return ImbPipeline(steps)

    return {
        "LogisticRegression": _pipeline(
            LogisticRegression(random_state=seed, max_iter=1000, solver="liblinear"),
            use_scaler=True,
        ),
        "RandomForest": _pipeline(
            RandomForestClassifier(random_state=seed, n_estimators=100),
            use_scaler=False,
        ),
        "SVM (RBF)": _pipeline(
            SVC(kernel="rbf", probability=True, random_state=seed),
            use_scaler=True,
        ),
        "GradientBoosting": _pipeline(
            GradientBoostingClassifier(random_state=seed, n_estimators=100),
            use_scaler=False,
        ),
        "KNeighbors (k=5)": _pipeline(
            KNeighborsClassifier(n_neighbors=5),
            use_scaler=True,
        ),
    }


# ----------------------------------------------------------------------------
# --- 4. Metric computation (DeLong CI) ---
# ----------------------------------------------------------------------------


def delong_roc_variance(y_true: np.ndarray, y_scores: np.ndarray) -> (float, float):
    """Estimate the AUC and its variance via DeLong's method.

    Args:
        y_true: Binary ground-truth labels (must be exactly {0, 1}).
        y_scores: Predicted scores, same length as y_true.

    Returns:
        (auc, var_auc); (nan, nan) when only one class is present.

    Raises:
        ValueError: If labels contain values other than 0 and 1.
    """
    y_true = np.array(y_true)
    y_scores = np.array(y_scores)

    if set(np.unique(y_true)) != {0, 1}:
        # AUC is undefined for a single class; signal with NaN instead of raising.
        if len(np.unique(y_true)) == 1:
            return np.nan, np.nan
        raise ValueError("Labels must be binary (0 and 1).")

    pos = y_scores[y_true == 1]
    neg = y_scores[y_true == 0]
    m = len(pos)
    n = len(neg)
    if m == 0 or n == 0:
        return np.nan, np.nan

    # Placement matrix (broadcasted, after the fast implementation in
    # https://github.com/yandexdataschool/roc_comparison):
    # entry (i, j) is 1 if pos[i] > neg[j], 0.5 on ties, else 0.
    greater = (pos[:, None] > neg[None, :]).astype(float)
    tied = (pos[:, None] == neg[None, :]).astype(float)
    placement = greater + 0.5 * tied

    # Structural components: row means per positive, column means per negative.
    S10 = placement.mean(axis=1)
    S01 = placement.T.mean(axis=1)

    # The AUC is the grand mean of the placement matrix.
    auc = np.mean(S10)

    # DeLong variance from the sample variances (ddof=1) of the components.
    var_auc = np.var(S10, ddof=1) / m + np.var(S01, ddof=1) / n

    return auc, var_auc


def delong_roc_ci(
    y_true: np.ndarray, y_scores: np.ndarray, alpha: float = 0.05
) -> (float, float, float, float):
    """Compute the AUC with a DeLong (1 - alpha) CI and a one-sided p-value.

    Returns:
        (auc, lower_ci, upper_ci, p_value). CI bounds are clamped to [0, 1];
        lower/upper/p are all NaN when the variance is unavailable or zero.
    """
    auc, var_auc = delong_roc_variance(y_true, y_scores)

    # Degenerate cases: undefined AUC or non-positive variance estimate.
    if np.isnan(auc) or np.isnan(var_auc) or var_auc <= 0:
        return auc, np.nan, np.nan, np.nan

    se = np.sqrt(var_auc)
    half_width = st.norm.ppf(1 - alpha / 2) * se

    # One-sided p-value against H0: AUC = 0.5 (Ha: AUC > 0.5).
    p_value = st.norm.sf((auc - 0.5) / se) if se > 0 else np.nan

    lower = max(0.0, auc - half_width)
    upper = min(1.0, auc + half_width)

    return auc, lower, upper, p_value


# ----------------------------------------------------------------------------
# --- 5. Core workflow functions ---
# ----------------------------------------------------------------------------


def run_cross_validation(config: dict) -> None:
    """
    Run 5-fold cross-validation of the classic-ML pipelines on the main dataset.

    For each fold column in config['fold_columns']: split slides into
    train/test, optionally apply noise augmentation, fit every model from
    get_ml_models (resampling runs inside each ImbPipeline during fit),
    save each fitted model to config['ml_save_dir'], and log per-fold and
    summary AUC (with DeLong CI) and accuracy.
    """
    logger.info(f"\n{'='*60}")
    logger.info("--- 开始 5-折交叉验证 (传统 ML 方法) ---")
    logger.info(f"数据集: {config['main_data']['name']}")
    logger.info(f"{'='*60}")

    # 1. Load the manifest and build slide-level aggregated features
    try:
        df = load_and_filter_manifest(
            config["main_data"]["tsv_path"], config["main_data"]["feats_path"]
        )
        if df.empty:
            logger.error("Manifest 为空，无法继续交叉验证。")
            return

        # Verify the label column exists and log its distribution
        label_col = config["task_label_col"]
        if label_col not in df.columns:
            logger.error(f"标签列 '{label_col}' 不在 DataFrame 中。")
            return
        logger.info(f"标签分布 (列: {label_col}):")
        logger.info(f"\n{df[label_col].value_counts()}")

        feature_map, label_map = create_aggregated_features(
            df,
            config["main_data"]["feats_path"],
            config["main_data"]["label_col"],
            config["aggregation_method"],
        )

        if not feature_map:
            logger.error("未能创建任何聚合特征，终止交叉验证。")
            return

    except Exception as e:
        logger.error(f"加载数据或创建特征时发生严重错误: {e}", exc_info=True)
        return

    # 2. Prepare models and result storage
    os.makedirs(config["ml_save_dir"], exist_ok=True)
    logger.info(f"传统 ML 模型将保存到: {config['ml_save_dir']}")

    # Pass the resampling config so pipelines can embed a sampler step.
    # NOTE(review): the same pipeline objects are reused across folds; sklearn
    # estimators re-fit from scratch on every .fit call, so folds stay independent.
    models_to_test = get_ml_models(config["seed"], config["resampling"])

    all_models_results = {name: {"aucs": [], "accs": []} for name in models_to_test}
    fold_columns = config["fold_columns"]

    # 3. Cross-validation loop
    for fold_idx, current_split_col in enumerate(fold_columns):
        logger.info(
            f"\n--- (ML) 开始第 {fold_idx + 1}/{len(fold_columns)} 折 ({current_split_col}) ---"
        )

        if current_split_col not in df.columns:
            logger.warning(
                f"折叠列 '{current_split_col}' 不在 DataFrame 中，跳过此折叠。"
            )
            continue

        # 4. Assemble this fold's train/test split
        train_ids = df[df[current_split_col] == "train"]["slide_id"].unique()
        test_ids = df[df[current_split_col] == "test"]["slide_id"].unique()

        # Build X/y from feature_map and label_map, keeping only IDs that
        # were successfully aggregated
        X_train = [feature_map[sid] for sid in train_ids if sid in feature_map]
        y_train = [label_map[sid] for sid in train_ids if sid in label_map]

        X_test = [feature_map[sid] for sid in test_ids if sid in feature_map]
        y_test = [label_map[sid] for sid in test_ids if sid in label_map]

        if not X_train or not X_test:
            logger.warning(f"Fold {fold_idx + 1} 中缺少训练或测试数据 (聚合后)，跳过。")
            continue

        # Convert to numpy arrays
        X_train = np.array(X_train)
        y_train = np.array(y_train)
        X_test = np.array(X_test)
        y_test = np.array(y_test)

        # Apply noise augmentation — this happens BEFORE the ImbPipeline's
        # resampling (SMOTE etc.), which runs inside model.fit below
        X_train_aug, y_train_aug = augment_with_noise(
            X_train, y_train, config["noise_augmentation"]
        )

        logger.info(
            f"(ML) Fold {fold_idx + 1}: 原始 Train samples = {len(X_train)}, 增强后 Train samples = {len(X_train_aug)}, Test samples = {len(X_test)}"
        )

        # Log (augmented) train and test label distributions
        try:
            train_labels, train_counts = np.unique(y_train_aug, return_counts=True)
            test_labels, test_counts = np.unique(y_test, return_counts=True)

            train_counts_str = ", ".join(
                [f"类别 {l}: {c} 个" for l, c in zip(train_labels, train_counts)]
            )
            test_counts_str = ", ".join(
                [f"类别 {l}: {c} 个" for l, c in zip(test_labels, test_counts)]
            )

            logger.info(
                f"Fold {fold_idx + 1} 训练集(增强后)标签分布: {train_counts_str}"
            )
            logger.info(f"Fold {fold_idx + 1} 测试集标签分布: {test_counts_str}")

            # Warn when the minority class is too small for SMOTE's neighbors
            if config["resampling"]["enabled"] and config["resampling"]["method"] in [
                "SMOTE",
                "SMOTEENN",
            ]:
                min_class_count = np.min(train_counts)
                # k_neighbors=3 (set in get_resampler)
                if min_class_count <= 3:
                    logger.warning(
                        f"Fold {fold_idx + 1}: 训练集中最小类别样本数 ({min_class_count}) 过低, SMOTE (k=3) 可能会失败或效果不佳。"
                    )

        except Exception:
            pass  # distribution logging is best-effort only

        # 5. Train and evaluate every model on this fold
        for model_name, model in models_to_test.items():
            try:
                logger.info(f"   训练 {model_name}...")

                # Fit on the augmented data; the ImbPipeline applies SMOTE
                # (or other sampler) internally during fit
                model.fit(X_train_aug, y_train_aug)

                # --- Persist the fitted pipeline for later external testing ---
                model_save_path = os.path.join(
                    config["ml_save_dir"], f"{model_name}_fold_{fold_idx}.joblib"
                )
                try:
                    joblib.dump(model, model_save_path)
                    logger.info(f"  模型已保存到: {model_save_path}")
                except Exception as e:
                    logger.error(
                        f"   保存模型 {model_name} (Fold {fold_idx}) 失败: {e}"
                    )

                # --- Evaluate ---
                # ImbPipeline automatically skips the sampler step at predict time
                y_pred_proba = model.predict_proba(X_test)[:, 1]  # for AUC
                y_pred_class = model.predict(X_test)  # for accuracy

                # AUC with DeLong confidence interval
                auc, auc_ci_low, auc_ci_high, p_val = delong_roc_ci(
                    y_test, y_pred_proba
                )
                acc = accuracy_score(y_test, y_pred_class)

                if np.isnan(auc):
                    logger.warning(
                        f"   {model_name}: Test set 只有一个类别, 无法计算 AUC。"
                    )

                logger.info(
                    f"   {model_name} 结果: AUC = {auc:.4f} (95% CI: {auc_ci_low:.4f}-{auc_ci_high:.4f}, p={p_val:.4g}), Accuracy = {acc:.4f}"
                )

                all_models_results[model_name]["aucs"].append(auc)
                all_models_results[model_name]["accs"].append(acc)

            except Exception as e:
                logger.error(f"   训练/评估 {model_name} 时出错: {e}", exc_info=True)
                if "k_neighbors" in str(e):
                    logger.error(
                        "错误可能与 SMOTE 的 k_neighbors 有关。尝试减少少数类样本，或在 get_resampler 中减小 k_neighbors。"
                    )
                # Record NaN so per-model fold lists stay aligned
                all_models_results[model_name]["aucs"].append(np.nan)
                all_models_results[model_name]["accs"].append(np.nan)

    # --- Cross-validation summary (NaN folds are excluded via nanmean/nanstd) ---
    logger.info(f"\n{'='*50}")
    logger.info("--- 传统 ML 5-折交叉验证总结 (Mean-Pooled Features) ---")
    logger.info(f"数据集: {config['main_data']['name']}")
    logger.info(f"噪声增强: {config['noise_augmentation']['enabled']}")
    logger.info(
        f"重采样: {config['resampling']['enabled']} (Method: {config['resampling']['method']})"
    )
    logger.info(f"{'='*50}")

    for model_name, results in all_models_results.items():
        mean_auc = np.nanmean(results["aucs"])
        std_auc = np.nanstd(results["aucs"])
        mean_acc = np.nanmean(results["accs"])
        std_acc = np.nanstd(results["accs"])

        logger.info(f"模型: {model_name}")
        logger.info(f"   平均 AUC: {mean_auc:.4f} \u00b1 {std_auc:.4f}")
        logger.info(f"   平均 Accuracy: {mean_acc:.4f} \u00b1 {std_acc:.4f}")
        logger.info("-" * 20)


def save_prediction_errors(
    slide_ids: list,
    y_true: np.ndarray,
    y_pred_class: np.ndarray,
    y_pred_proba: np.ndarray,
    config: dict,
    model_name: str,
) -> None:
    """Write misclassified samples to a CSV under config['error_log_dir'].

    slide_ids must be index-aligned with the three prediction arrays. Any
    failure is logged and swallowed so error reporting never aborts a run.
    """
    try:
        error_log_dir = config["error_log_dir"]
        os.makedirs(error_log_dir, exist_ok=True)

        # Indices where prediction and ground truth disagree
        wrong_idx = np.where(y_true != y_pred_class)[0]
        logger.info(
            f"在 {len(y_true)} 个样本中发现 {len(wrong_idx)} 个错误预测。"
        )

        if len(wrong_idx) > 0:
            # One record per misclassified slide, aligned by index
            records = [
                {
                    "slide_id": slide_ids[i],
                    "true_label": y_true[i],
                    "predicted_label": y_pred_class[i],
                    "predicted_proba_class_1": y_pred_proba[i],
                }
                for i in wrong_idx
            ]

            error_filename = (
                f"errors_{model_name}_external_{config['external_test']['name']}.csv"
            )
            error_save_path = os.path.join(error_log_dir, error_filename)

            pd.DataFrame(records).to_csv(error_save_path, index=False)
            logger.info(f"错误预测样本已保存到: {error_save_path}")

    except Exception as e:
        logger.error(f"保存错误预测样本时失败: {e}", exc_info=True)


def run_external_test(config: dict) -> None:
    """
    Load one saved sklearn/imblearn model and evaluate it on the external dataset.

    Note: any resampler (e.g. SMOTE) lives inside the saved ImbPipeline, and
    ImbPipeline skips sampler steps at .predict()/.predict_proba() time, so the
    loaded model can be applied to test data without modification.
    """
    if not config["external_test"]["enabled"]:
        logger.info(f"\n{'='*60}")
        logger.info("--- 外部 ML 测试已禁用，跳过。 ---")
        logger.info(f"{'='*60}")
        return

    logger.info(f"\n{'='*60}")
    logger.info("--- 开始 ML 外部数据集测试 (单模型) ---")
    logger.info(f"数据集: {config['external_test']['name']}")
    logger.info(f"{'='*60}")

    model_path = config["external_test"]["model_to_test_path"]
    ext_tsv_path = config["external_test"]["tsv_path"]
    ext_feats_path = config["external_test"]["feats_path"]
    ext_label_col = config["external_test"]["label_col"]

    try:
        # 1. Verify the model file exists
        if not os.path.exists(model_path):
            logger.error(f"找不到模型文件: {model_path}")
            logger.error("请先运行交叉验证以生成模型，或检查 CONFIG 中的路径。")
            return

        # 2. Load the external manifest and build slide-level features
        external_df = load_and_filter_manifest(ext_tsv_path, ext_feats_path)
        if external_df.empty:
            logger.error("外部 Manifest 为空，无法继续测试。")
            return

        ext_feature_map, ext_label_map = create_aggregated_features(
            external_df, ext_feats_path, ext_label_col, config["aggregation_method"]
        )

        if not ext_feature_map:
            logger.error("无法为外部数据创建任何聚合特征。")
            return

        # 3. Build X/y with slide_id order preserved:
        # iterate external_df's unique slide_ids to fix ordering, dropping any
        # that failed aggregation (absent from the feature map)
        slide_ids_in_order = [
            sid for sid in external_df["slide_id"].unique() if sid in ext_feature_map
        ]

        if not slide_ids_in_order:
            logger.error("在 feature_map 中没有找到有效的 slide_id。")
            return

        X_external = np.array([ext_feature_map[sid] for sid in slide_ids_in_order])
        y_external = np.array([ext_label_map[sid] for sid in slide_ids_in_order])

        logger.info(f"准备好 {len(X_external)} 个外部测试样本。")

        # 4. Log the class distribution of the external test labels
        try:
            unique_labels, counts = np.unique(y_external, return_counts=True)
            class_counts_str = ", ".join(
                [
                    f"类别 {label}: {count} 个"
                    for label, count in zip(unique_labels, counts)
                ]
            )
            logger.info(f"外部测试集类别分布: {class_counts_str}")
        except Exception as e:
            logger.warning(f"无法计算测试集类别分布: {e}")

        # 5. Load the serialized pipeline
        logger.info(f"加载模型: {model_path}")
        model = joblib.load(model_path)
        model_name_simple = os.path.basename(model_path).replace(".joblib", "")

        # 6. Predict
        logger.info("开始评估...")
        y_pred_proba = model.predict_proba(X_external)[:, 1]  # for AUC
        y_pred_class = model.predict(X_external)  # for accuracy

        # 7. Persist misclassified samples for later inspection
        save_prediction_errors(
            slide_ids_in_order,
            y_external,
            y_pred_class,
            y_pred_proba,
            config,
            model_name_simple,
        )

        # 8. Log final metrics (AUC with DeLong CI)
        auc, auc_ci_low, auc_ci_high, p_val = delong_roc_ci(y_external, y_pred_proba)
        acc = accuracy_score(y_external, y_pred_class)

        if np.isnan(auc):
            logger.warning("外部测试集只有一个类别, 无法计算 AUC。")
            logger.warning(f"唯一的类别是: {np.unique(y_external)[0]}")

        logger.info(f"--- 外部 ML 测试结果 ---")
        logger.info(f"模型: {model_path}")
        logger.info(
            f" 外部测试 AUC: {auc:.4f} (95% CI: {auc_ci_low:.4f}-{auc_ci_high:.4f}, p={p_val:.4g})"
        )
        logger.info(f" 外部测试 Accuracy: {acc:.4f}")
        logger.info(f"{'='*60}")

    except Exception as e:
        logger.error(f"运行 ML 外部测试时发生严重错误: {e}", exc_info=True)


# --- [新增] 批量测试函数 ---
def run_batch_external_test(config: dict) -> None:
    """
    Load *every* ``.joblib`` model found in ``config["ml_save_dir"]`` and
    evaluate each one on the external test dataset.

    This is invoked when ``config["run_mode"] == "test_only"``.  The external
    manifest, features and labels are loaded and aggregated exactly once and
    then reused for every model, so adding models does not re-read the data.

    Args:
        config: Global configuration dict.  Reads ``ml_save_dir``,
            ``aggregation_method`` and the ``external_test`` sub-dict
            (``enabled``, ``name``, ``tsv_path``, ``feats_path``,
            ``label_col``).

    Returns:
        None.  Per-model metrics go to the logger; misclassified slides are
        written out via ``save_prediction_errors``.
    """
    if not config["external_test"]["enabled"]:
        logger.info(f"\n{'='*60}")
        logger.info("--- 外部 ML (批量) 测试已禁用，跳过。 ---")
        logger.info(f"{'='*60}")
        return

    logger.info(f"\n{'='*60}")
    logger.info("--- 开始 ML 外部数据集 (批量) 测试 ---")
    logger.info(f"--- 将测试 {config['ml_save_dir']} 中的所有模型 ---")
    logger.info(f"数据集: {config['external_test']['name']}")
    logger.info(f"{'='*60}")

    model_dir = config["ml_save_dir"]
    ext_tsv_path = config["external_test"]["tsv_path"]
    ext_feats_path = config["external_test"]["feats_path"]
    ext_label_col = config["external_test"]["label_col"]

    try:
        # 1. Make sure the model directory exists.
        if not os.path.exists(model_dir):
            logger.error(f"找不到模型目录: {model_dir}")
            return

        # 2. Collect all .joblib models.  Sorted for a deterministic,
        #    reproducible evaluation order — os.listdir order is
        #    filesystem-dependent.
        model_files = sorted(
            f for f in os.listdir(model_dir) if f.endswith(".joblib")
        )
        if not model_files:
            logger.warning(f"在 {model_dir} 中没有找到 .joblib 模型文件。")
            return

        logger.info(f"在 {model_dir} 中找到了 {len(model_files)} 个模型。")

        # 3. Load external data and build slide-level aggregated features
        #    (done once, shared by all models below).
        external_df = load_and_filter_manifest(ext_tsv_path, ext_feats_path)
        if external_df.empty:
            logger.error("外部 Manifest 为空，无法继续测试。")
            return

        ext_feature_map, ext_label_map = create_aggregated_features(
            external_df, ext_feats_path, ext_label_col, config["aggregation_method"]
        )
        if not ext_feature_map:
            logger.error("无法为外部数据创建任何聚合特征。")
            return

        # 4. Assemble X/y in manifest order, keeping only slide_ids that
        #    actually produced features (keeps features and labels aligned).
        slide_ids_in_order = [
            sid for sid in external_df["slide_id"].unique() if sid in ext_feature_map
        ]
        if not slide_ids_in_order:
            logger.error("在 feature_map 中没有找到有效的 slide_id。")
            return

        X_external = np.array([ext_feature_map[sid] for sid in slide_ids_in_order])
        y_external = np.array([ext_label_map[sid] for sid in slide_ids_in_order])

        logger.info(f"准备好 {len(X_external)} 个外部测试样本。")

        # 5. Log the class distribution of the external test labels.
        try:
            unique_labels, counts = np.unique(y_external, return_counts=True)
            class_counts_str = ", ".join(
                f"类别 {label}: {count} 个"
                for label, count in zip(unique_labels, counts)
            )
            logger.info(f"外部测试集类别分布: {class_counts_str}")
        except Exception as e:
            logger.warning(f"无法计算测试集类别分布: {e}")

        logger.info(f"{'-'*60}")

        # 6. Evaluate every model against the shared X/y.
        results_summary = []
        for model_filename in model_files:
            model_path = os.path.join(model_dir, model_filename)
            # splitext strips only the trailing extension; str.replace would
            # remove ".joblib" anywhere inside the file name.
            model_name_simple = os.path.splitext(model_filename)[0]

            logger.info(f"--- 正在测试模型: {model_filename} ---")
            try:
                # 6a. Load the fitted model/pipeline.
                model = joblib.load(model_path)

                # 6b. Predict; column 1 of predict_proba is the positive class.
                y_pred_proba = model.predict_proba(X_external)[:, 1]
                y_pred_class = model.predict(X_external)

                # 6c. Persist misclassified slides for later inspection.
                save_prediction_errors(
                    slide_ids_in_order,
                    y_external,
                    y_pred_class,
                    y_pred_proba,
                    config,
                    model_name_simple,  # model file stem identifies the run
                )

                # 6d. AUC with DeLong 95% CI, plus accuracy.
                auc, auc_ci_low, auc_ci_high, p_val = delong_roc_ci(
                    y_external, y_pred_proba
                )
                acc = accuracy_score(y_external, y_pred_class)

                if np.isnan(auc):
                    logger.warning(" 外部测试集只有一个类别, 无法计算 AUC。")

                logger.info(
                    f"  结果: AUC = {auc:.4f} (95% CI: {auc_ci_low:.4f}-{auc_ci_high:.4f}, p={p_val:.4g}), Accuracy = {acc:.4f}"
                )
                results_summary.append(
                    {"model": model_filename, "auc": auc, "acc": acc}
                )

            except Exception as e:
                # A single bad model must not abort the batch; record NaNs
                # so the model still shows up in the final summary.
                logger.error(f"  评估模型 {model_filename} 时失败: {e}", exc_info=True)
                results_summary.append(
                    {"model": model_filename, "auc": np.nan, "acc": np.nan}
                )

            logger.info(f"{'-'*60}")

        # 7. Final summary, sorted by AUC ascending with NaNs last.
        #    Sort key is the tuple (isnan, auc): False (0, valid AUC) sorts
        #    before True (1, NaN), and valid entries order by AUC value.
        try:
            results_summary_sorted = sorted(
                results_summary, key=lambda x: (np.isnan(x["auc"]), x["auc"])
            )
        except Exception as e:
            logger.warning(f"排序结果时出错: {e}。将打印未排序的结果。")
            results_summary_sorted = results_summary

        logger.info(f"\n{'='*60}")
        logger.info(
            f"--- 批量外部测试总结 (数据集: {config['external_test']['name']}) [按 AUC 升序排列] ---"
        )
        for res in results_summary_sorted:
            logger.info(
                f"  模型: {res['model']:<30} | AUC: {res['auc']:.4f} | Acc: {res['acc']:.4f}"
            )
        logger.info(f"{'='*60}")

    except Exception as e:
        logger.error(f"运行 ML 批量外部测试时发生严重错误: {e}", exc_info=True)


# ----------------------------------------------------------------------------
# --- 6. Main execution function ---
# ----------------------------------------------------------------------------


def main():
    """
    Script entry point.

    Configures logging and the runtime environment, then dispatches the
    training (cross-validation) and external-testing phases according to
    ``CONFIG["run_mode"]`` ('full', 'train_only' or 'test_only').
    """
    # Set up logging/environment first so everything below is captured.
    # (CONFIG is defined at module level, so it is used directly.)
    setup_logging(CONFIG["log_dir"])
    setup_environment(CONFIG)

    banner = "*" * 80
    logger.info(f"\n{banner}")
    logger.info("--- 实验开始 ---")

    # run_mode controls which phases execute below.
    run_mode = CONFIG.get("run_mode", "full")
    logger.info(f"--- 运行模式: {run_mode} ---")
    logger.info(f"{banner}")

    # Phase 1: cross-validation (training) — only for 'full' / 'train_only'.
    if run_mode not in ("full", "train_only"):
        logger.info("--- (ML) 跳过交叉验证 (训练) 阶段 ---")
    else:
        try:
            run_cross_validation(CONFIG)
        except Exception as e:
            logger.critical(f"交叉验证过程中发生未捕获的致命错误: {e}", exc_info=True)

    # Phase 2: external testing — mode decides single-model vs batch.
    if run_mode == "full":
        logger.info(
            "--- (ML) 运行单模型外部测试 (由 config['external_test']['model_to_test_path'] 定义) ---"
        )
        try:
            # Tests the single model named in the config.
            run_external_test(CONFIG)
        except Exception as e:
            logger.critical(f"外部测试过程中发生未捕获的致命错误: {e}", exc_info=True)
    elif run_mode == "test_only":
        logger.info(
            "--- (ML) 运行批量模型外部测试 (测试 'ml_save_dir' 中的所有模型) ---"
        )
        try:
            # Tests every .joblib model found in the save directory.
            run_batch_external_test(CONFIG)
        except Exception as e:
            logger.critical(
                f"批量外部测试过程中发生未捕获的致命错误: {e}", exc_info=True
            )
    else:
        # 'train_only' lands here: no external testing.
        logger.info("--- (ML) 跳过外部测试阶段 ---")

    logger.info(f"\n{banner}")
    logger.info("--- 所有任务已完成 ---")
    logger.info(f"{banner}")

# Standard entry guard: run main() only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    main()
