"""
MIL 训练的辅助函数：
- set_seed: 设置随机种子
- load_manifest: 加载和过滤 manifest 文件
- get_model: 模型工厂
- get_dataloaders: 创建训练和验证 DataLoader
- calculate_metrics_with_ci_pvalue: 计算统计指标
"""

import os
import numpy as np
import torch
import torch.nn as nn
import pandas as pd
from torch.utils.data import DataLoader
import logging
import sys
from scipy import stats
import warnings
from typing import Dict, Union, Tuple, Optional, List

# These are assumed to be your local project modules
try:
    from trident.fpt.models.mil_models import (
        BinaryClassificationModel,
        DSMILModel,
        TransMILModel,
        HybridAttnMILModel,
        LinearProbeModel,
        MAEMILModel,
        CLAMModel,
        TransMIL_CLSToken,
        MAEMILModel_CLSToken,
        # TransMIL_GatedAttention,
        # DomainClassifier,
 
    )
    from fpt.m3dataset.m3il_dataset import H5Dataset, ExternalH5Dataset
    from fpt.lumen_logger import setup_logging
except ImportError:
    # 如果作为独立脚本运行或结构不同，提供一个回退
    print(
        "Warning: Could not perform relative imports. "
        "Assuming models/dataset/logger are in the same path or PYTHONPATH."
    )

    from ..models.mil_models import (
        BinaryClassificationModel,
        DSMILModel,
        TransMILModel,
        HybridAttnMILModel,
        LinearProbeModel,
        MAEMILModel,
        CLAMModel,
        TransMIL_CLSToken,
        MAEMILModel_CLSToken,
        # DomainClassifier
    )
    from ..m3dataset.m3il_dataset import H5Dataset, ExternalH5Dataset
    from ..lumen_logger import setup_logging


def set_seed(seed_value: int):
    """Seed all relevant RNGs for reproducibility.

    Covers Python's built-in `random`, NumPy, and PyTorch (CPU and all CUDA
    devices), and forces cuDNN into deterministic mode (benchmark auto-tuning
    disabled, at some performance cost).

    Args:
        seed_value: The seed applied to every RNG.
    """
    import random  # stdlib; local import keeps the module import block untouched

    random.seed(seed_value)  # was missing: stdlib `random` is also used by many libs
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)
    # Deterministic cuDNN kernels; benchmark=False prevents non-deterministic
    # algorithm auto-selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    logging.info(f"随机种子设置为 {seed_value}")


def load_manifest(
    manifest_path: str, h5_feats_path: str, label_col: str
) -> Optional[pd.DataFrame]:
    """Load a TSV/Parquet manifest and drop rows without an on-disk H5 feature file.

    On first load from TSV, a Parquet copy is written next to it as a
    best-effort cache for faster subsequent loads (cache-write failures, e.g.
    a missing parquet engine, are logged and ignored).

    Args:
        manifest_path: Path to the manifest file (.tsv; a sibling .parquet
            cache is preferred when it exists).
        h5_feats_path: Directory containing per-slide ``<slide_id>.h5`` files.
        label_col: Name of the required label column in the manifest.

    Returns:
        The filtered DataFrame, or None on any unrecoverable error
        (missing paths, missing columns, unreadable files, or no usable rows).
    """
    logger = logging.getLogger()
    logger.info(f"正在加载 manifest: {manifest_path}")

    if not os.path.exists(manifest_path):
        logger.error(f"Manifest 文件未找到: {manifest_path}")
        return None

    if not os.path.exists(h5_feats_path):
        logger.error(f"特征路径未找到: {h5_feats_path}")
        return None

    # 1. Load the DataFrame, preferring a previously written Parquet cache.
    # splitext (instead of str.replace) so a ".tsv" substring elsewhere in the
    # path cannot corrupt the cache file name.
    parquet_path = os.path.splitext(manifest_path)[0] + ".parquet"
    try:
        if os.path.exists(parquet_path):
            logger.debug(f"正在从 Parquet 加载: {parquet_path}")
            df = pd.read_parquet(parquet_path)
        else:
            logger.info(f"正在从 TSV 加载 (首次): {manifest_path}")
            df = pd.read_csv(manifest_path, sep="\t")
            # Best-effort cache: a failed Parquet write must not abort the
            # run, since the TSV load already succeeded.
            try:
                logger.info(f"正在保存为 Parquet 以加快未来加载速度: {parquet_path}")
                df.to_parquet(parquet_path)
            except Exception as e:
                logger.warning(f"Parquet cache write failed, continuing with TSV data: {e}")
    except Exception as e:
        logger.error(f"加载 manifest 文件 {manifest_path} 时出错: {e}")
        return None

    # Required columns must be present before any filtering.
    if "slide_id" not in df.columns:
        logger.error(f"Manifest 文件 {manifest_path} 中缺少 'slide_id' 列。")
        return None

    if label_col not in df.columns:
        logger.error(f"Manifest 文件 {manifest_path} 中缺少标签列: '{label_col}'。")
        return None

    logger.info(f"Manifest 加载成功。总样本数 (原始): {len(df)}")

    # 2. Collect the base names (suffix ".h5" stripped) of available feature
    # files. Suffix slicing (not str.replace) so an interior ".h5" in a
    # filename is preserved.
    try:
        available_files_set = {
            f[: -len(".h5")] for f in os.listdir(h5_feats_path) if f.endswith(".h5")
        }
    except Exception as e:
        logger.error(f"读取特征路径 {h5_feats_path} 时出错: {e}")
        return None

    if not available_files_set:
        logger.error(f"特征路径 {h5_feats_path} 中未找到 .h5 文件。")
        return None

    # 3. Keep only rows whose H5 feature file actually exists.
    original_count = len(df)
    df = df[df["slide_id"].isin(available_files_set)].copy()
    filtered_count = len(df)

    if original_count > filtered_count:
        logger.warning(
            f"过滤了 {original_count - filtered_count} 个样本，因为在 {h5_feats_path} 中找不到对应的 .h5 文件。"
        )

    if filtered_count == 0:
        logger.error("过滤后无可用样本。请检查 slide_id 和 H5 文件名是否匹配。")
        return None

    logger.info(f"Manifest 准备就绪。可用样本数 (过滤后): {filtered_count}")
    return df


def get_model(model_type: str, input_dim: int) -> nn.Module:
    """Instantiate and return the MIL model selected by `model_type`.

    Args:
        model_type: Key naming the architecture to build.
        input_dim: Input feature dimension forwarded to the model constructor
            (not used by "transmil", whose constructor takes no arguments).

    Returns:
        A freshly constructed model instance.

    Raises:
        ValueError: If `model_type` does not match any known architecture.
    """
    # Factory-dict dispatch; lambdas defer construction until the key matches.
    factories = {
        "abmil": lambda: BinaryClassificationModel(input_feature_dim=input_dim),
        "dsmil": lambda: DSMILModel(input_feature_dim=input_dim),
        "transmil": lambda: TransMILModel(),
        "hybrid_mil": lambda: HybridAttnMILModel(input_feature_dim=input_dim),
        "linearprobe": lambda: LinearProbeModel(input_feature_dim=input_dim),
        "TransMIL_CLSToken": lambda: TransMIL_CLSToken(input_feature_dim=input_dim),
        "maemil": lambda: MAEMILModel(input_feature_dim=input_dim),
        "MAEMILModel_CLSToken": lambda: MAEMILModel_CLSToken(input_feature_dim=input_dim),
        "CLAMModel": lambda: CLAMModel(input_feature_dim=input_dim),
    }

    factory = factories.get(model_type)
    if factory is None:
        raise ValueError(f"未知的 model_type: {model_type}")
    return factory()


def get_dataloaders(
    df: pd.DataFrame,
    feats_path: str,
    fold_col: str,
    label_col: str,
    batch_size: int,
    seed: int,
    train_patches_sampled: int,
) -> Tuple[DataLoader, DataLoader]:
    """Create the training and validation DataLoaders for one fold.

    Args:
        df: Manifest DataFrame containing `fold_col` and `label_col`.
        feats_path: Directory of per-slide H5 feature files.
        fold_col: Column assigning each row to the "train"/"test" split.
        label_col: Label column name.
        batch_size: Training batch size (validation always uses 1).
        seed: Base seed for per-worker RNG initialization.
        train_patches_sampled: Patch count passed to H5Dataset for both splits.

    Returns:
        (train_loader, val_loader)
    """

    # Offset the seed by worker id: the previous constant seed made every
    # DataLoader worker draw identical random numbers (duplicate sampling).
    def _seed_worker(worker_id: int) -> None:
        np.random.seed(seed + worker_id)

    # 1. Training dataset and loader.
    train_dataset = H5Dataset(
        feats_path,
        df,
        "train",
        fold_col,
        label_col,
        train_patches_sampled,
    )
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        worker_init_fn=_seed_worker,
    )

    # 2. Validation dataset and loader.
    # The "test" split label drives patch loading inside H5Dataset.
    # NOTE(review): validation also receives train_patches_sampled; the
    # commented-out -1 suggests "all patches" may be the intended protocol —
    # confirm before changing.
    val_dataset = H5Dataset(
        feats_path,
        df,
        "test",
        fold_col,
        label_col,
        train_patches_sampled,
    )
    # Validation batch size must be 1.
    val_loader = DataLoader(
        val_dataset,
        batch_size=1,
        shuffle=False,
        worker_init_fn=_seed_worker,
    )

    return train_loader, val_loader


def calculate_metrics_with_ci_pvalue(
    data_list: List[float], baseline_for_p_val: Optional[float] = None
) -> Tuple[float, float, Tuple[float, float], float]:
    """Compute mean, std, 95% CI and an optional one-sample t-test p-value.

    NaN entries are dropped before any statistic is computed. With no valid
    data every statistic is NaN; with a single valid point only the mean is
    defined.

    NOTE(review): std is the population std (ddof=0) while the CI is built
    from the sample standard error (ddof=1) — confirm this asymmetry is
    intended.

    Args:
        data_list: Metric values, possibly containing NaNs.
        baseline_for_p_val: If given, tests H0: mean(data) == baseline.

    Returns:
        (mean, std, (ci_low, ci_high), p_value)
    """
    nan_result = (np.nan, np.nan, (np.nan, np.nan), np.nan)

    # Degenerate inputs can trigger RuntimeWarnings from numpy; silence them.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)

        if not data_list:
            return nan_result

        valid = np.asarray(data_list)
        valid = valid[~np.isnan(valid)]

        if valid.size == 0:
            return nan_result
        if valid.size == 1:
            # A lone point has a mean but no spread.
            return np.mean(valid), np.nan, (np.nan, np.nan), np.nan

        mean = np.mean(valid)
        std = np.std(valid)
        stderr = stats.sem(valid)

        # 95% CI from the t distribution; all-equal data (stderr == 0)
        # collapses the interval onto the mean.
        if stderr > 0:
            ci_95 = stats.t.interval(0.95, valid.size - 1, loc=mean, scale=stderr)
        else:
            ci_95 = (mean, mean)

        # Optional one-sample t-test against the supplied baseline.
        p_val = np.nan
        if baseline_for_p_val is not None:
            try:
                _, p_val = stats.ttest_1samp(valid, baseline_for_p_val)
            except Exception as e:
                logging.getLogger().warning(f"计算 P 值时出错: {e}")
                p_val = np.nan

    return mean, std, ci_95, p_val
