import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import h5py
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import roc_auc_score
import logging
import sys
from trident.slide_encoder_models import ABMILSlideEncoder
from tqdm import tqdm
from pathlib import Path

# --- 导入传统 ML 库 ---
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score

# Restrict CUDA to physical GPU 1. (The original inline comment claimed
# "GPUs 0 and 2", which did not match the value actually set.)
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# --- end of imports ---


# --- logging setup ---
# Log simultaneously to a file (truncated on every run) and to stdout.
log_file_path = "multimodal_cross_val_training2.log"

_file_handler = logging.FileHandler(log_file_path, mode="w")
_console_handler = logging.StreamHandler(sys.stdout)

# Drop any handlers a previous basicConfig() call may have installed, so the
# configuration below always takes effect.
logging.getLogger().handlers = []
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[_file_handler, _console_handler],
)

logger = logging.getLogger()
logger.info("日志系统已启动。")
# --- end of logging setup ---


# --- data loading ---
split_file_path = "/data0/lcy/Patho-Bench/tools/zzylnm_slices_splits/splits/k=all.tsv"
parquet_path = split_file_path.replace(".tsv", ".parquet")

# Prefer the Parquet cache when it exists; otherwise read the TSV once and
# write the cache for faster future loads.
if os.path.exists(parquet_path):
    logger.info(f"正在从更快的 Parquet 文件加载: {parquet_path}")
    df = pd.read_parquet(parquet_path)
else:
    logger.info(f"首次加载 TSV 文件（可能需要一些时间）: {split_file_path}")
    df = pd.read_csv(split_file_path, sep="\t")
    logger.info(f"正在保存为 Parquet 格式以加快未来加载速度: {parquet_path}")
    df.to_parquet(parquet_path)

# Modality 1 (WSI) feature directory.
feats_path_mod1 = "/data0/lcy/data/LNM/LNM_slices_conch_v1_processed/20x_512px_0px_overlap/features_conch_v1"
# Strip only the trailing ".h5". The previous f.replace(".h5", "") also
# removed ".h5" occurring in the middle of a filename (e.g. "a.h5b.h5" ->
# "ab"), silently corrupting slide ids; slicing off the suffix is exact.
available_files_mod1 = [
    f[: -len(".h5")] for f in os.listdir(feats_path_mod1) if f.endswith(".h5")
]
available_files_mod1_set = set(available_files_mod1)

# Keep only rows whose modality-1 .h5 feature file actually exists on disk.
initial_count = len(df["slide_id"].unique())
df = df[df["slide_id"].isin(available_files_mod1_set)].copy()
filtered_count = len(df["slide_id"].unique())
logger.info(
    f"模态1 (WSI) 过滤：从 {initial_count} 个 slide_id 减少到 {filtered_count} 个 (因为在 {feats_path_mod1} 中找到了 .h5 文件)"
)


# --- modality 2 (3D VISTA) helper ---
def build_vista3d_map(vista_root_dir):
    """Scan the VISTA 3D prediction root and map slide_id -> prediction file.

    Expected directory layout: {root}/{slide_id}_0000/{slide_id}_0000_pred.h5

    Args:
        vista_root_dir: root directory containing one sub-directory per case.

    Returns:
        dict mapping slide_id (str) to the pathlib.Path of its *_pred.h5 file.
    """
    logger.info(f"开始构建模态2 (3D VISTA) 路径映射... 根目录: {vista_root_dir}")
    mod2_path_map = {}
    root_dir = Path(vista_root_dir)

    for case_dir in root_dir.iterdir():
        if not case_dir.is_dir():
            continue

        # Directories are expected to be named "{slide_id}_0000".
        if not case_dir.name.endswith("_0000"):
            logger.warning(f"跳过格式不正确的目录: {case_dir.name}")
            continue

        # Strip only the trailing "_0000". The previous
        # name.split("_0000")[0] truncated at the FIRST occurrence, so an id
        # like "ab_0000cd" (directory "ab_0000cd_0000") was mis-mapped to
        # "ab"; slicing off the suffix is exact.
        slide_id = case_dir.name[: -len("_0000")]

        # Locate the *_pred.h5 prediction file inside the case directory.
        pred_files = list(case_dir.glob("*_pred.h5"))

        if not pred_files:
            # Silently skip cases without a prediction file.
            continue

        if len(pred_files) > 1:
            logger.warning(
                f"在 {case_dir} 中有多个 *_pred.h5 文件, 仅使用第一个: {pred_files[0]}"
            )

        mod2_path_map[slide_id] = pred_files[0]

    logger.info(
        f"模态2 (3D VISTA) 映射构建完毕。共找到 {len(mod2_path_map)} 个对应的 .h5 文件。"
    )
    return mod2_path_map


# --- modality 2 (3D VISTA) paths ---
feats_path_mod2_root = "/data0/lcy/data/LNM/vista3d/prediction"
vista_3d_map = build_vista3d_map(feats_path_mod2_root)

# Filter df a second time: keep only rows present in BOTH modalities.
# Modality 2 is keyed by case_id rather than slide_id.
available_files_mod2_set = set(vista_3d_map)
df = df[df["case_id"].isin(available_files_mod2_set)].copy()
final_count = len(df["case_id"].unique())
logger.info(
    f"多模态过滤：从 {filtered_count} 个 slide_id 减少到 {final_count} 个 (因为在 模态2 中也找到了 .h5 文件)"
)

# Label-distribution sanity check. `task_label_col` is defined here because
# both the count table below and the dataset class rely on it.
task_label_col = "label"
df_counts = df[task_label_col].value_counts().reset_index()
df_counts.columns = [task_label_col, "Count"]
logger.info(f"最终标签分布:\n{df_counts}")


# --- configuration ---
fold_columns = [f"fold_{i}" for i in range(5)]
save_dir = "saved_multimodal_models"
os.makedirs(save_dir, exist_ok=True)

# Modality 1 (WSI): Conch v1 patch-feature dimensionality.
MOD1_FEAT_DIM = 512

# Modality 2 (3D VISTA): each (512, 512, D) volume is aggregated into a
# (D, 3) bag of per-slice (mean, max, std) features.
MOD2_FEAT_DIM = 3

# Deterministic behaviour across numpy / torch / cuDNN.
SEED = 42
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
logger.info(f"随机种子设置为 {SEED}")


# --- multi-modal dataset ---
class MultiModalH5Dataset(Dataset):
    """Yields full (un-sampled) feature bags for both modalities plus a label.

    Modality 1: WSI patch features read from per-slide .h5 files, shape (N, 512).
    Modality 2: 3D VISTA predictions aggregated per depth slice, shape (D, 3).
    Any load failure is logged and replaced by a single zero instance so that
    training can continue.
    """

    def __init__(
        self,
        df,
        split,
        split_col,
        mod1_feats_path,
        mod2_path_map,
        mod1_num_instances=None,  # kept for API compatibility; no longer used
        mod2_num_instances=None,  # kept for API compatibility; no longer used
    ):
        self.df = df[df[split_col] == split]
        self.split = split

        # Modality 1 (WSI): directory holding per-slide .h5 feature files.
        self.mod1_feats_path = mod1_feats_path

        # Modality 2 (3D VISTA): case_id -> prediction .h5 path.
        self.mod2_path_map = mod2_path_map
        self.mod2_feat_dim = MOD2_FEAT_DIM  # 3 per-slice stats: mean, max, std

        # Leftover from the earlier instance-sampling variant; kept so the
        # object shape is unchanged.
        self.generator = torch.Generator().manual_seed(SEED)

    def __len__(self):
        return len(self.df)

    def _load_mod1_features(self, slide_id):
        """Load the full WSI patch-feature bag for one slide: (N, 512)."""
        try:
            h5_file = os.path.join(self.mod1_feats_path, slide_id + ".h5")
            with h5py.File(h5_file, "r") as f:
                features = torch.from_numpy(f["features"][:])  # (N, 512)
        except Exception as e:
            logger.warning(f"加载模态1 (WSI) 失败: {slide_id}.h5, 错误: {e}")
            # Fall back to a single zero instance of shape (1, 512).
            return torch.zeros(1, MOD1_FEAT_DIM).float()

        if features.shape[0] == 0:
            # Empty bag: hand back one zero instance instead.
            logger.warning(f"模态1 (WSI) {slide_id}.h5 为空, 返回零张量。")
            return torch.zeros(1, MOD1_FEAT_DIM).float()

        return features.float()

    def _load_mod2_features(self, slide_id):
        """Load and aggregate one 3D VISTA prediction volume to (D, 3)."""
        mod2_path = self.mod2_path_map.get(slide_id)
        if not mod2_path:
            logger.warning(f"未找到模态2 (3D VISTA) 映射: {slide_id}")
            return torch.zeros(1, self.mod2_feat_dim).float()

        try:
            with h5py.File(mod2_path, "r") as f:
                # Stored shape: (1, 512, 512, D) -> squeeze to (512, 512, D).
                data = f["prediction"][:].squeeze(0)

                if data.ndim != 3 or data.shape[0] != 512 or data.shape[1] != 512:
                    logger.warning(
                        f"模态2 (3D VISTA) {slide_id} 形状不正确: {data.shape}"
                    )
                    return torch.zeros(1, self.mod2_feat_dim).float()

                # Collapse each of the D depth slices to three statistics,
                # producing a (D, 3) bag of per-slice instances.
                per_slice_stats = [
                    np.mean(data, axis=(0, 1)),  # (D,)
                    np.max(data, axis=(0, 1)),  # (D,)
                    np.std(data, axis=(0, 1)),  # (D,)
                ]
                features = torch.from_numpy(np.stack(per_slice_stats, axis=-1))

            if features.shape[0] == 0:
                logger.warning(f"模态2 (3D VISTA) {slide_id} 聚合后为空, 返回零张量。")
                return torch.zeros(1, self.mod2_feat_dim).float()

            return features.float()

        except Exception as e:
            logger.warning(
                f"加载模态2 (3D VISTA) 失败: {slide_id} (at {mod2_path}), 错误: {e}"
            )
            return torch.zeros(1, self.mod2_feat_dim).float()

    def __getitem__(self, idx):
        row = self.df.iloc[idx]

        features_mod1 = self._load_mod1_features(row["slide_id"])  # (N, 512)
        features_mod2 = self._load_mod2_features(row["case_id"])  # (D, 3)
        label = torch.tensor(row[task_label_col], dtype=torch.float32)

        return features_mod1, features_mod2, label


# --- multi-modal MIL model ---
class MultiModalABMIL(nn.Module):
    """Two-branch ABMIL: WSI patches (512-d) fused with 3D VISTA slices (3-d).

    Each modality is pooled by its own attention-based MIL encoder; the two
    pooled embeddings are concatenated (512 + 64 = 576) and fed to a small
    MLP head that emits one logit per bag.
    """

    def __init__(
        self,
        mod1_input_dim=MOD1_FEAT_DIM,  # 512
        mod1_encoder_dim=512,  # ABMIL output size for modality 1
        mod2_input_dim=MOD2_FEAT_DIM,  # 3
        mod2_encoder_dim=64,  # ABMIL output size for modality 2
        n_heads=1,
        dropout=0.0,
        gated=True,
        hidden_dim=256,
    ):
        super().__init__()

        # Modality 1 (WSI) attention-MIL encoder.
        self.feature_encoder_mod1 = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=mod1_input_dim,
            n_heads=n_heads,
            head_dim=mod1_encoder_dim,
            dropout=dropout,
            gated=gated,
        )

        # Project modality-2 instances from (B, N, 3) up to (B, N, 64)
        # before they enter the second encoder.
        self.mod2_projection = nn.Linear(mod2_input_dim, mod2_encoder_dim)
        logger.info(f"为模态2添加了手动投射层: {mod2_input_dim} -> {mod2_encoder_dim}")

        # Modality 2 (3D VISTA) encoder. Its input dim is the *projected*
        # width (64), not the raw 3-d feature size.
        self.feature_encoder_mod2 = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=mod2_encoder_dim,
            n_heads=n_heads,
            head_dim=mod2_encoder_dim,
            dropout=dropout,
            gated=gated,
        )

        # Fusion + classification head over the concatenated embeddings.
        fused_dim = mod1_encoder_dim + mod2_encoder_dim  # 512 + 64 = 576
        self.classifier = nn.Sequential(
            nn.Linear(fused_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(p=max(0.1, dropout)),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_mod1, x_mod2, return_raw_attention=False):
        """Fuse both modalities and classify.

        Args:
            x_mod1: (B, N1, 512) WSI patch features.
            x_mod2: (B, N2, 3) per-slice VISTA features.
            return_raw_attention: accepted for API compatibility; currently
                a no-op.

        Returns:
            logits tensor of shape (B,).
        """
        # Lift modality 2 to the encoder's expected width: (B, N2, 64).
        projected_mod2 = self.mod2_projection(x_mod2)

        # ABMILSlideEncoder consumes a dict with a "features" entry.
        emb_mod1 = self.feature_encoder_mod1(
            {"features": x_mod1}, return_raw_attention=False
        )  # (B, 512)
        emb_mod2 = self.feature_encoder_mod2(
            {"features": projected_mod2}, return_raw_attention=False
        )  # (B, 64)

        # Concatenate both bag embeddings, then map to a single logit.
        fused = torch.cat([emb_mod1, emb_mod2], dim=1)  # (B, 576)
        return self.classifier(fused).squeeze(1)  # (B,)


# Select the compute device (CUDA if available, else CPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info(f"使用设备: {device}")


# --- cross-validation hyper-parameters (multi-modal MIL) ---
# Bags are loaded at full, variable size, so the DataLoader must run with
# batch_size=1 (no padding/collation of ragged bags is implemented).
batch_size = 1
num_epochs = 20
learning_rate = 1e-4

logger.info(f"\n{'='*60}")
logger.info("--- 开始 5-折交叉验证 (多模态 ABMIL) ---")
logger.info(f"{'='*60}")
logger.info(f"特征路径 (WSI): {feats_path_mod1}")
logger.info(f"特征路径 (3D VISTA): {feats_path_mod2_root}")
logger.info(
    f"Batch Size: {batch_size} (使用 Full Bags), Epochs: {num_epochs}, LR: {learning_rate}"
)

# Per-fold metric accumulators (a fold's AUC may be NaN if its test split
# contains a single class).
all_fold_aucs_mil = []
all_fold_accuracies_mil = []

for fold_idx, current_split_col in enumerate(fold_columns):
    logger.info(
        f"\n--- (MIL) 开始第 {fold_idx + 1}/{len(fold_columns)} 折 ({current_split_col}) ---"
    )

    # 1. Build this fold's datasets and loaders. Both modality sources
    #    (WSI feature directory + VISTA path map) are passed through.
    train_dataset = MultiModalH5Dataset(
        df=df,
        split="train",
        split_col=current_split_col,
        mod1_feats_path=feats_path_mod1,
        mod2_path_map=vista_3d_map,
    )
    test_dataset = MultiModalH5Dataset(
        df=df,
        split="test",
        split_col=current_split_col,
        mod1_feats_path=feats_path_mod1,
        mod2_path_map=vista_3d_map,
    )

    # (Bags have variable sizes, so train AND test must use batch_size=1.)
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    logger.info(
        f"Fold {fold_idx + 1}: Train samples = {len(train_dataset)}, Test samples = {len(test_dataset)}"
    )

    # 2. Fresh model, loss, and optimizer for every fold.
    model = MultiModalABMIL().to(device)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)

    best_test_auc = 0.0

    # 3. Training loop. No early stopping: evaluation happens only after
    #    the final epoch.
    for epoch in range(num_epochs):
        model.train()
        total_loss = 0
        train_preds = []
        train_labels = []

        # tqdm progress bar over the training loader.
        train_pbar = tqdm(
            train_loader, desc=f"Epoch {epoch + 1}/{num_epochs} [Train]", leave=False
        )
        for features_mod1, features_mod2, labels in train_pbar:
            # (batch_size is 1 here)
            features_mod1 = features_mod1.to(device)
            features_mod2 = features_mod2.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            logits = model(features_mod1, features_mod2)
            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            train_preds.extend(torch.sigmoid(logits).detach().cpu().numpy())
            train_labels.extend(labels.cpu().numpy())

            train_pbar.set_postfix(loss=loss.item())

        # avg_train_loss feeds only the disabled logging block below.
        avg_train_loss = total_loss / len(train_loader)

        # Optional per-epoch training AUC (currently disabled):
        # train_auc = np.nan
        # if len(np.unique(train_labels)) > 1:
        #     train_auc = roc_auc_score(train_labels, train_preds)
        # logger.info(f"Epoch {epoch + 1}: Avg Train Loss = {avg_train_loss:.4f}, Train AUC = {train_auc:.4f}")

    # 4. Evaluation (runs once, after the last epoch; `epoch` still holds
    #    the final epoch index from the loop above).
    model.eval()
    test_preds = []
    test_labels = []

    test_pbar = tqdm(
        test_loader, desc=f"Epoch {epoch + 1}/{num_epochs} [Test]", leave=False
    )
    with torch.no_grad():
        for features_mod1, features_mod2, labels in test_pbar:
            # (batch_size is 1 here)
            features_mod1 = features_mod1.to(device)
            features_mod2 = features_mod2.to(device)
            labels = labels.to(device)

            logits = model(features_mod1, features_mod2)
            test_preds.extend(torch.sigmoid(logits).cpu().numpy())
            test_labels.extend(labels.cpu().numpy())

    # 5. Metrics. AUC stays NaN when the test split has a single class.
    test_auc = np.nan
    test_acc = np.nan

    if len(np.unique(test_labels)) > 1:
        test_auc = roc_auc_score(test_labels, test_preds)
    else:
        logger.warning(f"Fold {fold_idx + 1}: Test set 只有一个类别, 无法计算 AUC。")

    # Accuracy at a fixed 0.5 probability threshold.
    test_preds_class = (np.array(test_preds) > 0.5).astype(int)
    test_acc = accuracy_score(test_labels, test_preds_class)

    logger.info(
        f"--- Fold {fold_idx + 1} (MIL) 结果: Test AUC = {test_auc:.4f}, Test Accuracy = {test_acc:.4f} ---"
    )
    all_fold_aucs_mil.append(test_auc)
    all_fold_accuracies_mil.append(test_acc)

    # 6. Save the final-epoch model. NOTE(review): since only one AUC is
    # measured per fold, this condition is effectively always true —
    # EXCEPT when test_auc is NaN (NaN > 0.0 is False), in which case the
    # fold saves nothing.
    if test_auc > best_test_auc:
        best_test_auc = test_auc
        model_save_path = os.path.join(
            save_dir, f"multimodal_model_fold_{fold_idx + 1}.pth"
        )
        torch.save(model.state_dict(), model_save_path)
        logger.info(f"模型已保存至: {model_save_path}")

# --- multi-modal MIL cross-validation summary ---
logger.info(f"\n{'='*50}")
logger.info("--- 多模态 ABMIL 5-折交叉验证总结 ---")
logger.info(f"{'='*50}")

# NaN-aware statistics: folds whose AUC could not be computed are skipped.
mean_auc_mil, std_auc_mil = (
    np.nanmean(all_fold_aucs_mil),
    np.nanstd(all_fold_aucs_mil),
)
mean_acc_mil, std_acc_mil = (
    np.nanmean(all_fold_accuracies_mil),
    np.nanstd(all_fold_accuracies_mil),
)

logger.info("模型: MultiModalABMIL (WSI + 3D VISTA)")
logger.info(f"  平均 AUC: {mean_auc_mil:.4f} \u00b1 {std_auc_mil:.4f}")
logger.info(f"  平均 Accuracy: {mean_acc_mil:.4f} \u00b1 {std_acc_mil:.4f}")
logger.info("-" * 20)


logger.info("--- 所有分析完成 ---")
