import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import h5py
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import roc_auc_score
import logging
import sys
from trident.slide_encoder_models import ABMILSlideEncoder


# --- Logging configuration ---
log_file_path = "cross_val_training.log"
# Drop any handlers installed earlier (e.g. by a previous run in the same
# notebook/interpreter) so that basicConfig below takes effect cleanly.
logging.getLogger().handlers = []
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[
        # mode="w" truncates the log file at the start of every run.
        logging.FileHandler(log_file_path, mode="w"),
        logging.StreamHandler(sys.stdout),
    ],
)
logger = logging.getLogger()
logger.info("日志系统已启动。")
# --- End of logging configuration ---

# Load labels and splits from the local TSV file.  A Parquet copy is cached
# next to the TSV so subsequent runs load faster.
# (The redundant mid-file `import pandas as pd` / `import os` were removed:
# both modules are already imported at the top of the file.)
split_file_path = "/data0/lcy/Patho-Bench/tools/zzylnm_slices_splits/splits/k=all.tsv"
parquet_path = split_file_path.replace(".tsv", ".parquet")

if os.path.exists(parquet_path):
    print(f"正在从更快的 Parquet 文件加载: {parquet_path}")
    df = pd.read_parquet(parquet_path)
else:
    print(f"首次加载 TSV 文件（可能需要一些时间）: {split_file_path}")
    df = pd.read_csv(split_file_path, sep="\t")
    print(f"正在保存为 Parquet 格式以加快未来加载速度: {parquet_path}")
    df.to_parquet(parquet_path)

# 1. Collect the base names (without ".h5") of all available feature files.
feats_path = "/data0/lcy/data/LNM/LNM_slices_conch_v1_processed/20x_512px_0px_overlap/features_conch_v1"
available_files = [
    f.replace(".h5", "") for f in os.listdir(feats_path) if f.endswith(".h5")
]
available_files_set = set(available_files)

# 2. Check how many slide IDs in `df` have no matching .h5 file on disk.
df_ids = set(df["slide_id"].unique())
missing_ids = df_ids - available_files_set

if missing_ids:
    print(
        f"警告：在 df 中找到了 {len(missing_ids)} 个 slide_id，但在 feats_path 中没有对应的 .h5 文件。"
    )
    print("将从 df 中过滤掉这些缺失的 ID。")
    # print(f"缺失的 ID 示例: {list(missing_ids)[:5]}")  # uncomment to inspect examples

# 3. Keep only the rows whose feature file actually exists.
df = df[df["slide_id"].isin(available_files_set)].copy()

# Per the file contents, the label column is 'label'.
task_label_col = "label"

# Inspect the label distribution.  The original bare `df_counts` expression
# was notebook residue with no effect in a script, so it is printed explicitly.
df_counts = df[task_label_col].value_counts().reset_index()
df_counts.columns = [task_label_col, "Count"]
print(df_counts)

# --- Configuration ---
task_label_col = "label"  # name of the label column in `df`
feature_dim = 512  # per-patch feature dimension (presumably the CONCH v1 embedding size — confirm)
fold_columns = ["fold_0", "fold_1", "fold_2", "fold_3", "fold_4"]  # 5-fold split columns

# *** New: directory where per-fold model checkpoints are saved ***
save_dir = "saved_models"
os.makedirs(save_dir, exist_ok=True)  # make sure the directory exists
# ---------------------

# Deterministic behavior: seed numpy and torch (all CUDA devices), force
# deterministic cudnn kernels, and disable cudnn autotuning.
SEED = 1234
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
logger.info(f"随机种子设置为 {SEED}")


class BinaryClassificationModel(nn.Module):
    """ABMIL slide encoder followed by a small MLP head for binary classification.

    The encoder pools a bag of patch features into one slide-level embedding;
    the head maps that embedding to a single logit per slide.
    """

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_heads=1,
        head_dim=512,
        dropout=0.0,
        gated=True,
        hidden_dim=256,
    ):
        super().__init__()
        # Attention-based MIL pooling over patch features (trainable).
        self.feature_encoder = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=n_heads,
            head_dim=head_dim,
            dropout=dropout,
            gated=gated,
        )
        # Two-layer MLP producing one logit for BCE-with-logits training.
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x, return_raw_attention=False):
        """Return per-slide logits; optionally also the raw attention weights."""
        if return_raw_attention:
            pooled, attention = self.feature_encoder(x, return_raw_attention=True)
            logits = self.classifier(pooled).squeeze(1)
            return logits, attention
        pooled = self.feature_encoder(x)
        return self.classifier(pooled).squeeze(1)


# Select the compute device: CUDA if available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info(f"使用设备: {device}")


# Dataset used for training/validation.
class H5Dataset(Dataset):
    """Per-slide patch features read from `<slide_id>.h5` files.

    Args:
        feats_path: Directory containing one `<slide_id>.h5` file per slide,
            each with a "features" dataset of shape (num_patches, dim).
        df: DataFrame with at least `slide_id`, the label column, and `split_col`.
        split: Which split to select (e.g. "train" or "test").
        split_col: Column of `df` holding the split assignment.
        num_features: Bag size sampled per slide during training; test-time
            slides keep all their patches.
    """

    def __init__(self, feats_path, df, split, split_col, num_features=512):
        self.df = df[df[split_col] == split]
        self.feats_path = feats_path
        self.num_features = num_features
        self.split = split

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        with h5py.File(
            os.path.join(self.feats_path, row["slide_id"] + ".h5"), "r"
        ) as f:
            features = torch.from_numpy(f["features"][:])

        if self.split == "train":
            # Randomly sample a fixed-size bag of patch features.
            # BUGFIX: the original created a fresh Generator seeded with the
            # global SEED on every call, so every slide in every epoch drew the
            # exact same "random" indices, defeating the sampling augmentation.
            # Using torch's global RNG keeps runs reproducible (it is seeded
            # once at startup) while varying the sample across calls.
            num_available = features.shape[0]
            if num_available >= self.num_features:
                indices = torch.randperm(num_available)[: self.num_features]
            else:
                # Fewer patches than requested: sample with replacement.
                indices = torch.randint(num_available, (self.num_features,))
            features = features[indices]

        label = torch.tensor(row[task_label_col], dtype=torch.float32)
        return features, label


# --- Cross-validation loop ---
feats_path = "/data0/lcy/data/LNM/LNM_slices_conch_v1_processed/20x_512px_0px_overlap/features_conch_v1"
batch_size = 8
num_epochs = 20

logger.info(f"特征路径: {feats_path}")
logger.info(f"Batch Size: {batch_size}, Epochs: {num_epochs}")

all_fold_aucs = []
all_fold_accuracies = []

# Guard against running this section before the split DataFrame was built.
if "df" not in locals():
    logger.error("=" * 50)
    logger.error("错误: DataFrame 'df' 未定义。")
    logger.error("=" * 50)
else:
    for fold_idx, current_split_col in enumerate(fold_columns):
        logger.info(f"\n{'='*50}")
        logger.info(
            f"--- 开始第 {fold_idx + 1}/{len(fold_columns)} 折 (使用列: {current_split_col}) ---"
        )
        logger.info(f"{'='*50}")

        # 1. Re-initialize model, optimizer and loss for every fold so that
        #    no weights leak across folds.
        model = BinaryClassificationModel().to(device)
        optimizer = optim.Adam(model.parameters(), lr=4e-4)
        criterion = nn.BCEWithLogitsLoss()

        # 2. Build the DataLoaders for this fold's split column.
        # NOTE(review): worker_init_fn seeds numpy with the same SEED in every
        # worker (identical streams per worker); with the default num_workers=0
        # it is a no-op — confirm whether per-worker seeding was intended.
        train_loader = DataLoader(
            H5Dataset(feats_path, df, "train", split_col=current_split_col),
            batch_size=batch_size,
            shuffle=True,
            worker_init_fn=lambda _: np.random.seed(SEED),
        )
        test_loader = DataLoader(
            H5Dataset(feats_path, df, "test", split_col=current_split_col),
            # batch_size=1 because test-time slides are not subsampled and so
            # have variable patch counts that cannot be batched together.
            batch_size=1,
            shuffle=False,
            worker_init_fn=lambda _: np.random.seed(SEED),
        )
        logger.info(
            f"Fold {fold_idx + 1}: Train samples = {len(train_loader.dataset)}, Test samples = {len(test_loader.dataset)}"
        )

        # 3. Training loop.
        for epoch in range(num_epochs):
            model.train()
            total_loss = 0.0
            for features, labels in train_loader:
                # The model is fed a dict with a "features" key — presumably
                # the input format ABMILSlideEncoder expects; confirm upstream.
                features, labels = {"features": features.to(device)}, labels.to(device)
                optimizer.zero_grad()
                outputs = model(features)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                total_loss += loss.item()
            logger.info(
                f"Fold {fold_idx + 1}, Epoch {epoch+1}/{num_epochs}, Loss: {total_loss/len(train_loader):.4f}"
            )

        # 4. Evaluation on the held-out split.
        model.eval()
        all_labels, all_outputs = [], []
        correct = 0
        total = 0

        with torch.no_grad():
            for features, labels in test_loader:
                features, labels = {"features": features.to(device)}, labels.to(device)
                outputs = model(features)
                # Logit > 0 corresponds to sigmoid probability > 0.5.
                predicted = (outputs > 0).float()
                correct += (predicted == labels).sum().item()
                total += labels.size(0)
                all_outputs.append(outputs.cpu().numpy())
                all_labels.append(labels.cpu().numpy())

        # 5. Compute metrics.  AUC is rank-based, so raw logits work directly;
        #    it is undefined when the test split contains a single class.
        all_outputs = np.concatenate(all_outputs)
        all_labels = np.concatenate(all_labels)

        auc = np.nan
        if len(np.unique(all_labels)) > 1:
            auc = roc_auc_score(all_labels, all_outputs)
        else:
            logger.warning(
                f"Fold {fold_idx + 1} Warning: Test set only contains one class. AUC cannot be calculated."
            )

        accuracy = correct / total

        logger.info(f"--- Fold {fold_idx + 1} 结果 ---")
        logger.info(f"Test AUC: {auc:.4f}")
        logger.info(f"Test Accuracy: {accuracy:.4f}")

        all_fold_aucs.append(auc)
        all_fold_accuracies.append(accuracy)

        # *** 6. New: save this fold's model weights ***
        model_save_path = os.path.join(save_dir, f"model_{current_split_col}.pth")
        torch.save(model.state_dict(), model_save_path)
        logger.info(f"Model for fold {fold_idx + 1} saved to: {model_save_path}")

    # --- Cross-validation summary ---
    logger.info(f"\n{'='*50}")
    logger.info("--- 5-折交叉验证总结 ---")
    logger.info(f"{'='*50}")

    # nan-aware aggregation skips folds whose AUC could not be computed.
    mean_auc = np.nanmean(all_fold_aucs)
    std_auc = np.nanstd(all_fold_aucs)
    mean_accuracy = np.nanmean(all_fold_accuracies)
    std_accuracy = np.nanstd(all_fold_accuracies)

    logger.info(f"平均 AUC: {mean_auc:.4f} \u00b1 {std_auc:.4f}")
    logger.info(f"平均 Accuracy: {mean_accuracy:.4f} \u00b1 {std_accuracy:.4f}")
    logger.info(f"\n日志已保存到 {log_file_path}")


# NOTE(review): the commented-out example below calls `test_external_dataset`,
# which is not defined or imported anywhere in this file — it must be added
# before this example can be enabled.
# logger.info("\n--- 开始运行外部测试示例 ---")

# # 1. 定义外部测试集的信息
# external_feats_path = "/data0/lcy/data/LNM/LNM_Zhujiang_conchv1_processed/20x_512px_0px_overlap/features_conch_v1"
# external_df_path = "/data0/lcy/Patho-Bench/tools/zzylnm_zhujiang_splits/cohort.tsv"
# external_label_col = "label" # 确保这是你外部TSV中的正确列名

# # 2. 选择一个已保存的模型进行测试 (例如，使用 'fold_0' 训练的模型)
# model_to_test_path = os.path.join(save_dir, "model_fold_0.pth")

# # 3. 加载外部 DataFrame
# try:
#     external_df = pd.read_csv(external_df_path, sep="\t")
#     logger.info(f"成功加载外部 manifest: {external_df_path}")

#     # 4. 检查文件和 DF 是否存在
#     if not os.path.exists(model_to_test_path):
#         logger.error(f"找不到模型文件: {model_to_test_path}")
#     elif not os.path.exists(external_feats_path):
#         logger.error(f"找不到外部特征路径: {external_feats_path}")
#     else:
#         # 5. 运行测试函数
#         test_external_dataset(
#             model_path=model_to_test_path,
#             external_df=external_df,
#             feats_path=external_feats_path,
#             device=device,
#             task_label_col=external_label_col
#         )

# except FileNotFoundError:
#     logger.error(f"找不到外部 manifest 文件: {external_df_path}")
# except Exception as e:
#     logger.error(f"加载外部 manifest 时出错: {e}")
