"""
核心训练与评估逻辑：
- train_one_epoch: 在训练集上训练一个 epoch
- evaluate_model: 在验证/测试集上评估模型
"""

import math
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import logging
from typing import Tuple, Optional
from sklearn.metrics import roc_auc_score  


# 假设这些是您本地的模块
try:
    from trident.fpt.models.mil_models_domain import (
        BinaryClassificationModel,
        DSMILModel,
        # TransMILModel,
        HybridAttnMILModel,
        LinearProbeModel,
        MAEMILModel,
        CLAMModel,
        grad_reverse,
    )
except ImportError:
    print(
        "Warning: Could not perform relative imports in core.py. "
        "Assuming models are in the same path or PYTHONPATH."
    )
    from ..models.mil_models_domain import (
        BinaryClassificationModel,
        DSMILModel,
        # TransMILModel,
        HybridAttnMILModel,
        LinearProbeModel,
        MAEMILModel,
        CLAMModel,
        grad_reverse
    )

import torch
import torch.nn as nn
import torch.nn.functional as F


def train_one_epoch(
    model: nn.Module,
    loader: DataLoader,
    optimizer: optim.Optimizer,
    criterion: nn.Module,  # BCEWithLogitsLoss, or CLAMLoss when model is a CLAMModel
    device: torch.device,
) -> float:
    """Train the model for a single epoch and return the mean batch loss.

    Compatible with both CLAMModel (forward returns a results dict that the
    matching CLAMLoss consumes) and plain MIL models (forward returns a
    logits tensor of shape [B] consumed by BCEWithLogitsLoss).
    """
    model.train()
    running_loss = 0.0

    for feats, targets in loader:
        bag_input = {"features": feats.to(device)}
        targets = targets.to(device)

        optimizer.zero_grad()

        if isinstance(model, CLAMModel):
            # CLAM forward yields a dict; CLAMLoss takes the whole dict plus
            # float labels (BCE expects float targets).
            outputs = model(bag_input)
            loss = criterion(outputs, targets.float())
        else:
            # Every other MIL model emits a plain logits tensor [B].
            logits = model(bag_input)
            loss = criterion(logits, targets.float())

        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    return running_loss / len(loader)


def train_domain_one_epoch(
    model: nn.Module,
    loader: DataLoader,            # source loader: labelled in-domain data
    optimizer: optim.Optimizer,
    criterion: nn.Module,          # task loss (BCEWithLogitsLoss or CLAMLoss)
    device: torch.device,
    domain_classifier: nn.Module,  # DANN domain discriminator
    domain_loader: DataLoader,     # target loader: unlabelled out-of-domain data
    domain_criterion: nn.Module,   # domain loss (BCEWithLogitsLoss)
    epoch: int,                    # current epoch index (0-based)
    n_epochs: int,                 # total number of training epochs
    len_dataloader: int,           # number of batches in `loader`
    lambda_domain: float = 0.5,    # weight of the domain loss in the total loss
    smoothing_alpha: float = 0.1,  # label-smoothing strength for source labels
) -> float:
    """Train one epoch with DANN-style unsupervised domain adaptation.

    The source ``loader`` provides labelled bags for the task loss; the
    target ``domain_loader`` provides unlabelled bags (cycled as needed).
    Bag-level features from both domains pass through a gradient-reversal
    layer (GRL) into ``domain_classifier``, which is trained to separate
    source (label 0) from target (label 1); the reversed gradient pushes
    the feature extractor towards domain-invariant representations.

    The GRL strength follows the standard DANN annealing schedule:
    ``alpha = 2 / (1 + exp(-10 * p)) - 1`` where ``p`` is the fraction of
    total training progress.

    Args:
        smoothing_alpha: binary label-smoothing factor applied to the
            source task labels via ``y*(1-a) + a/2``; 0 disables smoothing.
            Defaults to 0.1 (the previously hard-coded value).

    Returns:
        Average combined loss (task + lambda_domain * domain) per batch.

    Raises:
        ValueError: if the model does not expose a bag-level feature
            (CLAMModel output missing 'bag_feature', or a non-CLAM model
            that does not return a (logits, bag_feature) tuple).
    """
    loader_target = domain_loader
    model.train()
    domain_classifier.train()
    iter_target = iter(loader_target)

    total_task_loss = 0.0
    total_domain_loss = 0.0
    total_combined_loss = 0.0

    # i is the batch index within this epoch.
    for i, (features_batch, labels_batch) in enumerate(loader):

        # --- 1. Source (S) batch ---
        features_dict_S = {"features": features_batch.to(device)}
        labels_S = labels_batch.to(device).float()  # BCE expects float targets
        B_S = features_batch.shape[0]
        # Binary label smoothing: y -> y*(1-a) + a/2.
        labels_S = labels_S * (1.0 - smoothing_alpha) + smoothing_alpha / 2.0

        # --- 2. Target (T) batch (restart the target iterator when exhausted) ---
        try:
            features_batch_T = next(iter_target)[0]  # [0]: loaders yield (features, dummy_label)
        except StopIteration:
            iter_target = iter(loader_target)
            features_batch_T = next(iter_target)[0]

        features_dict_T = {"features": features_batch_T.to(device)}
        B_T = features_batch_T.shape[0]

        # --- 3. DANN GRL strength for this optimization step ---
        p = float(i + epoch * len_dataloader) / n_epochs / len_dataloader
        alpha = 2. / (1. + math.exp(-10. * p)) - 1

        optimizer.zero_grad()

        # --- 4. Task loss and source bag-level feature ---
        if isinstance(model, CLAMModel):
            results_dict_S = model(features_dict_S)
            loss_task = criterion(results_dict_S, labels_S)
            # DANN needs a bag-level embedding from the CLAM output dict.
            if "bag_feature" not in results_dict_S:
                raise ValueError("CLAMModel 的 results_dict 必须包含 'bag_feature' 键才能用于 DANN")
            bag_feature_S = results_dict_S["bag_feature"]
        else:
            # Non-CLAM models must return (logits, bag_feature) to be usable
            # with DANN (e.g. a TransMIL variant with a CLS-token feature).
            model_output = model(features_dict_S)
            if not (isinstance(model_output, tuple) and len(model_output) == 2):
                raise ValueError(f"模型 {model.__class__.__name__} 必须返回 (logits, bag_feature) 才能用于 DANN")
            task_logits_S, bag_feature_S = model_output
            loss_task = criterion(task_logits_S, labels_S)

        # --- 5. Target bag-level feature (target logits are unused) ---
        if isinstance(model, CLAMModel):
            bag_feature_T = model(features_dict_T)["bag_feature"]
        else:
            _, bag_feature_T = model(features_dict_T)

        # --- 6. Domain loss over the concatenated S+T features ---
        bag_feature_combined = torch.cat((bag_feature_S, bag_feature_T), dim=0)
        # Domain labels: 0 = source, 1 = target.
        domain_labels = torch.cat(
            (torch.zeros(B_S, device=device), torch.ones(B_T, device=device)),
            dim=0,
        ).float()

        # Gradient reversal, then discriminate the domains.
        reversed_features = grad_reverse(bag_feature_combined, alpha)
        domain_logits = domain_classifier(reversed_features).squeeze(1)  # [B_S + B_T]
        loss_domain = domain_criterion(domain_logits, domain_labels)

        # --- 7. Combined loss, backprop, optimizer step ---
        loss_total = loss_task + lambda_domain * loss_domain
        loss_total.backward()
        optimizer.step()

        # --- 8. Bookkeeping ---
        total_task_loss += loss_task.item()
        total_domain_loss += loss_domain.item()
        total_combined_loss += loss_total.item()

    return total_combined_loss / len(loader)


def evaluate_model(
    model: nn.Module,
    loader: DataLoader,
    device: torch.device,
    criterion: Optional[nn.Module] = None,
    use_tta: bool = False,
    n_tta_runs: int = 5,
    tta_type: str = 'patch_dropout', # 'patch_dropout' or 'noise'
    tta_param: float = 0.1
) -> Tuple[float, float, float]:
    """Evaluate the model on a validation/test set (batch_size should be 1).

    Test-time augmentation (TTA):
      - use_tta: enable TTA; run 0 always uses the unaugmented features
      - n_tta_runs: total forward passes per slide (1 original + n-1 augmented)
      - tta_type: 'patch_dropout' (zero out random patches) or 'noise'
        (add Gaussian noise to the features)
      - tta_param: drop rate / noise level, depending on tta_type

    Returns:
        (auc, accuracy, avg_loss); auc is NaN when it cannot be computed.
    """
    logger = logging.getLogger()
    model.eval()

    slide_logits, slide_labels = [], []
    n_correct, n_seen = 0, 0
    loss_sum = 0.0
    runs_per_slide = n_tta_runs if use_tta else 1

    with torch.no_grad():
        for feats_cpu, label_cpu in loader:
            labels = label_cpu.to(device)
            B, N, D = feats_cpu.shape  # one slide per batch: [1, N, D]

            run_logits, run_losses = [], []
            for run in range(runs_per_slide):
                view = feats_cpu  # run 0 always uses the original features
                if use_tta and run > 0:
                    if tta_type == 'patch_dropout':
                        # Zero out a random tta_param fraction of patches (on CPU).
                        keep = (torch.rand(B, N, 1) > tta_param).float()
                        view = feats_cpu * keep
                    elif tta_type == 'noise':
                        # Additive Gaussian noise on the CPU features.
                        view = feats_cpu + torch.randn_like(feats_cpu) * tta_param

                outputs = model({"features": view.to(device)})

                if criterion:
                    # The criterion consumes the raw model output (dict for
                    # CLAMLoss, logits tensor for BCEWithLogitsLoss).
                    run_losses.append(criterion(outputs, labels.float()).item())

                # CLAM wraps its bag logit inside a results dict.
                if isinstance(model, CLAMModel):
                    run_logits.append(outputs["bag_logit"])
                else:
                    run_logits.append(outputs)

            # Average logits over the TTA runs: [n_runs, 1, ...] -> [1, ...].
            logits = torch.stack(run_logits, dim=0).mean(dim=0)

            if criterion and run_losses:
                loss_sum += sum(run_losses) / len(run_losses)

            preds = (torch.sigmoid(logits) > 0.5).float()
            n_correct += (preds == labels).sum().item()
            n_seen += labels.size(0)

            slide_logits.append(logits.cpu().numpy())  # TTA-averaged logits
            slide_labels.append(labels.cpu().numpy())

    all_logits = np.concatenate(slide_logits)
    all_labels = np.concatenate(slide_labels)

    auc = np.nan
    if len(np.unique(all_labels)) > 1:
        try:
            # roc_auc_score only needs a monotone score, so raw logits suffice.
            auc = roc_auc_score(all_labels, all_logits)
        except Exception as e:
            logger.warning(f"计算 AUC 时出错: {e}")
    else:
        logger.warning("评估集中仅包含一个类别。无法计算 AUC。")

    accuracy = n_correct / n_seen if n_seen > 0 else 0.0
    avg_loss = loss_sum / len(loader) if criterion and len(loader) > 0 else 0.0

    return auc, accuracy, avg_loss

def evaluate_domain_model(
    model: nn.Module,
    loader: DataLoader,
    device: torch.device,
    criterion: Optional[nn.Module] = None,
    use_tta: bool = False,
    n_tta_runs: int = 5,
    tta_type: str = 'patch_dropout', # 'patch_dropout' or 'noise'
    tta_param: float = 0.1
) -> Tuple[float, float, float]:
    """Evaluate a model on a validation/test set (batch_size should be 1).

    Unlike `evaluate_model`, this variant also handles DANN-style models
    whose forward pass returns a (logits, bag_feature) tuple, in addition
    to CLAM-style result dicts and plain logits tensors. TTA parameters
    behave exactly as in `evaluate_model`.

    Returns:
        (auc, accuracy, avg_loss); auc is NaN when it cannot be computed.
    """
    logger = logging.getLogger()
    model.eval()

    def _logits_and_loss_input(outputs):
        # Normalise the raw forward output into (logits, loss_input).
        if isinstance(model, CLAMModel):
            # CLAM returns a dict; CLAMLoss consumes the whole dict.
            return outputs["bag_logit"], outputs
        if isinstance(outputs, tuple):
            # DANN models return (logits, bag_feature); BCE only needs logits.
            return outputs[0], outputs[0]
        # Safety fallback for models that were not adapted for DANN.
        logger.warning(f"模型 {model.__class__.__name__} 未返回元组，假设其直接返回 logits。")
        return outputs, outputs

    slide_logits, slide_labels = [], []
    n_correct, n_seen = 0, 0
    loss_sum = 0.0
    runs_per_slide = n_tta_runs if use_tta else 1

    with torch.no_grad():
        for feats_cpu, label_cpu in loader:
            labels = label_cpu.to(device)
            B, N, D = feats_cpu.shape  # one slide per batch: [1, N, D]

            run_logits, run_losses = [], []
            for run in range(runs_per_slide):
                view = feats_cpu  # run 0 always uses the original features
                if use_tta and run > 0:
                    if tta_type == 'patch_dropout':
                        # Zero out a random tta_param fraction of patches (on CPU).
                        keep = (torch.rand(B, N, 1) > tta_param).float()
                        view = feats_cpu * keep
                    elif tta_type == 'noise':
                        # Additive Gaussian noise on the CPU features.
                        view = feats_cpu + torch.randn_like(feats_cpu) * tta_param

                outputs = model({"features": view.to(device)})
                logits_run, loss_input = _logits_and_loss_input(outputs)

                if criterion:
                    run_losses.append(criterion(loss_input, labels.float()).item())
                run_logits.append(logits_run)

            # Average logits over the TTA runs (stacking tensors, not tuples).
            logits = torch.stack(run_logits, dim=0).mean(dim=0)

            if criterion and run_losses:
                loss_sum += sum(run_losses) / len(run_losses)

            preds = (torch.sigmoid(logits) > 0.5).float()
            n_correct += (preds == labels).sum().item()
            n_seen += labels.size(0)

            slide_logits.append(logits.cpu().numpy())  # TTA-averaged logits
            slide_labels.append(labels.cpu().numpy())

    all_logits = np.concatenate(slide_logits)
    all_labels = np.concatenate(slide_labels)

    auc = np.nan
    if len(np.unique(all_labels)) > 1:
        try:
            # roc_auc_score only needs a monotone score, so raw logits suffice.
            auc = roc_auc_score(all_labels, all_logits)
        except Exception as e:
            logger.warning(f"计算 AUC 时出错: {e}")
    else:
        logger.warning("评估集中仅包含一个类别。无法计算 AUC。")

    accuracy = n_correct / n_seen if n_seen > 0 else 0.0
    avg_loss = loss_sum / len(loader) if criterion and len(loader) > 0 else 0.0

    return auc, accuracy, avg_loss
