import os
import numpy as np
import torch
import torch.nn as nn
from typing import Dict, Union, Tuple, Optional

# *** Import Transformer modules ***
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import torch.nn.functional as F

# Assumes ABMILSlideEncoder lives in this file, or has been imported correctly.
# from trident.slide_encoder_models import ABMILSlideEncoder
# --- Mock ABMILSlideEncoder so the code can run ---
# (If you have the real ABMILSlideEncoder, delete this mock version.)
class ABMILSlideEncoder(nn.Module):
    """A mock ABMIL (attention-based MIL) slide encoder for demonstration.

    Pools a bag of instance features ``[B, N, D]`` into a single bag
    feature ``[B, D]`` via attention weights computed per instance.
    Extra keyword arguments (e.g. ``freeze``) are accepted and ignored.

    NOTE(review): ``dropout`` is accepted but never applied in this mock,
    and ``attention_w_gate`` is created but never used in ``forward`` —
    both kept as-is to preserve the interface and state_dict layout.
    """

    def __init__(self, input_feature_dim, n_heads, head_dim, dropout, gated, **kwargs):
        super().__init__()
        self.input_feature_dim = input_feature_dim
        self.gated = gated
        dim = head_dim * n_heads

        self.attention_v = nn.Linear(input_feature_dim, dim)
        self.attention_u = nn.Linear(input_feature_dim, dim)
        self.attention_w = nn.Linear(dim, 1)

        if self.gated:
            self.attention_v_gate = nn.Linear(input_feature_dim, dim)
            self.attention_u_gate = nn.Linear(input_feature_dim, dim)
            self.attention_w_gate = nn.Linear(dim, 1)

    def forward(self, x, return_raw_attention=False):
        """Aggregate instance features into a bag feature.

        Args:
            x: either a ``[B, N, D]`` tensor or a dict with a "features" key.
            return_raw_attention: if True, also return the softmax-normalized
                attention weights as ``[B, N]``.

        Returns:
            ``[B, D]`` bag feature, optionally with ``[B, N]`` attention.
        """
        # x may be a [B, N, D] tensor or a dict containing "features".
        if isinstance(x, dict):
            features = x["features"]
        else:
            features = x

        # [B, N, D] -> [B, N, dim]
        v = self.attention_v(features)

        # [B, N, dim] -> [B, N, 1]
        if self.gated:
            # NOTE(review): this gated path multiplies tanh(v) by an extra
            # tanh*sigmoid gate (three projections) rather than the classic
            # tanh(V h) * sigmoid(U h); preserved as written.
            v_gate = torch.tanh(self.attention_v_gate(features))
            u_gate = torch.sigmoid(self.attention_u_gate(features))
            gated_v = v_gate * u_gate

            a_unnorm = self.attention_w(torch.tanh(v) * gated_v)
        else:
            # u is only needed on the non-gated path; the original computed
            # it unconditionally and discarded it when gated=True.
            u = self.attention_u(features)
            a_unnorm = self.attention_w(torch.tanh(v) * torch.tanh(u))

        # Normalize attention over the instance axis: [B, N, 1]
        A = torch.softmax(a_unnorm, dim=1)

        # Attention-weighted sum: [B, N, D] * [B, N, 1] -> [B, D]
        bag_feature = torch.sum(features * A, dim=1)

        if return_raw_attention:
            # Returns the *softmax-normalized* weights, squeezed to [B, N].
            return bag_feature, A.squeeze(-1)
        return bag_feature


# --- End of mock ---


# ----------------------------------------------------------------------------
# --- 4. MODEL DEFINITIONS ---
# ----------------------------------------------------------------------------
# (Keep all model definitions consistent with the code you provided.)
feature_dim = 1536  # 1536 or 1024  # example feature dimension; adjust to your data
# NOTE(review): re-assigned to 1024 further down in this file; classes defined
# after that re-assignment pick up the new value as their default.


# 1. Original model (ABMIL)
class BinaryClassificationModel(nn.Module):
    """ABMIL bag encoder followed by a small MLP producing one logit per bag."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_heads=1,
        head_dim=512,
        dropout=0.0,
        gated=True,
        hidden_dim=256,
    ):
        super().__init__()
        # Attention-based pooling of instance features into a bag feature.
        self.feature_encoder = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=n_heads,
            head_dim=head_dim,
            dropout=dropout,
            gated=gated,
        )
        # Two-layer MLP head: bag feature -> single logit.
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x, return_raw_attention=False):
        """Return [B] logits; optionally also the encoder's attention scores."""
        if not return_raw_attention:
            bag_repr = self.feature_encoder(x)
            return self.classifier(bag_repr).squeeze(1)
        bag_repr, attention = self.feature_encoder(x, return_raw_attention=True)
        return self.classifier(bag_repr).squeeze(1), attention


# 2. DSMILModel
class DSMILModel(nn.Module):
    """DSMIL-style model: averages a max-pooled instance logit with an
    attention-aggregated bag logit."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        hidden_dim=256,
        n_heads=1,
        head_dim=512,
        dropout=0.0,
        gated=True,
    ):
        super().__init__()
        # Instance stream: per-instance logits.
        self.instance_classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )
        # Bag stream: attention pooling then classification.
        self.bag_aggregator = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=n_heads,
            head_dim=head_dim,
            dropout=dropout,
            gated=gated,
        )
        self.bag_classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict["features"]: [B, N, D]; returns [B] logits."""
        instance_feats = x_dict["features"]
        # Critical-instance logit: max over instances. [B, N, 1] -> [B, 1]
        per_instance = self.instance_classifier(instance_feats)
        critical_logit = per_instance.max(dim=1).values
        # Bag logit via attention aggregation (the aggregator accepts the dict).
        bag_logit = self.bag_classifier(self.bag_aggregator(x_dict))  # [B, 1]
        # Final score is the mean of both streams.
        return ((critical_logit + bag_logit) / 2).squeeze(1)


# 3. TransMILModel
class TransMILModel(nn.Module):
    """Transformer encoder over instances, mean-pooled, then an MLP head."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.3,
    ):
        super().__init__()
        self.input_feature_dim = input_feature_dim
        layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer_encoder = TransformerEncoder(
            layer, num_layers=num_encoder_layers
        )
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict["features"]: [B, N, D]; returns [B] logits."""
        contextualized = self.transformer_encoder(x_dict["features"])  # [B, N, D]
        pooled = contextualized.mean(dim=1)  # mean over instances -> [B, D]
        return self.classifier(pooled).squeeze(1)


class TransMIL_CLSToken(nn.Module):
    """TransMIL variant that aggregates via a learnable CLS token instead of
    mean pooling over the encoder outputs."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.3,
    ):
        super().__init__()
        self.input_feature_dim = input_feature_dim

        # Learnable CLS token, shaped [1, 1, D] so it broadcasts to [B, 1, D].
        self.cls_token = nn.Parameter(torch.randn(1, 1, input_feature_dim))

        layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer_encoder = TransformerEncoder(
            layer, num_layers=num_encoder_layers
        )
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict["features"]: [B, N, D]; returns [B] logits."""
        instance_feats = x_dict["features"]
        batch = instance_feats.shape[0]
        # Prepend the batch-broadcast CLS token: [B, 1, D] + [B, N, D] -> [B, N+1, D].
        sequence = torch.cat(
            (self.cls_token.expand(batch, -1, -1), instance_feats), dim=1
        )
        encoded = self.transformer_encoder(sequence)  # [B, N+1, D]
        # The CLS position (index 0) serves as the bag representation.
        return self.classifier(encoded[:, 0]).squeeze(1)


# 4. HybridAttnMILModel (v2)
class HybridAttnMILModel(nn.Module):
    """Hybrid MIL model: a Transformer contextualizes instance features, a
    learned scalar gate blends them with the raw features, and a gated-ABMIL
    head pools the blend into a single bag logit."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        transformer_ff_dim=2048,
        abmil_n_heads=1,
        abmil_head_dim=512,
        hidden_dim=256,
        dropout=0.1,
    ):
        super().__init__()
        layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=transformer_ff_dim,
            dropout=dropout,
            batch_first=True,
            norm_first=True,
        )
        self.context_encoder = TransformerEncoder(
            layer,
            num_layers=num_encoder_layers,
            norm=nn.LayerNorm(input_feature_dim),
        )
        # Per-instance scalar gate in (0, 1): how much context to mix in.
        self.gate_net = nn.Sequential(
            nn.Linear(input_feature_dim, 64),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(64, 1),
            nn.Sigmoid(),
        )
        self.attention_aggregator = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=abmil_n_heads,
            head_dim=abmil_head_dim,
            dropout=dropout,
            gated=True,
        )
        self.bag_norm = nn.LayerNorm(input_feature_dim)
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict["features"]: [B, N, D]; returns [B] logits."""
        raw = x_dict["features"]  # [B, N, D]
        contextual = self.context_encoder(raw)  # [B, N, D]
        # Gate is computed from the *raw* features.
        mix = self.gate_net(raw)  # [B, N, 1]
        # Convex combination of raw and contextualized instance features.
        fused = (1 - mix) * raw + mix * contextual  # [B, N, D]
        # The aggregator accepts a plain tensor here; no dict wrapper needed.
        pooled = self.bag_norm(self.attention_aggregator(fused))  # [B, D]
        return self.classifier(pooled).squeeze(1)


# 5. LinearProbeModel
class LinearProbeModel(nn.Module):
    """Baseline: mean-pool instance features, then a single linear layer."""

    def __init__(self, input_feature_dim=feature_dim):
        super().__init__()
        self.input_feature_dim = input_feature_dim
        self.classifier = nn.Linear(self.input_feature_dim, 1)

    def forward(self, x_dict: Dict[str, torch.Tensor]) -> torch.Tensor:
        """x_dict["features"]: [B, N, D]; returns [B] logits."""
        pooled = x_dict["features"].mean(dim=1)  # average over instances -> [B, D]
        return self.classifier(pooled).squeeze(1)


# 6. MAEMILModel
class MAEMILModel(nn.Module):
    """Transformer MIL model with MAE-style random instance masking during
    training; at eval time all instances are encoded."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.1,
        mask_ratio=0.75,
    ):
        super().__init__()
        self.input_feature_dim = input_feature_dim
        # Fraction of instances dropped (masked out) during training.
        self.mask_ratio = mask_ratio
        layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer_encoder = TransformerEncoder(
            layer, num_layers=num_encoder_layers
        )
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict["features"]: [B, N, D]; returns [B] logits."""
        feats = x_dict["features"]
        batch, n_inst, dim = feats.shape

        if self.training:
            # Keep a random visible subset of instances (at least one).
            keep = max(1, int(n_inst * (1 - self.mask_ratio)))
            scores = torch.rand(batch, n_inst, device=feats.device)
            kept_ids = torch.argsort(scores, dim=1)[:, :keep]
            visible = torch.gather(
                feats, dim=1, index=kept_ids.unsqueeze(-1).expand(-1, -1, dim)
            )
            encoded = self.transformer_encoder(visible)
        else:
            # Evaluation uses every instance.
            encoded = self.transformer_encoder(feats)

        return self.classifier(encoded.mean(dim=1)).squeeze(1)


class MAEMILModel_CLSToken(nn.Module):
    """MAE-style MIL model that aggregates through a learnable CLS token.

    During training a random subset of instance tokens is kept (MAE masking);
    at eval time all instances are used. The CLS token is always kept and its
    encoder output serves as the bag representation."""

    def __init__(
        self,
        input_feature_dim=feature_dim,
        n_head=8,
        num_encoder_layers=2,
        dim_feedforward=512,
        hidden_dim=256,
        dropout=0.1,
        mask_ratio=0.75,
    ):
        super().__init__()
        self.input_feature_dim = input_feature_dim
        # Fraction of instance tokens dropped during training.
        self.mask_ratio = mask_ratio

        # Learnable CLS token, [1, 1, D] so it broadcasts to [B, 1, D].
        self.cls_token = nn.Parameter(torch.randn(1, 1, input_feature_dim))

        layer = TransformerEncoderLayer(
            d_model=input_feature_dim,
            nhead=n_head,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer_encoder = TransformerEncoder(
            layer, num_layers=num_encoder_layers
        )
        self.classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x_dict):
        """x_dict["features"]: [B, N, D]; returns [B] logits."""
        feats = x_dict["features"]
        batch, n_inst, dim = feats.shape

        # Broadcast the CLS token across the batch: [1, 1, D] -> [B, 1, D].
        cls = self.cls_token.expand(batch, -1, -1)

        if self.training:
            # MAE masking applies to instance tokens only; keep >= 1 of them.
            keep = max(1, int(n_inst * (1 - self.mask_ratio)))
            scores = torch.rand(batch, n_inst, device=feats.device)  # [B, N]
            kept_ids = torch.argsort(scores, dim=1)[:, :keep]  # [B, keep]
            # Gather the visible instance features: [B, keep, D].
            tokens = torch.gather(
                feats, dim=1, index=kept_ids.unsqueeze(-1).expand(-1, -1, dim)
            )
        else:
            # Evaluation: all instance tokens are visible.
            tokens = feats

        # CLS always occupies position 0 of the encoder input.
        encoded = self.transformer_encoder(torch.cat((cls, tokens), dim=1))

        # Bag representation = encoder output at the CLS position: [B, D].
        return self.classifier(encoded[:, 0]).squeeze(1)


# --- Placeholders ---
# Assumes feature_dim was defined externally (e.g. feature_dim = 1024)
# and that ABMILSlideEncoder is also defined.
#
# To make the code runnable, a mock ABMILSlideEncoder implementation follows:
# NOTE(review): this re-assignment overrides feature_dim = 1536 set earlier in
# this file; it changes the defaults of every class defined after this point
# (e.g. CLAMModel) but not of the classes defined above.
feature_dim = 1024

class ABMILSlideEncoder(nn.Module):
    """Simplified gated-attention MIL pooling (mock implementation).

    Only ``input_feature_dim`` and ``head_dim`` shape the layers; ``n_heads``,
    ``dropout``, ``gated`` and any extra kwargs are accepted for interface
    compatibility but not used.

    NOTE(review): this second definition shadows the earlier mock of the same
    name, and unlike it returns *unnormalized* attention scores (as the CLAM
    instance-clustering loss expects) rather than softmax weights.
    """

    def __init__(self, input_feature_dim, n_heads, head_dim, dropout, gated, **kwargs):
        super().__init__()
        # Gated attention: tanh branch elementwise-times sigmoid branch,
        # then projected to a scalar score per instance.
        self.attention_V = nn.Sequential(
            nn.Linear(input_feature_dim, head_dim),
            nn.Tanh()
        )
        self.attention_U = nn.Sequential(
            nn.Linear(input_feature_dim, head_dim),
            nn.Sigmoid()
        )
        self.attention_w = nn.Linear(head_dim, 1)

    def forward(self, x, return_raw_attention=False):
        """Pool [B, N, D] instances (tensor or {"features": ...} dict) into a
        [B, D] bag feature; optionally return raw [B, N] attention scores."""
        if isinstance(x, dict):
            x = x["features"]
        # Unnormalized per-instance scores: [B, N, head_dim] x2 -> [B, N, 1] -> [B, N]
        raw_scores = self.attention_w(
            self.attention_V(x) * self.attention_U(x)
        ).squeeze(-1)
        # Softmax over the instance axis gives the pooling weights: [B, N].
        weights = F.softmax(raw_scores, dim=1)
        # Weighted sum of instances: (B, 1, N) @ (B, N, D) -> (B, D).
        bag_feature = torch.bmm(weights.unsqueeze(1), x).squeeze(1)
        if return_raw_attention:
            # Raw (pre-softmax) scores, matching what CLAM's loss needs.
            return bag_feature, raw_scores
        return bag_feature
# --- End of placeholders ---


# 7. CLAMModel (completed as CLAM-SB)
class CLAMModel(nn.Module):
    """CLAM-SB: attention-pooled bag classifier plus an instance-level head.

    forward returns a dict with:
      - "bag_logit":       [B] (when n_classes == 1) or [B, n_classes]
      - "instance_logits": [B, N] (when n_classes == 1) or [B, N, n_classes]
      - "attention":       [B, N] raw (pre-softmax) attention scores, used to
                           build pseudo-labels for the instance clustering loss
    """

    def __init__(
        self,
        input_feature_dim=feature_dim,
        hidden_dim=256,
        n_heads=1,
        head_dim=512,
        dropout=0.1,
        gated=True,
        n_classes=1 # 1 -> binary classification (a single logit)
    ):
        super().__init__()

        # 1. Instance-level branch (drives the instance clustering loss).
        self.instance_classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, n_classes),
        )

        # 2. Bag-level branch (attention-based aggregation).
        self.attention_aggregator = ABMILSlideEncoder(
            freeze=False,
            input_feature_dim=input_feature_dim,
            n_heads=n_heads,
            head_dim=head_dim,
            dropout=dropout,
            gated=gated,
        )

        # 3. Bag-level classifier (drives the bag loss).
        self.bag_classifier = nn.Sequential(
            nn.Linear(input_feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, n_classes),
        )

    def forward(self, x_dict, return_raw_attention=False):
        """Full CLAM forward pass; see the class docstring for the returned
        dict. ``return_raw_attention`` is kept only for interface
        compatibility — attention is always included in the output."""
        feats = x_dict["features"]  # [B, N, D]

        # Instance branch: [B, N, D] -> [B, N, n_classes].
        per_instance = self.instance_classifier(feats)

        # Bag branch: pooled feature [B, D] + raw attention scores [B, N].
        bag_repr, raw_attn = self.attention_aggregator(
            feats, return_raw_attention=True
        )

        # Bag classifier: [B, D] -> [B, n_classes].
        bag_out = self.bag_classifier(bag_repr)

        # Binary case (n_classes == 1): collapse the trailing class dimension.
        if bag_out.shape[-1] == 1:
            bag_out = bag_out.squeeze(1)             # [B, 1] -> [B]
            per_instance = per_instance.squeeze(-1)  # [B, N, 1] -> [B, N]

        return {
            "bag_logit": bag_out,
            "instance_logits": per_instance,
            "attention": raw_attn,
        }
