import torch
import torch.nn as nn
import torch.nn.functional as F
import MinkowskiEngine as ME
class MoDBatchTransformerBlock(nn.Module):
    """Wrapper adding batch-level Mixture-of-Depths (MoD) routing to a transformer block.

    Only the top-k samples of a batch (k = capacity * batch_size) are passed
    through the wrapped transformer block; all other samples skip it via the
    residual connection. An auxiliary router is trained (with BCE against the
    real top-k decision) so that routing can be predicted at inference time.

    Attributes:
        transformer_block: Transformer block to be wrapped.
        router_conv (ME.MinkowskiConvolution): Convolution producing per-token
            router weight logits.
        aux_conv (ME.MinkowskiConvolution): Convolution producing auxiliary
            routing logits.
        capacity (float): Fraction of the batch routed through the wrapped
            block. Defaults to 0.25.
        aux_loss (torch.Tensor): BCE loss for the auxiliary router; set on
            each training-mode forward pass.

    Notes:
        MoD paper: https://arxiv.org/pdf/2404.02258
    """

    def __init__(
        self,
        transformer_block,
        hidden_size: int,
        capacity: float = 0.25,
    ):
        """Initialize the MoD wrapped transformer block.

        Args:
            transformer_block: Transformer block to be wrapped.
            hidden_size (int): Hidden size of the transformer block.
            capacity (float, optional): Fraction of the batch routed through
                the wrapped block. Defaults to 0.25.

        Raises:
            ValueError: If the capacity is not in the range (0, 1].

        Note:
            The original paper uses a capacity of 0.125; this implementation
            defaults to 0.25.
        """
        super().__init__()
        self.transformer_block = transformer_block
        # NOTE(review): MinkowskiConvolution expects an ME.SparseTensor input,
        # while forward() indexes x like a dense torch.Tensor — confirm the
        # intended input type with callers.
        self.router_conv = ME.MinkowskiConvolution(
            in_channels=hidden_size, out_channels=1, kernel_size=2, dimension=3
        )
        self.aux_conv = ME.MinkowskiConvolution(
            in_channels=hidden_size, out_channels=1, kernel_size=2, dimension=3
        )

        if capacity <= 0 or capacity > 1:
            raise ValueError(
                f"Capacity must be in the range (0, 1]. Got: {capacity}"
            )
        self.capacity = capacity
        self.d_model = hidden_size

    def forward(self, x: torch.Tensor, pos=None, key_padding_mask=None, voxel_inds=None):
        """Route the top-capacity fraction of the batch through the block.

        Args:
            x (torch.Tensor): Input features; assumed (batch, seq, hidden)
                — TODO confirm with callers.
            pos: Positional encodings. Currently unused (kept for interface
                compatibility).
            key_padding_mask: Unused; kept for interface compatibility.
            voxel_inds: Optional index applied to the router weights before
                summing over the sequence dimension.

        Returns:
            torch.Tensor: Same shape as ``x``; unselected samples pass
            through unchanged via the residual connection.
        """
        B = x.shape[0]

        # Scalar router-weight logits per token.
        router_weights: torch.Tensor = self.router_conv(x).squeeze(-1)
        router_weights = router_weights[voxel_inds]

        # Per-sample routing score: sum of token logits along the sequence.
        batch_router_weights = router_weights.sum(-1)

        if self.training:
            # Real top-k routing decision at batch level.
            k = int(B * self.capacity)
            topk_batch_indices = torch.topk(batch_router_weights, k, dim=0).indices

            # Binary labels for training the auxiliary router.
            aux_targets = torch.zeros(B, device=x.device)
            aux_targets[topk_batch_indices] = 1.0

            # Auxiliary logits; input detached so only aux_conv learns here.
            aux_logits = self.aux_conv(x.detach()).squeeze(-1).sum(dim=1)

            # Auxiliary loss (BCE) saved for the backward pass.
            self.aux_loss = F.binary_cross_entropy_with_logits(aux_logits, aux_targets)

            # In the train stage we route by the real top-k decision.
            topk_decision = aux_targets.bool()
        else:
            # In the eval stage we route by the auxiliary router prediction.
            aux_logits = self.aux_conv(x.detach()).squeeze(-1).sum(dim=1)
            # NOTE(review): this unsqueeze makes the eval-time mask shape
            # differ from the training branch — verify it is intentional.
            aux_logits = aux_logits.unsqueeze(1)
            topk_decision = torch.sigmoid(aux_logits) > 0.5

        # Tokens not routed for specialized computation skip it via the
        # residual connection.
        output = x.clone()

        # Batch-gather the selected samples and their router weights.
        selected_tokens_emb = x[topk_decision]
        selected_router_weights = router_weights[topk_decision].unsqueeze(-1)

        if selected_tokens_emb.shape[0] > 0:
            # Apply the transformer block to the selected tokens, scaled by
            # their router weights so routing stays differentiable.
            transformer_tokens_emb = (
                self.transformer_block(selected_tokens_emb) * selected_router_weights
            ).squeeze(1)

            # Scatter the processed tokens back into the output.
            output[topk_decision] = transformer_tokens_emb

        return output


if __name__ == "__main__":
    # Set the seed for reproducibility.
    torch.manual_seed(42)

    # Define the transformer block.
    # NOTE(review): nn.TransformerEncoderLayer defaults to batch_first=False,
    # i.e. it expects (seq, batch, hidden) input — confirm this matches the
    # (batch, seq, hidden) tensor constructed below.
    transformer_block = nn.TransformerEncoderLayer(d_model=512, nhead=1)

    # Wrap the transformer block with MoD; capacity=0.5 routes half the batch.
    mod_transformer_block = MoDBatchTransformerBlock(
        transformer_block, hidden_size=512, capacity=0.5
    )

    # Input tensor.
    # [Shape] x: (batch_size, sequence_length, hidden_size)
    x = torch.rand(100, 64, 512)

    # Forward pass.
    # NOTE(review): forward() feeds this dense tensor into an
    # ME.MinkowskiConvolution, which expects a MinkowskiEngine SparseTensor —
    # this smoke test likely fails at runtime as written; verify.
    output = mod_transformer_block(x)

    print(output.shape)