import torch
import torch.nn as nn
import torch.nn.functional as F
from monai.networks.blocks.convolutions import Convolution
from typing import List, Optional, Tuple, Union
from monai.networks.layers.factories import split_args
from smz.registry import MODELS

def optimize_group_norm_params(out_channels: int, num_groups: int = 16) -> int:
    """
    Optimize num_groups for GroupNorm to ensure it divides out_channels.

    Args:
        out_channels: number of output channels (expected >= 1)
        num_groups: desired number of groups (adjusted downward if needed)

    Returns:
        the largest integer g with 1 <= g <= num_groups that divides
        out_channels, so GroupNorm(g, out_channels) is always valid
    """
    # clamp into [1, out_channels]; without the lower bound a caller passing
    # num_groups <= 0 would hit ZeroDivisionError in the modulo below
    num_groups = max(1, min(num_groups, out_channels))

    # fast path: the requested value already divides evenly
    if out_channels % num_groups == 0:
        return num_groups

    # scan downward for the largest divisor strictly below num_groups;
    # g == 1 divides everything, so the loop always returns
    for g in range(num_groups - 1, 0, -1):
        if out_channels % g == 0:
            return g

    # unreachable for out_channels >= 1, kept as a safety net
    return 1


def optimize_norm_params(norm: Union[str, Tuple[str, dict], None], out_channels: int) -> Union[str, Tuple[str, dict], None]:
    """
    Optimize normalization parameters based on type.

    For group norm, adjusts ``num_groups`` so it evenly divides
    ``out_channels``; all other norm types pass through unchanged.

    Args:
        norm: normalization specification (name, (name, kwargs) tuple, or None)
        out_channels: number of output channels

    Returns:
        optimized normalization specification; the input is never mutated
    """
    if norm is None or norm == 'none':
        return norm

    ntype, kargs = split_args(norm)

    if ntype == "group":
        # copy before writing: split_args hands back the kwargs dict from the
        # spec tuple itself, so writing into it would mutate the caller's spec
        # (including shared mutable default-argument dicts).
        kargs = dict(kargs)
        kargs["num_groups"] = optimize_group_norm_params(
            out_channels, kargs.get("num_groups", 16)
        )
        return (ntype, kargs)

    return norm

@MODELS.register_module()
class SparseConvMoE(nn.Module):
    """Sparse Mixed-Expert Convolution layer.

    Routes each sample to a subset of expert convolutions selected by a
    learned gate, optionally combined with always-on shared experts.

    Args:
        in_channels: input channel count
        out_channels: output channel count
        kernel_size: conv kernel size
        num_experts: number of routing experts
        shared_experts: number of shared experts (always applied to all inputs)
        top_k: number of candidate experts considered per sample; in 'hard'
            mode each sample is assigned to its single highest-probability
            candidate that still has free capacity
        gate_hidden: hidden units in gating MLP
        tau: exponent used to scale logits by EMA usage (loss-free balancing)
        ema_momentum: momentum for EMA update of usage
        eps: small epsilon to avoid div-by-zero
        stride: convolution stride
        padding: convolution padding
        dilation: convolution dilation
        bias: whether to use bias in convolutions (ignored when a norm layer
            follows the conv, where a conv bias would be redundant)
        act: activation function for experts
        norm: normalization for experts
        dropout: dropout for experts
        spatial_dims: number of spatial dimensions (2 or 3)
        capacity_factor: factor for per-expert capacity calculation
        use_capacity: whether to enforce per-expert capacity limits
        fallback: fallback strategy when capacity exceeded ('shared', 'random', 'drop')
        routing_mode: routing mode ('hard' for sparse, 'soft_dense' for dense differentiable)
        softmax_temp: temperature for soft routing
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        num_experts: int = 4,
        shared_experts: int = 1,
        top_k: int = 1,
        gate_hidden: int = 128,
        tau: float = 0.5,
        ema_momentum: float = 0.9,
        eps: float = 1e-6,
        stride: int = 1,
        padding: Optional[int] = None,
        dilation: int = 1,
        bias: bool = True,
        # expert normalization/activation options
        act: tuple | str | None = ("swish", {}),
        norm: tuple | str | None = ("group", {"num_groups": 16, 'affine': True}),
        dropout: tuple | str | float | None = None,
        spatial_dims: int = 2,
        # routing options
        capacity_factor: float = 1.0,
        use_capacity: bool = False,
        fallback: str = 'shared',
        routing_mode: str = 'hard',
        softmax_temp: float = 1.0,
    ):
        super().__init__()

        assert spatial_dims in (2, 3), "spatial_dims must be 2 or 3"

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_experts = num_experts
        self.shared_experts = max(0, int(shared_experts))
        self.top_k = max(1, int(top_k))
        self.tau = tau
        self.ema_momentum = ema_momentum
        self.eps = eps
        self.spatial_dims = spatial_dims
        # capacity control (per-expert capacity = ceil(capacity_factor * B / E))
        self.capacity_factor = capacity_factor
        self.use_capacity = use_capacity
        # fallback strategy when capacity is exceeded: 'shared' | 'random' | 'drop'
        self.fallback = fallback
        # routing_mode: 'hard' (sparse top-k, batched per-expert) or
        # 'soft_dense' (dense differentiable, each expert applied to full batch)
        self.routing_mode = routing_mode
        # temperature for soft routing when routing_mode == 'soft_dense'
        self.softmax_temp = softmax_temp

        # choose pool classes based on spatial_dims
        AdaptiveAvgPool = nn.AdaptiveAvgPool2d if self.spatial_dims == 2 else nn.AdaptiveAvgPool3d

        # decide conv bias: if a normalization follows the conv, the conv bias
        # is redundant. Treat norm=None the same as the string 'none' — both
        # mean "no norm layer", so the user's bias choice must be honored.
        conv_bias = bias if norm in (None, 'none') else False

        # optimize norm parameters (e.g. make GroupNorm groups divide channels)
        norm = optimize_norm_params(norm, out_channels)

        # shared expert convs (applied to every sample) or None
        if self.shared_experts > 0:
            self.shared_convs = nn.ModuleList([
                Convolution(
                    spatial_dims=spatial_dims,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=stride,
                    kernel_size=kernel_size,
                    padding=padding,
                    dilation=dilation,
                    bias=conv_bias,
                    act=act,
                    norm=norm,
                    dropout=dropout
                )
                for _ in range(self.shared_experts)
            ])
        else:
            self.shared_convs = None

        # routing experts
        self.experts = nn.ModuleList([
            Convolution(
                spatial_dims=spatial_dims,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=stride,
                kernel_size=kernel_size,
                padding=padding,
                dilation=dilation,
                bias=conv_bias,
                act=act,
                norm=norm,
                dropout=dropout
            )
            for _ in range(num_experts)
        ])

        # gating: global average pool -> flatten -> LayerNorm -> MLP -> logits
        gate_norm_layer = nn.LayerNorm(in_channels)

        self.gate = nn.Sequential(
            AdaptiveAvgPool(1),  # produces (B, C, 1, 1) or (B, C, 1, 1, 1)
            nn.Flatten(),
            gate_norm_layer,
            nn.Linear(in_channels, gate_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(gate_hidden, num_experts),
        )

        # EMA usage buffer (initialized to uniform, small non-zero fractions)
        usage = torch.ones(num_experts, dtype=torch.float32) * (1.0 / num_experts) if num_experts > 0 else torch.zeros(0)
        self.register_buffer("usage_ema", usage)

    def forward(self, x: torch.Tensor):
        """Forward pass.

        Steps:
        1. Compute gate logits and adjust by EMA usage for loss-free balancing.
        2. Route each sample: in 'hard' mode, assign it to the highest-probability
           top-k expert with free capacity; in 'soft_dense' mode, blend all
           expert outputs by soft probabilities.
        3. Apply shared convs to all inputs (if present).
        4. For each expert with assigned samples, run its conv on those samples
           and scatter results back.
        5. Combine shared output + gated routed output (weighted by the gate
           probability of the selected expert).
        """
        return self._forward_impl(x)

    def _forward_impl(self, x: torch.Tensor):
        B, C, *spatial = x.shape

        # shared path: sum outputs of all shared experts (if any)
        if self.shared_convs is not None:
            shared_out = None
            for conv in self.shared_convs:
                y = conv(x)
                shared_out = y if shared_out is None else shared_out + y
        else:
            # scalar 0.0 so `shared_out + routed` degenerates to the routed path
            shared_out = 0.0

        # gating logits from pooled features
        logits = self.gate(x)  # (B, num_experts)

        # adjust logits by EMA usage: subtract tau * log(usage) so over-used
        # experts are penalized (loss-free load balancing)
        usage = self.usage_ema.clamp(min=self.eps)
        logits_adj = logits - self.tau * torch.log(usage.unsqueeze(0))
        logits_adj = torch.clamp(logits_adj, min=-10, max=10)  # prevent NaN in softmax

        probs = F.softmax(logits_adj, dim=-1)  # (B, E)

        device = x.device

        # routing modes
        if self.routing_mode == 'soft_dense':
            # differentiable dense routing: apply each expert to the full
            # batch and blend outputs by soft probabilities
            temp = float(self.softmax_temp)
            if temp <= 0:
                temp = 1.0
            probs_temp = F.softmax(logits_adj / temp, dim=-1)  # (B, E)

            routed_out = None
            counts = x.new_zeros(self.num_experts, dtype=torch.float32)
            for e, expert in enumerate(self.experts):
                y_e = expert(x)  # (B, C_out, ...)
                w = probs_temp[:, e].view(B, 1, *([1] * self.spatial_dims))
                weighted = y_e * w
                routed_out = weighted if routed_out is None else routed_out + weighted
                counts[e] = float(probs_temp[:, e].sum().item()) / float(B)

            out = shared_out + routed_out

        else:
            if self.num_experts == 0:
                # no routed experts: the shared path is the whole output
                # (note: skips the EMA update, which has nothing to track)
                return shared_out

            # 'hard' sparse routing with batched per-expert conv and optional
            # capacity/fallback
            k = min(self.top_k, self.num_experts)
            topk_vals, topk_idx = torch.topk(probs, k=k, dim=-1)  # (B, k)

            # routed output is allocated lazily from the first expert output
            # so its spatial shape/dtype are correct even when stride/padding
            # change the spatial size or the input is half precision
            routed_out = None

            # per-expert fraction of the batch, used only for the EMA update
            counts = x.new_zeros(self.num_experts, dtype=torch.float32)

            # compute per-expert capacity if enabled
            if self.use_capacity:
                capacity = max(1, int(self.capacity_factor * (B / float(self.num_experts))))
            else:
                capacity = None

            # greedy assignment: each sample goes to its highest-probability
            # top-k candidate that still has free capacity
            assigned_mask = torch.zeros(B, self.num_experts, dtype=torch.bool, device=device)
            expert_counts = torch.zeros(self.num_experts, dtype=torch.int32, device=device)

            for i in range(B):
                for j in range(k):
                    e = topk_idx[i, j].item()
                    if capacity is None or expert_counts[e] < capacity:
                        assigned_mask[i, e] = True
                        expert_counts[e] += 1
                        break  # assign to the first available candidate only

            # 'random' fallback re-homes dropped samples to the expert with
            # the most remaining capacity (deterministic despite the name);
            # 'shared'/'drop' simply leave the sample with the shared path.
            if self.use_capacity and self.fallback == 'random':
                remaining = capacity - expert_counts
                for i in range(B):
                    if assigned_mask[i].any():
                        continue
                    e = torch.argmax(remaining).item()
                    if remaining[e] > 0:
                        assigned_mask[i, e] = True
                        remaining[e] -= 1

            # run each expert once on its assigned mini-batch
            for e in range(self.num_experts):
                inds = torch.where(assigned_mask[:, e])[0]
                if len(inds) == 0:
                    continue
                x_e = torch.index_select(x, 0, inds)  # avoids CUDA advanced-indexing issues
                y_e = self.experts[e](x_e)
                if routed_out is None:
                    # match the experts' actual output shape and dtype
                    routed_out = x.new_zeros((B, *y_e.shape[1:]), dtype=y_e.dtype)
                # gate weight of expert e for each of its assigned samples
                weights = topk_vals[inds, (topk_idx[inds] == e).nonzero(as_tuple=True)[1]]
                w = weights.view(-1, 1, *([1] * self.spatial_dims))
                # accumulate per sample to avoid CUDA advanced-indexing issues
                for i in range(len(inds)):
                    routed_out[inds[i]] += y_e[i] * w[i]
                # detach: counts feed only the no-grad EMA statistics below
                counts[e] = weights.detach().sum() / B

            if routed_out is None:
                # every sample was dropped (capacity exhausted, no fallback):
                # only the shared path contributes
                out = shared_out
            else:
                out = shared_out + routed_out

        with torch.no_grad():
            # counts is the fraction of the batch assigned to each expert
            counts = counts.to(self.usage_ema.device)
            self.usage_ema.mul_(self.ema_momentum).add_(counts * (1.0 - self.ema_momentum))

        return out

    def get_usage(self) -> torch.Tensor:
        """Return the current EMA usage (normalized fractions)."""
        return self.usage_ema.clone()
    
if __name__ == "__main__":
    # quick smoke test: sparse MoE configuration
    model = SparseConvMoE(16, 32, kernel_size=3, num_experts=6, top_k=3, shared_experts=1)
    # other configurations worth trying:
    #   SparseConvMoE(16, 32, kernel_size=3, num_experts=0, top_k=1, shared_experts=4)  # dense
    #   SparseConvMoE(16, 32, kernel_size=3, num_experts=4, top_k=1, shared_experts=0, routing_mode='soft')  # soft
    batch = torch.randn(8, 16, 32, 32)
    result = model(batch)
    print(model)
    print("out", result.shape)