import torch
import torch.nn as nn
from monai.networks.blocks.convolutions import Convolution
from monai.networks.layers.factories import split_args
from smz.layers.sparseconvmoe import SparseConvMoE  # Assuming the file is in smz.layers

class UNetWithSparseConvMoE(nn.Module):
    """U-Net whose encoder/decoder stages are SparseConvMoE blocks, with
    optional nnUNet-style deep supervision.

    Architecture: ``len(features)`` encoder stages (max-pool ×2 between
    stages), a symmetric decoder with skip concatenation, and a final 1x1
    convolution producing ``out_channels`` maps.

    Args:
        in_channels: channels of the input tensor.
        out_channels: channels of every prediction head.
        features: per-stage channel widths, shallow to deep.
        spatial_dims: 2 for 2-D inputs, otherwise 3-D pooling/upsampling is used.
        num_experts, shared_experts, top_k, gate_hidden, tau, ema_momentum,
        eps, routing_mode: forwarded verbatim to every SparseConvMoE block.
        act, norm, dropout: layer factories forwarded to SparseConvMoE.
        enable_deep_supervision: when True, ``forward`` returns a list of
            predictions from high to low resolution instead of a single tensor.

    Note:
        Spatial input sizes must be divisible by ``2 ** (len(features) - 1)``
        so that pooled feature maps align with the skip connections.
    """

    def __init__(
        self,
        in_channels: int = 1,
        out_channels: int = 1,
        features: tuple | list = (32, 64, 128, 256),  # immutable default: avoids the shared-mutable-default pitfall
        spatial_dims: int = 2,
        num_experts: int = 4,
        shared_experts: int = 1,
        top_k: int = 1,
        gate_hidden: int = 128,
        tau: float = 0.5,
        ema_momentum: float = 0.9,
        eps: float = 1e-6,
        act: tuple | str | None = ("swish", {}),
        norm: tuple | str | None = ("group", {"num_groups": 16, 'affine': True}),
        dropout: tuple | str | float | None = None,
        routing_mode: str = 'hard',
        enable_deep_supervision: bool = True,  # Enable nnUNet-style deep supervision
    ):
        super().__init__()
        self.spatial_dims = spatial_dims
        self.features = list(features)  # own a copy so caller mutation can't affect us
        self.num_stages = len(features)
        self.enable_deep_supervision = enable_deep_supervision

        # Final 1x1 conv head (defined early to ensure the attribute exists).
        self.final_conv = Convolution(
            spatial_dims=spatial_dims,
            in_channels=features[0],
            out_channels=out_channels,
            kernel_size=1,
            strides=1,
            padding=0,
            bias=True,
            act=None,
            norm=None,
            dropout=None,
        )

        # Pooling / upsampling operators chosen by dimensionality. Both are
        # parameter-free, so sharing one instance across stages is safe.
        if spatial_dims == 2:
            self.pool = nn.MaxPool2d(2)
            self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.pool = nn.MaxPool3d(2)
            self.upsample = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)

        # Encoder: one SparseConvMoE per stage; channel width follows `features`.
        self.encoder = nn.ModuleList()
        in_ch = in_channels
        for feat in features:
            self.encoder.append(
                SparseConvMoE(
                    in_channels=in_ch,
                    out_channels=feat,
                    kernel_size=3,
                    num_experts=num_experts,
                    shared_experts=shared_experts,
                    top_k=top_k,
                    gate_hidden=gate_hidden,
                    tau=tau,
                    ema_momentum=ema_momentum,
                    eps=eps,
                    stride=1,
                    padding=1,
                    act=act,
                    norm=norm,
                    dropout=dropout,
                    spatial_dims=spatial_dims,
                    routing_mode=routing_mode,
                )
            )
            in_ch = feat

        # Decoder: each stage is [upsample, SparseConvMoE]; the MoE consumes the
        # upsampled features concatenated with the matching encoder skip.
        self.decoder = nn.ModuleList()
        for i in range(self.num_stages - 1):
            self.decoder.append(
                nn.Sequential(
                    self.upsample,
                    SparseConvMoE(
                        in_channels=features[self.num_stages - 1 - i] + features[self.num_stages - 2 - i],
                        out_channels=features[self.num_stages - 2 - i],
                        kernel_size=3,
                        num_experts=num_experts,
                        shared_experts=shared_experts,
                        top_k=top_k,
                        gate_hidden=gate_hidden,
                        tau=tau,
                        ema_momentum=ema_momentum,
                        eps=eps,
                        stride=1,
                        padding=1,
                        act=act,
                        norm=norm,
                        dropout=dropout,
                        spatial_dims=spatial_dims,
                        routing_mode=routing_mode,
                    )
                )
            )

        # One auxiliary 1x1 head per decoder stage for deep supervision.
        # aux_convs[-1] (full resolution) is kept for state-dict compatibility
        # but forward() uses final_conv for the full-resolution prediction.
        if self.enable_deep_supervision:
            self.aux_convs = nn.ModuleList([
                Convolution(
                    spatial_dims=spatial_dims,
                    in_channels=features[self.num_stages - 2 - i],
                    out_channels=out_channels,
                    kernel_size=1,
                    strides=1,
                    padding=0,
                    bias=True,
                    act=None,
                    norm=None,
                    dropout=None,
                )
                for i in range(self.num_stages - 1)
            ])

    def forward(self, x):
        """Run the network.

        Returns:
            A single tensor of shape ``(B, out_channels, *spatial)`` when deep
            supervision is disabled; otherwise a list of ``len(features) - 1``
            tensors ordered high resolution to low resolution, whose first
            element is the ``final_conv`` prediction.
        """
        # Encoder: record skip features, pool between stages.
        skips = []
        for enc in self.encoder[:-1]:  # all but the bottleneck
            x = enc(x)
            skips.append(x)
            x = self.pool(x)
        x = self.encoder[-1](x)  # bottleneck stage (no pooling after)

        # Decoder with deep supervision.
        aux_outputs = []
        n_dec = len(self.decoder)
        for i, dec in enumerate(self.decoder):
            x = dec[0](x)  # upsample
            x = torch.cat([x, skips[-(i + 1)]], dim=1)  # skip connection
            x = dec[1](x)  # SparseConvMoE
            # BUGFIX: previously the full-resolution aux head was returned and
            # the trained final_conv output was computed then discarded. Collect
            # aux predictions only for the lower-resolution stages; the
            # full-resolution prediction comes from final_conv below.
            if self.enable_deep_supervision and i < n_dec - 1:
                aux_outputs.append(self.aux_convs[i](x))

        # Full-resolution head.
        x = self.final_conv(x)

        if self.enable_deep_supervision:
            # len(features) - 1 outputs, high res first: [final, aux..., lowest res]
            return [x] + aux_outputs[::-1]
        return x

if __name__ == "__main__":
    # Smoke test: 2-D network, five stages, deep supervision on.
    net = UNetWithSparseConvMoE(
        in_channels=1,
        out_channels=2,
        features=[32, 64, 128, 256, 320],
        spatial_dims=2,
        enable_deep_supervision=True,
    )
    sample = torch.randn(1, 1, 256, 256)
    preds = net(sample)
    print("Number of outputs:", len(preds))
    # Expected resolutions run from high to low across the returned list.
    for i, out in enumerate(preds):
        print(f"Output {i} shape:", out.shape)