from typing import Sequence, Union, Optional
from torch import nn, Tensor
import torch
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule, OptMultiConfig
from mmengine.config import ConfigType

class SPPELANBottleneck(BaseModule):
    """SPP-ELAN bottleneck with a sequential max-pooling pyramid.

    The input is (optionally) projected to ``mid_channels`` by a 1x1 conv,
    then fed through a chain of stride-1 max-pooling layers. The pre-pooling
    feature plus every pooling output are concatenated along the channel
    dimension and projected to ``out_channels`` by a second 1x1 conv.

    Args:
        in_channels (int): Input channels.
        out_channels (int): Output channels.
        kernel_sizes (Sequence[int]): Pooling kernel sizes applied
            sequentially. Defaults to (5, 5, 5).
        use_conv_first (bool): Whether to apply the initial 1x1 projection
            convolution. Defaults to True.
        mid_channels_scale (float): Channel scale factor for the initial
            projection; only used when ``use_conv_first`` is True.
            Defaults to 0.5.
        conv_cfg (ConfigType): Config for convolution layers.
            Defaults to None.
        norm_cfg (ConfigType): Config for normalization layers.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (ConfigType): Config for activation layers.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (OptMultiConfig): Initialization config. Defaults to None.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_sizes: Sequence[int] = (5, 5, 5),
        use_conv_first: bool = True,
        mid_channels_scale: float = 0.5,
        conv_cfg: ConfigType = None,
        norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
        act_cfg: ConfigType = dict(type='SiLU', inplace=True),
        init_cfg: OptMultiConfig = None
    ):
        super().__init__(init_cfg=init_cfg)

        # Optional 1x1 projection; without it the pyramid operates on the
        # raw input channels.
        if use_conv_first:
            self.mid_channels = int(in_channels * mid_channels_scale)
            self.conv1 = ConvModule(
                in_channels,
                self.mid_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        else:
            self.mid_channels = in_channels
            self.conv1 = None

        # Stride-1 poolings with "same" padding: spatial size is preserved,
        # so the outputs can be concatenated channel-wise.
        self.poolings = nn.ModuleList([
            nn.MaxPool2d(kernel_size=size, stride=1, padding=size // 2)
            for size in kernel_sizes
        ])

        # One feature per pooling stage plus the pre-pooling feature.
        concat_channels = self.mid_channels * (len(kernel_sizes) + 1)
        self.conv2 = ConvModule(
            concat_channels,
            out_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, x: Tensor) -> Tensor:
        """Run the pooling pyramid and fuse all stages with a 1x1 conv."""
        out = x if self.conv1 is None else self.conv1(x)

        # Each stage pools the previous stage's output (sequential, not
        # parallel, pooling), accumulating one feature map per stage.
        pyramid = [out]
        for pooling in self.poolings:
            pyramid.append(pooling(pyramid[-1]))

        return self.conv2(torch.cat(pyramid, dim=1))


# Modified build_stage_layer method
def build_stage_layer(self, stage_idx: int, setting: list) -> list:
    """Build a stage layer.

    Args:
        stage_idx (int): The index of a stage layer.
        setting (list): The architecture setting of a stage layer, unpacked
            as (in_channels, out_channels, num_blocks, use_elan, use_spp).
    """
    raw_in, raw_out, raw_blocks, use_elan, use_spp = setting

    in_channels = make_divisible(raw_in, self.widen_factor)
    out_channels = make_divisible(raw_out, self.widen_factor)
    num_blocks = make_round(raw_blocks, self.deepen_factor)

    # Downsampling entry layer for the stage.
    layers = [ADown(in_channels, out_channels)]

    # Main blocks: ELAN1 or RepNCSPELAN4 with matching channel arguments.
    # NOTE(review): the hidden-channel args apply widen_factor to
    # out_channels, which was itself already widened above — confirm this
    # double scaling is intentional.
    for _ in range(num_blocks):
        hidden_args = (
            out_channels,
            out_channels,
            make_divisible(out_channels // 2, self.widen_factor),
            make_divisible(out_channels // 4, self.widen_factor),
        )
        if use_elan:
            layers.append(ELAN1(*hidden_args))
        else:
            layers.append(RepNCSPELAN4(*hidden_args, c5=1))

    # Optional SPP tail: SPPELANBottleneck configured to mirror the
    # original SPPELAN (three 5x5 poolings, 0.5 mid-channel scale, initial
    # conv enabled, backbone norm/act configs inherited).
    if use_spp:
        layers.append(SPPELANBottleneck(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_sizes=(5, 5, 5),
            mid_channels_scale=0.5,
            use_conv_first=True,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg))

    return layers