# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union

import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.backbones.csp_darknet import CSPLayer, Focus
from mmdet.utils import ConfigType, OptMultiConfig
from ..utils import make_divisible, make_round
from mmengine.model import constant_init, kaiming_init
from ..layers import RepNCSPELAN4, SPPELAN, ELAN1, Silence , ADown, Conv  # ， Silence， SPPELAN, ADown

from mmyolo.registry import MODELS
from ..layers import CSPLayerWithTwoConv, SPPFBottleneck, SPPELANBottleneck
from ..utils import make_divisible, make_round
from .base_backbone import BaseBackbone
import warnings

class SimAM(torch.nn.Module):
    """SimAM: a simple, parameter-free attention module (Yang et al., ICML 2021).

    Computes an energy-based per-position weight from the spatial statistics
    of each channel and rescales the input by the sigmoid of that energy.

    Args:
        e_lambda (float): Regularization constant added to the variance term
            for numerical stability. Defaults to 1e-4.
    """

    def __init__(self, e_lambda: float = 1e-4):
        super().__init__()
        self.activ = torch.nn.Sigmoid()
        self.e_lambda = e_lambda

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply SimAM attention; the output has the same shape as ``x``."""
        _, _, h, w = x.size()
        # n = (spatial positions - 1), the divisor of the variance estimate.
        n = w * h - 1
        x_mu = torch.mean(x, dim=[2, 3], keepdim=True)
        y = (x - x_mu).pow(2)
        # Energy per the SimAM paper: the `+ 0.5` offset is added AFTER the
        # division. (The previous code placed it inside the denominator,
        # which deviates from the reference formula.)
        y = y / (4 * (torch.sum(y, dim=[2, 3], keepdim=True) / n
                      + self.e_lambda)) + 0.5
        return x * self.activ(y)

# class ADown(torch.nn.Module):
#     """YOLOv9's efficient downsampling module"""
#     def __init__(self, c1, c2, norm_cfg, act_cfg):
#         super().__init__()
#         self.c = c2 // 2
#         self.cv1 = ConvModule(c1, self.c, 3, 2, 1, norm_cfg=norm_cfg, act_cfg=act_cfg)
#         self.cv2 = ConvModule(c1, self.c, 1, 1, 0, norm_cfg=norm_cfg, act_cfg=act_cfg)
#         self.cv3 = ConvModule(self.c, self.c, 3, 2, 1, norm_cfg=norm_cfg, act_cfg=act_cfg)
#         self.cv4 = ConvModule(self.c, self.c, 1, 1, 0, norm_cfg=norm_cfg, act_cfg=act_cfg)
#         self.mp = torch.nn.MaxPool2d(2, 2)
#         self.cv5 = ConvModule(4 * self.c, c2, 1, 1, 0, norm_cfg=norm_cfg, act_cfg=act_cfg)

#     def forward(self, x):
#         x1 = self.cv1(x)
#         x2 = self.mp(x)
#         x3 = self.cv3(x2)
#         x4 = self.cv4(x3)
#         x5 = torch.cat([x1, x2, x3, x4], 1)
#         return self.cv5(x5)

        
class EELAN(nn.Module):
    """Extended ELAN aggregation block.

    Projects the input to a hidden width with a 1x1 conv, runs ``depth``
    stacked ELAN1 mini-blocks, then concatenates every intermediate output
    and fuses the result back to ``c2`` channels with a final 1x1 conv.

    Args:
        c1 (int): Input channel count.
        c2 (int): Output channel count.
        depth (int): Number of stacked ELAN1 blocks. Defaults to 1.
        width (float): Width multiplier applied to ``c2`` to size the hidden
            channels (rounded to a multiple of 8). Defaults to 1.0.
    """

    def __init__(self, c1, c2, depth=1, width=1.0):
        super().__init__()
        hidden = make_divisible(c2 * width, 8)
        # 1x1 entry projection into the hidden width.
        self.proj = ConvModule(c1, hidden, 1, 1)
        # Chain of identical ELAN1-style mini-blocks.
        self.blocks = nn.ModuleList(
            ELAN1(hidden, hidden, hidden, hidden) for _ in range(depth))
        # Fuses the concatenation of all intermediate outputs back to c2.
        self.out_proj = ConvModule(hidden * depth, c2, 1, 1)

    def forward(self, x):
        """Run the projection, block chain, and fusion; returns c2 channels."""
        out = self.proj(x)
        feats = []
        for blk in self.blocks:
            out = blk(out)
            feats.append(out)
        return self.out_proj(torch.cat(feats, dim=1))

# Adapted from mmyolo's csp_darknet.py; the class below modifies the
# original YOLOv8 backbone.
@MODELS.register_module()
class YOLOv8CSPDarknet(BaseBackbone):
    """CSP-Darknet backbone used in YOLOv8.

    This variant replaces the per-stage ``CSPLayerWithTwoConv`` with an
    :class:`EELAN` aggregation block; the SPPF bottleneck on the last stage
    is kept.

    Args:
        arch (str): Architecture of CSP-Darknet, from {P5}.
            Defaults to P5.
        last_stage_out_channels (int): Final layer output channel.
            Defaults to 1024.
        plugins (list[dict]): List of plugins for stages, each dict contains:
            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.25.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.25.
        input_channels (int): Number of input image channels. Defaults to: 3.
        out_indices (Tuple[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Defaults to dict(type='BN', requires_grad=True).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Defaults to False.
        init_cfg (Union[dict,list[dict]], optional): Initialization config
            dict. Defaults to None.

    Example:
        >>> from mmyolo.models import YOLOv8CSPDarknet
        >>> import torch
        >>> model = YOLOv8CSPDarknet()
        >>> model.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = model(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 320, 52, 52)
        (1, 640, 26, 26)
        (1, 1280, 13, 13)
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, add_identity, use_spp
    # the final out_channels will be set according to the param.
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 6, True, False], [512, None, 3, True, True]],
    }

    def __init__(self,
                 arch: str = 'P5',
                 last_stage_out_channels: int = 1024,
                 plugins: Union[dict, List[dict]] = None,
                 deepen_factor: float = 1.25,
                 widen_factor: float = 1.25,
                 input_channels: int = 3,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 norm_eval: bool = False,
                 init_cfg: OptMultiConfig = None):
        # NOTE(review): this mutates the class-level arch_settings shared by
        # all instances; kept for parity with upstream mmyolo.
        self.arch_settings[arch][-1][1] = last_stage_out_channels
        super().__init__(
            self.arch_settings[arch],
            deepen_factor,
            widen_factor,
            input_channels=input_channels,
            out_indices=out_indices,
            plugins=plugins,
            frozen_stages=frozen_stages,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            init_cfg=init_cfg)

    def build_stem_layer(self) -> nn.Module:
        """Build a stem layer: a single stride-2 3x3 conv."""
        return ConvModule(
            self.input_channels,
            make_divisible(self.arch_setting[0][0], self.widen_factor),
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build a stage layer.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_channels, out_channels, num_blocks, add_identity, use_spp = setting

        in_channels = make_divisible(in_channels, self.widen_factor)
        out_channels = make_divisible(out_channels, self.widen_factor)
        num_blocks = make_round(num_blocks, self.deepen_factor)
        stage = []

        # Stride-2 downsampling conv at the head of every stage.
        conv_layer = ConvModule(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage.append(conv_layer)

        # EELAN replaces the stock CSPLayerWithTwoConv for every stage.
        # NOTE(review): out_channels is already widened above, and EELAN
        # applies make_divisible(c2 * width, 8) internally, so the hidden
        # width is scaled by widen_factor twice — confirm this is intended.
        elan = EELAN(
            out_channels,
            out_channels,
            depth=num_blocks,
            width=self.widen_factor)
        stage.append(elan)

        if use_spp:
            spp = SPPFBottleneck(
                out_channels,
                out_channels,
                kernel_sizes=5,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            stage.append(spp)
        return stage

    def init_weights(self):
        """Initialize the parameters."""
        if self.init_cfg is None:
            for m in self.modules():
                if isinstance(m, torch.nn.Conv2d):
                    # In order to be consistent with the source code,
                    # reset the Conv2d initialization parameters
                    m.reset_parameters()
        else:
            super().init_weights()

@MODELS.register_module()
class YOLOv5CSPDarknet(BaseBackbone):
    """CSP-Darknet backbone used in YOLOv5.

    Each stage is a stride-2 downsampling conv followed by a CSP layer;
    the final stage appends an SPPF bottleneck.

    Args:
        arch (str): Architecture of CSP-Darknet, from {P5, P6}.
            Defaults to P5.
        plugins (list[dict]): List of plugins for stages, each dict contains:
            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels (int): Number of input image channels. Defaults to: 3.
        out_indices (Tuple[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Defaults to dict(type='BN', requires_grad=True).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Defaults to False.
        init_cfg (Union[dict,list[dict]], optional): Initialization config
            dict. Defaults to None.
    Example:
        >>> from mmyolo.models import YOLOv5CSPDarknet
        >>> import torch
        >>> model = YOLOv5CSPDarknet()
        >>> model.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = model(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """
    # Per-stage settings, left to right:
    # in_channels, out_channels, num_blocks, add_identity, use_spp
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 9, True, False], [512, 1024, 3, True, True]],
        'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 9, True, False], [512, 768, 3, True, False],
               [768, 1024, 3, True, True]]
    }

    def __init__(self,
                 arch: str = 'P5',
                 plugins: Union[dict, List[dict]] = None,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 input_channels: int = 3,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 norm_eval: bool = False,
                 init_cfg: OptMultiConfig = None):
        super().__init__(
            self.arch_settings[arch],
            deepen_factor,
            widen_factor,
            input_channels=input_channels,
            out_indices=out_indices,
            plugins=plugins,
            frozen_stages=frozen_stages,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            init_cfg=init_cfg)

    def build_stem_layer(self) -> nn.Module:
        """Build the stem: a single stride-2 6x6 conv."""
        stem_channels = make_divisible(self.arch_setting[0][0],
                                       self.widen_factor)
        return ConvModule(
            self.input_channels,
            stem_channels,
            kernel_size=6,
            stride=2,
            padding=2,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build one stage.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_ch, out_ch, depth, shortcut, with_spp = setting

        # Scale channels and depth by the width/depth multipliers.
        in_ch = make_divisible(in_ch, self.widen_factor)
        out_ch = make_divisible(out_ch, self.widen_factor)
        depth = make_round(depth, self.deepen_factor)

        layers = [
            # Stride-2 downsampling conv.
            ConvModule(
                in_ch,
                out_ch,
                kernel_size=3,
                stride=2,
                padding=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg),
            # Cross-stage-partial layer.
            CSPLayer(
                out_ch,
                out_ch,
                num_blocks=depth,
                add_identity=shortcut,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg),
        ]
        if with_spp:
            # SPPF bottleneck on the final stage only.
            layers.append(
                SPPFBottleneck(
                    out_ch,
                    out_ch,
                    kernel_sizes=5,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
        return layers

    def init_weights(self):
        """Initialize the parameters."""
        if self.init_cfg is not None:
            super().init_weights()
            return
        # In order to be consistent with the source code, reset every
        # Conv2d back to PyTorch's default initialization.
        for module in self.modules():
            if isinstance(module, torch.nn.Conv2d):
                module.reset_parameters()

class GELANBlock(nn.Module):
    """GELAN block in the style of YOLOv9.

    Two parallel 1x1 branches split the input; one branch is refined by a
    stack of 3x3 conv pairs, then both are concatenated and fused by a
    final 1x1 conv.

    Args:
        in_channels (int): Input channel count.
        out_channels (int): Output channel count.
        num_blocks (int): Number of 3x3 conv pairs on the main branch.
        expand_ratio (float): Hidden-width ratio relative to ``out_channels``.
            Defaults to 0.5.
        norm_cfg (dict, optional): Norm layer config for every ConvModule.
        act_cfg (dict, optional): Activation config for every ConvModule.
    """

    def __init__(self, in_channels, out_channels, num_blocks,
                 expand_ratio=0.5, norm_cfg=None, act_cfg=None):
        super().__init__()
        hidden = int(out_channels * expand_ratio)

        # Parallel 1x1 reductions: conv1 feeds the refined branch,
        # conv2 is the pass-through branch.
        self.conv1 = ConvModule(
            in_channels, hidden, 1, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.conv2 = ConvModule(
            in_channels, hidden, 1, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # num_blocks pairs of 3x3 convs on the main branch (sequential,
        # no skip-add within a pair).
        pairs = []
        for _ in range(num_blocks):
            pairs.append(
                nn.Sequential(
                    ConvModule(hidden, hidden, 3, padding=1,
                               norm_cfg=norm_cfg, act_cfg=act_cfg),
                    ConvModule(hidden, hidden, 3, padding=1,
                               norm_cfg=norm_cfg, act_cfg=act_cfg)))
        self.blocks = nn.Sequential(*pairs)

        # 1x1 fusion of both branches back to the requested output width.
        self.aggregation = ConvModule(
            hidden * 2, out_channels, 1, norm_cfg=norm_cfg, act_cfg=act_cfg)

    def forward(self, x):
        """Run both branches and fuse; returns ``out_channels`` channels."""
        refined = self.blocks(self.conv1(x))
        passthrough = self.conv2(x)
        return self.aggregation(torch.cat((refined, passthrough), dim=1))


# This is the backbone variant based on YOLOv9
# @MODELS.register_module()
# class YOLOv8CSPDarknet(BaseBackbone):
#     print("this YOLOv9 is from the ENV log46 Mish")
#     # ========================
#     # Log46
#     arch_settings = {
#         'P5': [[64, 128, 3, True, False], 
#             [128, 256, 6, True, False],
#             [256, 512, 9, True, False],  # Deeper stage
#             [512, None, 3, True, True]],
#     }

#     def __init__(self,
#                  arch: str = 'P5',
#                  last_stage_out_channels: int = 1024,
#                  plugins: Union[dict, List[dict]] = None,
#                  deepen_factor: float = 1.0,
#                  widen_factor: float = 1.0,
#                  input_channels: int = 3,
#                  out_indices: Tuple[int] = (2, 3, 4), # original (2, 3, 4),
#                  frozen_stages: int = -1,
#                  norm_cfg: ConfigType = dict(
#                      type='BN', momentum=0.03, eps=0.001),
#                  act_cfg: ConfigType = dict(type='Mish', inplace=True), # SiLU, Mish
#                  norm_eval: bool = False,
#                  init_cfg: OptMultiConfig = None):
#         self.arch_settings[arch][-1][1] = last_stage_out_channels
#         super().__init__(
#             self.arch_settings[arch],
#             deepen_factor,
#             widen_factor,
#             input_channels=input_channels,
#             out_indices=out_indices,
#             plugins=plugins,
#             frozen_stages=frozen_stages,
#             norm_cfg=norm_cfg,
#             act_cfg=act_cfg,
#             norm_eval=norm_eval,
#             init_cfg=init_cfg)

#     def build_stem_layer(self) -> nn.Module:
#         """Build a stem layer."""
#         return ConvModule(
#             self.input_channels,
#             make_divisible(self.arch_setting[0][0], self.widen_factor),
#             kernel_size=3,
#             stride=2,
#             padding=1,
#             norm_cfg=self.norm_cfg,
#             act_cfg=self.act_cfg)

#     def build_stage_layer(self, stage_idx: int, setting: list) -> list:
#         """Build a stage layer.

#         Args:
#             stage_idx (int): The index of a stage layer.
#             setting (list): The architecture setting of a stage layer.
#         """
#         in_channels, out_channels, num_blocks, add_identity, use_spp = setting

#         in_channels = make_divisible(in_channels, self.widen_factor)
#         out_channels = make_divisible(out_channels, self.widen_factor)
#         num_blocks = make_round(num_blocks, self.deepen_factor)

#         stage = []
#         # Downsample Conv
#         stage.append(ConvModule(
#             in_channels, 
#             out_channels, 
#             3, 
#             stride=2, 
#             padding=1, 
#             norm_cfg=self.norm_cfg, 
#             act_cfg=self.act_cfg))

#         # Replace CSPLayer with GELAN
#         stage.append(GELANBlock(
#             out_channels, 
#             out_channels, 
#             num_blocks=num_blocks,
#             norm_cfg=self.norm_cfg, 
#             act_cfg=self.act_cfg))

#         if use_spp:
#             stage.append(SPPFBottleneck(
#                 out_channels, 
#                 out_channels, 
#                 kernel_sizes=5, 
#                 norm_cfg=self.norm_cfg, 
#                 act_cfg=self.act_cfg))
#         return stage

#     def init_weights(self):
#         """Initialize the parameters."""
#         if self.init_cfg is None:
#             for m in self.modules():
#                 if isinstance(m, torch.nn.Conv2d):
#                     nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
#                     m.weight.data *= 0.911  # Compensate for Mish variance
#                     # In order to be consistent with the source code,
#                     # reset the Conv2d initialization parameters
#                     # m.reset_parameters()
#         else:
#             super().init_weights()

# ====================
    # def __init__(self,
    #              arch: str = 'P5',
    #              last_stage_out_channels: int = 1024,
    #              plugins: Union[dict, List[dict]] = None,
    #              deepen_factor: float = 1.0,
    #              widen_factor: float = 1.0,
    #              input_channels: int = 3,
    #              out_indices: Tuple[int] = (2, 3, 4), # original (2, 3, 4),
    #              frozen_stages: int = -1,
    #              norm_cfg: ConfigType = dict(
    #                  type='BN', momentum=0.03, eps=0.001),
    #              act_cfg: ConfigType = dict(type='SiLU', inplace=True),
    #              norm_eval: bool = False,
    #              init_cfg: OptMultiConfig = None):
    #     self.arch_settings[arch][-1][1] = last_stage_out_channels
    #     super().__init__(
    #         self.arch_settings[arch],
    #         deepen_factor,
    #         widen_factor,
    #         input_channels=input_channels,
    #         out_indices=out_indices,
    #         plugins=plugins,
    #         frozen_stages=frozen_stages,
    #         norm_cfg=norm_cfg,
    #         act_cfg=act_cfg,
    #         norm_eval=norm_eval,
    #         init_cfg=init_cfg)

    # def build_stem_layer(self) -> nn.Module:
    #     """Build a stem layer."""
    #     return ConvModule(
    #         self.input_channels,
    #         make_divisible(self.arch_setting[0][0], self.widen_factor),
    #         kernel_size=3,
    #         stride=2,
    #         padding=1,
    #         norm_cfg=self.norm_cfg,
    #         act_cfg=self.act_cfg)

    # def build_stage_layer(self, stage_idx: int, setting: list) -> list:
    #     """Build a stage layer.

    #     Args:
    #         stage_idx (int): The index of a stage layer.
    #         setting (list): The architecture setting of a stage layer.
    #     """
    #     # Unpack all parameters from arch_settings
    #     in_channels, c2, c3, c4, num_blocks, use_adown = setting

    #     # Apply width scaling
    #     in_channels = make_divisible(in_channels, self.widen_factor)
    #     c2 = make_divisible(c2, self.widen_factor)
    #     c3 = make_divisible(c3, self.widen_factor)
    #     c4 = make_divisible(c4, self.widen_factor)
    #     num_blocks = make_round(num_blocks, self.deepen_factor)


    #     stage = []

    #     # 1. Always add RepNCSPELAN4 first
    #     rep_layer = RepNCSPELAN4(
    #         c1=in_channels,
    #         c2=c2,
    #         c3=c3,
    #         c4=c4,
    #         c5=num_blocks,
    #     )
    #     stage.append(rep_layer)

    #     # 2. Add ADown after RepNCSPELAN4 if needed
    #     if use_adown:
    #         adown_layer = ADown(
    #             c1=c2,
    #             c2=c2  # Maintain same channel count
    #         )
    #         stage.append(adown_layer)
    #         spp = SPPELAN(
    #             c2,
    #             c2,
    #             768 // 2 
    #         )
    #         stage.append(spp)
    #     print("stage ::::", stage)
    #     return stage

    # def init_weights(self):
    #     """Initialize the parameters."""
    #     if self.init_cfg is None:
    #         for m in self.modules():
    #             if isinstance(m, torch.nn.Conv2d):
    #                 # In order to be consistent with the source code,
    #                 # reset the Conv2d initialization parameters
    #                 m.reset_parameters()
    #     else:
    #         super().init_weights()
    # ====================

# @MODELS.register_module()
# class YOLOv8CSPDarknet(BaseBackbone): # YOLOv9Backbone - > YOLOv8CSPDarknet 
    # From left to right:
    # print("YOLOv9Backbone ====================> YOLOv9Backbone 1")

    # """YOLOv9 architecture using YOLOv8-compatible config structure."""
    
    # arch_settings = {
    #     'P5': [
    #         # [in_channels, out_channels, num_blocks, add_identity, use_adown]
    #         [64, 128, 1, True, True],    # stage0 (P3/8)
    #         [128, 256, 1, True, True],   # stage1 (P4/16)
    #         [256, 512, 1, True, True],   # stage2 (P5/32)
    #         [512, 1024, 1, True, False] # stage3 (final)
    #     ]
    # }

    # def __init__(self,
    #              arch: str = 'P5',
    #              last_stage_out_channels: int = 1024,
    #              deepen_factor: float = 1.0,
    #              widen_factor: float = 1.0,
    #              input_channels: int = 3,
    #              out_indices: Tuple[int] = (2, 3, 4),
    #              frozen_stages: int = -1,
    #              norm_cfg: ConfigType = dict(
    #                  type='BN', momentum=0.03, eps=0.001),
    #              act_cfg: ConfigType = dict(type='SiLU', inplace=True),
    #              **kwargs):
        
    #     # Create instance-specific arch settings
    #     self.arch_setting = [list(s) for s in self.arch_settings[arch]]
    #     self.arch_setting[-1][1] = last_stage_out_channels
        
    #     super().__init__(
    #         self.arch_settings[arch],
    #         deepen_factor=deepen_factor,
    #         widen_factor=widen_factor,
    #         input_channels=input_channels,
    #         out_indices=out_indices,
    #         frozen_stages=frozen_stages,
    #         norm_cfg=norm_cfg,
    #         act_cfg=act_cfg,
    #         **kwargs
    #     )

    # def build_stem_layer(self):
    #     """YOLOv9 Focus stem implementation."""
    #     # Access first stage parameters from instance settings
    #     first_stage_in = self.arch_setting[0][0]
    #     stem_channels = make_divisible(first_stage_in, self.widen_factor)
        
    #     return Focus(
    #         self.input_channels,
    #         stem_channels,
    #         kernel_size=3,
    #         norm_cfg=self.norm_cfg,
    #         act_cfg=self.act_cfg
    #     )


    # def build_stage_layer(self, stage_idx: int, setting: list):
    #     """YOLOv9 stage layer with config compatibility."""
    #     in_channels, out_channels, num_blocks, add_identity, use_adown = setting

    #     in_channels = make_divisible(in_channels, self.widen_factor)
    #     out_channels = make_divisible(out_channels, self.widen_factor)
    #     num_blocks = make_round(num_blocks, self.deepen_factor)

    #     c3 = out_channels * 2
    #     c4 = out_channels // 2
    #     c5 = num_blocks

    #     stage = nn.Sequential(
    #         Silence(),
    #         ConvModule(
    #             in_channels,
    #             out_channels,
    #             kernel_size=3,
    #             stride=2,
    #             padding=1,
    #             norm_cfg=self.norm_cfg,
    #             act_cfg=self.act_cfg
    #         ),
    #         RepNCSPELAN4(
    #             c1 = out_channels,
    #             c2 = out_channels,
    #             c3 = c3,
    #             c4 = c4,
    #             c5 = c5,
    #         )
    #     )

    #     if use_adown:
    #         stage.add_module('adown', ADown(out_channels, out_channels))

    #     return stage

    # def init_weights(self):
    #     for m in self.modules():
    #         if isinstance(m, nn.Conv2d):
    #             nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
    #         elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
    #             nn.init.constant_(m.weight, 1)
    #             nn.init.constant_(m.bias, 0)



# @MODELS.register_module()
# class YOLOv8CSPDarknet(BaseBackbone):
#     """CSP-Darknet backbone used in YOLOv8 with Transformer-based attention (replacing CBAM+Conv)."""

#     arch_settings = {
#         'P5': [[64, 128, 3, True, False],
#                [128, 256, 6, True, False],
#                [256, 512, 6, True, False],
#                [512, None, 3, True, True]],
#     }

#     def __init__(self,
#                  arch: str = 'P5',
#                  last_stage_out_channels: int = 1024,
#                  plugins: Union[dict, List[dict]] = None,
#                  deepen_factor: float = 1.0,
#                  widen_factor: float = 1.0,
#                  input_channels: int = 3,
#                  out_indices: Tuple[int] = (2, 3, 4),
#                  frozen_stages: int = -1,
#                  norm_cfg = dict(type='BN', momentum=0.03, eps=0.001),
#                  act_cfg = dict(type='SiLU', inplace=True),
#                  norm_eval: bool = False,
#                  init_cfg=None):
#         self.arch_settings[arch][-1][1] = last_stage_out_channels
#         # In real code, call super(..., your actual arguments)
#         super().__init__()
#         self.arch_setting = self.arch_settings[arch]
#         self.input_channels = input_channels
#         self.out_indices = out_indices
#         self.frozen_stages = frozen_stages
#         self.norm_cfg = norm_cfg
#         self.act_cfg = act_cfg
#         self.norm_eval = norm_eval
#         self.init_cfg = init_cfg

#         self.deepen_factor = deepen_factor
#         self.widen_factor = widen_factor
#         self.plugins = plugins

#         # 1) Build stem
#         self.stem = self.build_stem_layer()

#         # 2) Build stages
#         self.stages = []
#         for idx, setting in enumerate(self.arch_setting):
#             stage_modules = self.build_stage_layer(idx, setting)
#             stage = nn.Sequential(*stage_modules)
#             self.stages.append(stage)
#         self.stages = nn.ModuleList(self.stages)

#         # In real code: self.init_weights() or similar
#         self.init_weights()

#     def build_stem_layer(self) -> nn.Module:
#         """Build a stem layer with EfficientViT instead of CBAM."""
#         # Replace 'ConvWithCBAM' with 'ConvWithEfficientViT'
#         return ConvWithEfficientViT(
#             c1=self.input_channels,
#             c2=make_divisible(self.arch_setting[0][0], self.widen_factor),
#             k=3,
#             s=2,
#             # Additional transformer params
#             embed_dim=512,   # Adjust to your preference
#             num_heads=4,
#             mlp_ratio=4.0,
#             dropout=0.0
#         )

#     def build_stage_layer(self, stage_idx: int, setting: list) -> list:
#         """Build a stage layer with a ConvWithEfficientViT + CSP + SPPELAN (unchanged)."""
#         in_channels, out_channels, num_blocks, add_identity, use_spp = setting

#         in_channels = make_divisible(in_channels, self.widen_factor)
#         out_channels = make_divisible(out_channels, self.widen_factor)
#         num_blocks = make_round(num_blocks, self.deepen_factor)

#         stage = []
#         # 1) Replace original 'ConvWithCBAM' with 'ConvWithEfficientViT'
#         conv_layer = ConvWithEfficientViT(
#             c1=in_channels,
#             c2=out_channels,
#             k=3,
#             s=2,
#             embed_dim=512,  # Adjust or vary by stage
#             num_heads=4,
#             mlp_ratio=4.0,
#             dropout=0.0
#         )
#         stage.append(conv_layer)

#         # 2) CSP Layer (unchanged)
#         csp_layer = CSPLayer(
#             out_channels,
#             out_channels,
#             num_blocks=num_blocks,
#             add_identity=add_identity,
#             norm_cfg=self.norm_cfg,
#             act_cfg=self.act_cfg
#         )
#         stage.append(csp_layer)

#         # 3) SPPELAN (unchanged)
#         sppelan_layer = SPPELAN(
#             out_channels,
#             out_channels,
#             num_blocks
#         )
#         stage.append(sppelan_layer)


#         spp = SPPFBottleneck(
#             out_channels,
#             out_channels,
#             kernel_sizes=5,
#             norm_cfg=self.norm_cfg,
#             act_cfg=self.act_cfg
#         )
#         stage.append(spp)
#         # 4) Optional SPP
#         if use_spp:
#             spp = SPPFBottleneck(
#                 out_channels,
#                 out_channels,
#                 kernel_sizes=5,
#                 norm_cfg=self.norm_cfg,
#                 act_cfg=self.act_cfg
#             )
#             stage.append(spp)

#         return stage

#     def init_weights(self):
#         """Initialize the parameters (example)."""
#         if self.init_cfg is None:
#             for m in self.modules():
#                 if isinstance(m, torch.nn.Conv2d):
#                     # Basic init, or you can do kaiming_normal_
#                     nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
#                     if m.bias is not None:
#                         nn.init.constant_(m.bias, 0)
#         else:
#             # Or call your parent's init if you have a framework-specific approach
#             pass

#     def forward(self, x):
#         x = self.stem(x)
#         outputs = []
#         for i, stage in enumerate(self.stages):
#             x = stage(x)
#             if i in self.out_indices:
#                 outputs.append(x)
#         return tuple(outputs)

# Result bbox_mAP_copypaste: 0.444 0.603 0.483 0.273 0.487 0.590
# YOLOv8CSPDarknet Model with CBAM and SPPELAN 
# @MODELS.register_module()
# class YOLOv8CSPDarknet(BaseBackbone):
#     """CSP-Darknet backbone used in YOLOv8 with CBAM and SPPELAN integrated."""

#     arch_settings = {
#         'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
#                [256, 512, 6, True, False], [512, None, 3, True, True]],
#     }

#     def __init__(self,
#                  arch: str = 'P5',
#                  last_stage_out_channels: int = 1024,
#                  plugins: Union[dict, List[dict]] = None,
#                  deepen_factor: float = 1.0,
#                  widen_factor: float = 1.0,
#                  input_channels: int = 3,
#                  out_indices: Tuple[int] = (2, 3, 4),
#                  frozen_stages: int = -1,
#                  norm_cfg: ConfigType = dict(
#                      type='BN', momentum=0.03, eps=0.001),
#                  act_cfg: ConfigType = dict(type='SiLU', inplace=True),
#                  norm_eval: bool = False,
#                  init_cfg: OptMultiConfig = None):
#         self.arch_settings[arch][-1][1] = last_stage_out_channels
#         super().__init__(
#             self.arch_settings[arch],
#             deepen_factor,
#             widen_factor,
#             input_channels=input_channels,
#             out_indices=out_indices,
#             plugins=plugins,
#             frozen_stages=frozen_stages,
#             norm_cfg=norm_cfg,
#             act_cfg=act_cfg,
#             norm_eval=norm_eval,
#             init_cfg=init_cfg)

#     def build_stem_layer(self) -> nn.Module:
#         """Build a stem layer with CBAM."""
#         return ConvWithCBAM(
#             self.input_channels,
#             make_divisible(self.arch_setting[0][0], self.widen_factor),
#             kernel_size=3,
#             stride=2,
#             padding=1,
#             act=True,
#         )

#     def build_stage_layer(self, stage_idx: int, setting: list) -> list:
#         """Build a stage layer with CBAM and SPPELAN integration."""
#         in_channels, out_channels, num_blocks, add_identity, use_spp = setting

#         in_channels = make_divisible(in_channels, self.widen_factor)
#         out_channels = make_divisible(out_channels, self.widen_factor)
#         num_blocks = make_round(num_blocks, self.deepen_factor)

#         stage = []
#         conv_layer = ConvWithCBAM(in_channels, out_channels, kernel_size=3, stride=2, padding=1, act=True)
#         stage.append(conv_layer)

#         # Integrate CSP Layer with CBAM
#         csp_layer = CSPLayer(
#             out_channels,
#             out_channels,
#             num_blocks=num_blocks,
#             add_identity=add_identity,
#             norm_cfg=self.norm_cfg,
#             act_cfg=self.act_cfg
#         )
#         stage.append(csp_layer)

#         # Integrate SPPELAN for multi-scale feature extraction
#         sppelan_layer = SPPELAN(
#             out_channels,
#             out_channels,
#             num_blocks=num_blocks,
#             add_identity=add_identity,
#             norm_cfg=self.norm_cfg,
#             act_cfg=self.act_cfg
#         )
#         stage.append(sppelan_layer)

#         # Integrate SPP if required
#         if use_spp:
#             spp = SPPFBottleneck(
#                 out_channels,
#                 out_channels,
#                 kernel_sizes=5,
#                 norm_cfg=self.norm_cfg,
#                 act_cfg=self.act_cfg
#             )
#             stage.append(spp)

#         return stage

#     def init_weights(self):
#         """Initialize the parameters."""
#         if self.init_cfg is None:
#             for m in self.modules():
#                 if isinstance(m, torch.nn.Conv2d):
#                     m.reset_parameters()
#         else:
#             super().init_weights()



@MODELS.register_module()
class YOLOXCSPDarknet(BaseBackbone):
    """CSP-Darknet backbone used in YOLOX.

    Args:
        arch (str): Architecture of CSP-Darknet, from {P5, P6}.
            Defaults to P5.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels (int): Number of input image channels. Defaults to 3.
        out_indices (Tuple[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        use_depthwise (bool): Whether to use depthwise separable convolution.
            Defaults to False.
        spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP
            layers. Defaults to (5, 9, 13).
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        init_cfg (Union[dict,list[dict]], optional): Initialization config
            dict. Defaults to None.
    Example:
        >>> from mmyolo.models import YOLOXCSPDarknet
        >>> import torch
        >>> model = YOLOXCSPDarknet()
        >>> model.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = model(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, add_identity, use_spp
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False],
               [256, 512, 9, True, False], [512, 1024, 3, False, True]],
    }

    def __init__(self,
                 arch: str = 'P5',
                 plugins: Union[dict, List[dict]] = None,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 input_channels: int = 3,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 use_depthwise: bool = False,
                 spp_kernal_sizes: Tuple[int] = (5, 9, 13),
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 norm_eval: bool = False,
                 init_cfg: OptMultiConfig = None):
        """Initialize the YOLOX CSP-Darknet backbone.

        See the class docstring for the meaning of each argument.
        """
        # These options are read by ``build_stage_layer``, which the parent
        # constructor invokes, so they must be assigned before ``super()``.
        self.use_depthwise = use_depthwise
        self.spp_kernal_sizes = spp_kernal_sizes
        super().__init__(
            self.arch_settings[arch],
            deepen_factor,
            widen_factor,
            input_channels=input_channels,
            out_indices=out_indices,
            frozen_stages=frozen_stages,
            plugins=plugins,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            init_cfg=init_cfg)

    def build_stem_layer(self) -> nn.Module:
        """Build the stem layer.

        Returns:
            nn.Module: A ``Focus`` module mapping the configured number of
            input channels to the first stage's (width-scaled) channel count.
        """
        # Use the configured ``input_channels`` and the first arch-setting
        # entry instead of hard-coded 3/64, so non-RGB inputs and custom
        # arch settings are respected (defaults reproduce the old behavior).
        return Focus(
            self.input_channels,
            make_divisible(self.arch_setting[0][0], self.widen_factor),
            kernel_size=3,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build a stage layer.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_channels, out_channels, num_blocks, add_identity, use_spp = setting

        # Apply the width/depth multipliers to the raw arch setting.
        in_channels = make_divisible(in_channels, self.widen_factor)
        out_channels = make_divisible(out_channels, self.widen_factor)
        num_blocks = make_round(num_blocks, self.deepen_factor)

        conv_cls = (DepthwiseSeparableConvModule
                    if self.use_depthwise else ConvModule)
        # Stride-2 convolution halves the spatial resolution at stage entry.
        downsample = conv_cls(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage = [downsample]
        if use_spp:
            # Spatial pyramid pooling block (last stage only, per settings).
            stage.append(
                SPPFBottleneck(
                    out_channels,
                    out_channels,
                    kernel_sizes=self.spp_kernal_sizes,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
        stage.append(
            CSPLayer(
                out_channels,
                out_channels,
                num_blocks=num_blocks,
                add_identity=add_identity,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg))
        return stage