# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union

import torch.nn as nn
from mmdet.utils import ConfigType, OptMultiConfig

from mmyolo.registry import MODELS
from mmcv.cnn import ConvModule
from .. import CSPLayerWithTwoConv
from ..utils import make_divisible, make_round
from .yolov5_pafpn import YOLOv5PAFPN
from .base_yolo_neck import BaseYOLONeck
from ..layers import (RepNCSPELAN4, SPPELAN, ADown, 
                     CBLinear, CBFuse, Concat, Conv, DFL)


@MODELS.register_module()
class YOLOv8PAFPN(BaseYOLONeck):
    """YOLOv9-style neck built from RepNCSPELAN4 blocks and ADown modules.

    NOTE(review): the class is registered under the name ``YOLOv8PAFPN``
    (kept so existing configs keep resolving), but the implementation is a
    YOLOv9-style PAFPN (SPPELAN reduce layer, RepNCSPELAN4 fusion blocks,
    ADown downsampling) — consider renaming in a follow-up change.

    Args:
        in_channels (List[int]): Input channels from backbone layers.
        out_channels (int): Base output channels used at every scale.
        deepen_factor (float): Depth control factor; scales the number of
            layers inside each RepNCSPELAN4 block. Defaults to 1.0.
        widen_factor (float): Width control factor; scales channel counts.
            Defaults to 1.0.
        spp_elan_channels (List[int]): ``[hidden, out]`` channels for the
            SPPELAN module applied to the deepest backbone level.
            Defaults to ``[512, 256]``.
        freeze_all (bool): Freeze all parameters. Defaults to False.
        norm_cfg (dict): Normalization config. Defaults to None.
        act_cfg (dict): Activation config. Defaults to None.
        init_cfg (dict or list[dict], optional): Initialization config.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: int,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 spp_elan_channels: Union[List[int], None] = None,
                 freeze_all: bool = False,
                 norm_cfg: ConfigType = None,
                 act_cfg: ConfigType = None,
                 init_cfg: OptMultiConfig = None,
                 **kwargs):
        # Avoid a shared mutable default argument; fall back to the
        # default SPPELAN channel configuration when not provided.
        if spp_elan_channels is None:
            spp_elan_channels = [512, 256]
        # Must be set BEFORE super().__init__(): BaseYOLONeck's constructor
        # calls build_reduce_layer(), which reads self.spp_elan_channels.
        # Assigning it afterwards raises AttributeError at construction.
        self.spp_elan_channels = spp_elan_channels
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            upsample_feats_cat_first=True,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg,
            **kwargs)

    def build_reduce_layer(self, idx: int) -> nn.Module:
        """Build reduce layer; SPPELAN for the deepest backbone level.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The reduce layer.
        """
        if idx == len(self.in_channels) - 1:
            # Deepest level: SPPELAN with [hidden, out] taken from
            # self.spp_elan_channels.
            return SPPELAN(
                in_channels=self.in_channels[idx],
                out_channels=self.spp_elan_channels[1],
                hidden_channels=self.spp_elan_channels[0],
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        # Shallower levels: 1x1 conv to the width-scaled output channels.
        return ConvModule(
            in_channels=self.in_channels[idx],
            out_channels=int(self.out_channels * self.widen_factor),
            kernel_size=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_upsample_layer(self, idx: int) -> nn.Module:
        """Build upsample layer: 2x nearest upsample followed by a 3x3 conv.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The upsample layer.
        """
        channels = int(self.out_channels * self.widen_factor)
        return nn.Sequential(
            nn.Upsample(scale_factor=2, mode='nearest'),
            ConvModule(
                in_channels=channels,
                out_channels=channels,
                kernel_size=3,
                padding=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg))

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """Build RepNCSPELAN4 block for the top-down path.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        channels = int(self.out_channels * self.widen_factor)
        return RepNCSPELAN4(
            # Input is the concatenation of the upsampled feature and the
            # lateral feature, hence 2x the per-scale channels.
            in_channels=2 * channels,
            out_channels=channels,
            hidden_channels=channels // 2,
            # make_round keeps the block count an integer >= 1 after
            # depth scaling (3 * deepen_factor would be a float).
            num_layers=make_round(3, self.deepen_factor),
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_downsample_layer(self, idx: int) -> nn.Module:
        """Build ADown layer for the bottom-up path.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The downsample layer.
        """
        channels = int(self.out_channels * self.widen_factor)
        return ADown(
            in_channels=channels,
            out_channels=channels,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """Build RepNCSPELAN4 block for the bottom-up path.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        channels = int(self.out_channels * self.widen_factor)
        return RepNCSPELAN4(
            # Concatenation of the downsampled feature and the top-down
            # output, hence 2x the per-scale channels.
            in_channels=2 * channels,
            out_channels=channels,
            hidden_channels=channels // 2,
            # Integer block count after depth scaling (see top-down layer).
            num_layers=make_round(3, self.deepen_factor),
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_out_layer(self, idx: int) -> nn.Module:
        """Build output 3x3 convolution layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The output layer.
        """
        channels = int(self.out_channels * self.widen_factor)
        return ConvModule(
            in_channels=channels,
            out_channels=channels,
            kernel_size=3,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)