# Copyright (c) Tencent Inc. All rights reserved.
import copy
from typing import List, Union, Tuple, Dict, Optional

import torch, time
import torch.nn as nn
from torch import Tensor
from mmdet.utils import ConfigType, OptMultiConfig
import torch.nn.functional as F
from mmyolo.registry import MODELS
from mmyolo.models.utils import make_divisible, make_round
from mmyolo.models.layers import (RepNCSPELAN4, SPPELAN, ADown, 
                     CBLinear, CBFuse, Concat, Conv, DFL)
from mmyolo.models.necks.yolov8_pafpn import YOLOv8PAFPN
from .CSPLayerWithTwoConvText import CSPLayerWithTwoConvText  

class ChannelError(RuntimeError):
    """Custom error for channel dimension violations with detailed diagnostics."""

    def __init__(self, stage: str, expected: Tuple[int, int], actual: Tuple[int, int], context: Dict):
        # Assemble the fixed header first, then append one "key: value" line
        # per context entry so the traceback carries full diagnostics.
        header = (
            f"Channel dimension violation at stage '{stage}'\n"
            f"Expected: (in={expected[0]}, out={expected[1]})\n"
            f"Actual:   (in={actual[0]}, out={actual[1]})\n"
            "Context:\n"
        )
        details = "\n".join(f"{key}: {value}" for key, value in context.items())
        super().__init__(header + details)

class ChannelGuardian:
    """Complete dimension management system with validation and error tracking."""

    def __init__(self, config: Dict):
        super().__init__()
        self.config = config
        # Registry tracks the "current" channel count plus a full audit trail.
        self.registry = {
            'current': config['backbone']['p5'],
            'history': [],
            'backbone_channels': config['backbone'],
            'neck_config': config['neck'],
        }

    def validate_stage(self,
                     stage_name: str,
                     input_tensor: torch.Tensor,
                     output_tensor: torch.Tensor) -> None:
        """Validate input/output dimensions for a processing stage."""
        stage_cfg = self.registry['neck_config'].get(stage_name, {})
        want_in = stage_cfg.get('in_channels')
        want_out = stage_cfg.get('out_channels')

        got_in = input_tensor.size(1)
        got_out = output_tensor.size(1)

        diagnostics = {
            'stage': stage_name,
            'registry': self.registry,
            'input_shape': tuple(input_tensor.shape),
            'output_shape': tuple(output_tensor.shape),
        }

        # A stage with no configured expectation (falsy value) is not checked.
        bad_in = bool(want_in) and got_in != want_in
        bad_out = bool(want_out) and got_out != want_out
        if bad_in or bad_out:
            raise ChannelError(
                stage=stage_name,
                expected=(want_in, want_out),
                actual=(got_in, got_out),
                context=diagnostics,
            )

        # Record the successful stage and advance the channel cursor.
        entry = {
            'stage': stage_name,
            'in': got_in,
            'out': got_out,
            'shape': tuple(output_tensor.shape),
        }
        self.registry['current'] = got_out
        self.registry['history'] = self.registry['history'] + [entry]

    def prepare_concat(self, main_channels: int, branch_channels: int) -> int:
        """Calculate and validate concatenated channels."""
        total = main_channels + branch_channels
        expected_total = self.registry['neck_config']['concat_channels']
        if total != expected_total:
            raise ChannelError(
                stage='concatenation',
                expected=(None, expected_total),
                actual=(None, total),
                context=self.registry,
            )
        return total

    def resize_features(self, source: torch.Tensor, target_shape: Tuple[int, int]) -> torch.Tensor:
        """Safely resize features with dimension validation."""
        # Only (H, W) targets make sense for spatial interpolation.
        if len(target_shape) != 2:
            raise ValueError(f"Invalid target shape {target_shape} for resizing")
        return F.interpolate(source, size=target_shape, mode='nearest')

@MODELS.register_module()
class YOLOWorldPAFPN(YOLOv8PAFPN):
    """Path Aggregation Network for YOLO-World with explicit channel auditing.

    Extends :class:`YOLOv8PAFPN` with an SPPELAN head, explicit upsample and
    downsample paths, and auxiliary P4/P5 branches.  All channel arithmetic is
    tracked by a :class:`ChannelGuardian` so dimension mismatches surface as a
    descriptive :class:`ChannelError` instead of an opaque runtime shape error.

    Args:
        in_channels (List[int]): Backbone feature channels for (P3, P4, P5).
        out_channels (List[int]): Output channels, one per scale.
        guide_channels (int): Channel dimension of the text guidance features.
        embed_channels (List[int]): Text embedding dimension per scale.
        num_heads (List[int]): Attention head count per scale.
        deepen_factor (float): Depth scaling factor inherited from YOLOv8.
        widen_factor (float): Width scaling factor inherited from YOLOv8.
        num_csp_blocks (int): Number of bottleneck blocks per CSP layer.
        freeze_all (bool): Freeze every neck parameter when True.
        block_cfg (ConfigType): Base config for the fusion CSP block.
        norm_cfg (ConfigType): Normalization layer config.
        act_cfg (ConfigType): Activation layer config.
        init_cfg (OptMultiConfig): Weight initialization config.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: List[int],
                 guide_channels: int,
                 embed_channels: List[int],
                 num_heads: List[int],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 num_csp_blocks: int = 3,
                 freeze_all: bool = False,
                 block_cfg: ConfigType = dict(type='MaxSigmoidCSPLayerWithTwoConv'),
                 norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None) -> None:
        # Fail fast on malformed configs, before any modules are built
        # (previously these checks only ran after the parent constructor).
        if len(in_channels) != 3:
            raise ValueError(
                "YOLOWorldPAFPN requires exactly 3 input channels (P3, P4, P5)")
        if len(out_channels) != 3:
            raise ValueError("YOLOWorldPAFPN requires exactly 3 output channels")

        # Text guidance parameters, stored up front so helpers may read them.
        self.guide_channels = guide_channels
        self.embed_channels = embed_channels
        self.num_heads = num_heads
        self.block_cfg = block_cfg

        # Single source of truth for the architecture layout.  The previous
        # revision kept two divergent copies (``stage_config`` vs ``config``)
        # and referenced an undefined name ``neck_config``, which raised
        # NameError on every construction.
        self.config = {
            'backbone': {
                'p3': in_channels[0],
                'p4': in_channels[1],
                'p5': in_channels[2],
            },
            'neck': {
                'sppelan': {
                    'in_channels': in_channels[2],
                    'out_channels': 256,
                    'num_channels': 5,
                },
                'upsample': {
                    'factors': [2, 2],
                    'reductions': [2, 2],
                },
                'downsample': {
                    'factors': [0.5, 0.5],
                    'reductions': [2, 2],
                },
                'concat_channels': 768,  # 256 (SPPELAN) + 512 (auxiliary P5)
            },
        }

        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            num_csp_blocks=num_csp_blocks,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg,
        )

        # One guardian instance tracks channel counts across all build steps.
        self.guardian = ChannelGuardian(self.config)

        # The downsample path reads ``self.auxiliary_channels``, so the
        # auxiliary branches must be built first (previously they were built
        # last, causing AttributeError in ``_build_downsample_path``).
        self._build_auxiliary()
        self._build_sppelan()
        self._build_upsample_path()
        self._build_downsample_path()

    def _build_sppelan(self):
        """Build the SPPELAN module and sanity-check its channel contract."""
        cfg = self.config['neck']['sppelan']
        # NOTE(review): keyword names follow the original code; confirm they
        # match the installed mmyolo ``SPPELAN`` signature.
        self.sppelan = SPPELAN(
            in_channels=cfg['in_channels'],
            out_channels=cfg['out_channels'],
            num_channels=cfg['num_channels'])

        # Probe with a dummy tensor so misconfiguration fails at build time
        # rather than on the first training batch.
        dummy_input = torch.randn(1, cfg['in_channels'], 20, 20)
        output = self.sppelan(dummy_input)
        self.guardian.validate_stage('sppelan', dummy_input, output)

    def _build_upsample_path(self):
        """Construct the upsample (top-down) path with dimension tracking."""
        self.upsample_layers = nn.ModuleList()
        cfg = self.config['neck']['upsample']

        for i, (factor, reduction) in enumerate(zip(cfg['factors'], cfg['reductions'])):
            # Spatial upsample.
            self.upsample_layers.append(nn.Upsample(
                scale_factor=factor,
                mode='nearest'))

            # Channel concatenation with the skip feature (P4 then P3).
            self.upsample_layers.append(Concat(dimension=1))

            # CSP processing: fuse the concatenated channels and reduce.
            in_channels = self.guardian.registry['current'] + self.config['backbone'][f'p{4 - i}']
            out_channels = in_channels // reduction

            # NOTE(review): keyword names follow the original code; confirm
            # they match the installed mmyolo ``RepNCSPELAN4`` signature.
            self.upsample_layers.append(RepNCSPELAN4(
                in_channels=in_channels,
                out_channels=out_channels,
                csp_channels=out_channels // 2,
                hidden_channels=out_channels // 2))

            # Advance the channel cursor for the next stage.
            self.guardian.registry['current'] = out_channels

    def _build_downsample_path(self):
        """Construct the downsample (bottom-up) path with dimension tracking."""
        self.downsample_layers = nn.ModuleList()
        cfg = self.config['neck']['downsample']

        for i, (factor, reduction) in enumerate(zip(cfg['factors'], cfg['reductions'])):
            # Spatial downsample; ``factor`` < 1 also halves the channels.
            current_channels = self.guardian.registry['current']
            self.downsample_layers.append(ADown(
                in_channels=current_channels,
                out_channels=int(current_channels * factor)))

            # Channel concatenation with the matching auxiliary branch.
            self.downsample_layers.append(Concat(dimension=1))

            # CSP processing over the concatenated channels.
            in_channels = int(current_channels * factor) + self.auxiliary_channels[i]
            out_channels = in_channels // reduction

            self.downsample_layers.append(RepNCSPELAN4(
                in_channels=in_channels,
                out_channels=out_channels,
                csp_channels=out_channels // 2,
                hidden_channels=out_channels // 2))

            # Advance the channel cursor for the next stage.
            self.guardian.registry['current'] = out_channels

    def _build_auxiliary(self):
        """Build 1x1 conv auxiliary branches for the P4 and P5 features."""
        self.auxiliary = nn.ModuleList([
            # Branch 0 consumes P4 and emits 256 channels.
            nn.Sequential(
                nn.Conv2d(self.config['backbone']['p4'], 256, 1),
                nn.BatchNorm2d(256),
                nn.SiLU()),
            # Branch 1 consumes P5 and emits 512 channels.
            nn.Sequential(
                nn.Conv2d(self.config['backbone']['p5'], 512, 1),
                nn.BatchNorm2d(512),
                nn.SiLU()),
        ])
        # Output channels per branch, consumed by ``_build_downsample_path``.
        self.auxiliary_channels = [256, 512]

    def forward(self, inputs: List[torch.Tensor], txt_feats: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor]:
        """Forward pass with comprehensive dimension validation.

        Args:
            inputs: List of backbone features [P3, P4, P5].
            txt_feats: Optional text guidance features (currently unused here).

        Returns:
            Tuple of (fused feature, P3, P4, P5).
        """
        if len(inputs) != 3:
            raise ValueError(f"Expected 3 input features, got {len(inputs)}")

        p3, p4, p5 = inputs

        # Process the deepest feature through SPPELAN.
        x = self.sppelan(p5)
        self.guardian.validate_stage('sppelan', p5, x)

        # Upsample path: [Upsample, Concat, CSP] x2, so within each triple the
        # Concat sits at index 1 (P4 skip) and index 4 (P3 skip).
        for i, layer in enumerate(self.upsample_layers):
            if isinstance(layer, Concat):
                branch = p4 if i == 1 else p3
                # Align spatial dims before concatenating along channels.
                branch_resized = self.guardian.resize_features(branch, x.shape[-2:])
                x = torch.cat([x, branch_resized], dim=1)
                self.guardian.validate_stage(f'upsample_concat_{i // 3}', x, x)
            else:
                x = layer(x)
                if isinstance(layer, RepNCSPELAN4):
                    self.guardian.validate_stage(f'upsample_csp_{i // 3}', x, x)

        # Auxiliary branches consume P4 and P5 respectively (branch 1 was
        # previously fed P4 despite being built for P5 channels, which broke
        # whenever P4 and P5 channel counts differ).
        aux_features = [aux(src) for aux, src in zip(self.auxiliary, (p4, p5))]
        for i, layer in enumerate(self.downsample_layers):
            if isinstance(layer, Concat):
                # Align the auxiliary feature spatially before concatenation.
                aux = self.guardian.resize_features(aux_features[i // 3], x.shape[-2:])
                x = torch.cat([x, aux], dim=1)
                self.guardian.validate_stage(f'downsample_concat_{i // 3}', x, x)
            else:
                x = layer(x)
                if isinstance(layer, RepNCSPELAN4):
                    self.guardian.validate_stage(f'downsample_csp_{i // 3}', x, x)

        return (x, p3, p4, p5)

    def apply_text_guidance(self, x: torch.Tensor, txt_feats: torch.Tensor) -> torch.Tensor:
        """Apply text guidance features to ``x`` via channel concatenation.

        Args:
            x: Image feature map of shape (B, C, H, W).
            txt_feats: Text features, (B, L, D) token sequence or (B, D)
                pooled vector; ``None`` is a no-op.

        Returns:
            The fused feature map (or ``x`` unchanged when no text is given).

        Raises:
            ValueError: If ``txt_feats`` is neither 2-D nor 3-D.
            ChannelError: If the text channel count mismatches ``guide_channels``.
        """
        if txt_feats is None:
            return x

        if txt_feats.dim() == 3:
            # Collapse the token dimension (B, L, D) -> (B, D).  The previous
            # code unsqueezed the 3-D tensor to 5-D and then called a 4-arg
            # ``expand``, which always raised.  NOTE(review): mean pooling
            # assumed — confirm against the upstream text encoder contract.
            txt_feats = txt_feats.mean(dim=1)
        if txt_feats.dim() != 2:
            raise ValueError(
                f"Text features must be 2D or 3D tensor, got {txt_feats.dim()}D")

        # Broadcast over the spatial grid: (B, D) -> (B, D, H, W).
        txt_feats = txt_feats.unsqueeze(-1).unsqueeze(-1)
        txt_feats = txt_feats.expand(-1, -1, x.size(2), x.size(3))

        # Channel validation against the configured guide width.
        if txt_feats.size(1) != self.guide_channels:
            raise ChannelError(
                stage='text_guidance',
                expected=(None, self.guide_channels),
                actual=(None, txt_feats.size(1)),
                context=self.guardian.registry)

        # Feature fusion.  NOTE(review): ``self.text_fusion`` is not built in
        # this class — a subclass (or future revision) must define it before
        # calling this method.
        x = torch.cat([x, txt_feats], dim=1)
        x = self.text_fusion(x)

        return x


        # print("p3 : ", p3.shape)
        # print("p4 : ", p4.shape)
        # print("p5 : ", p5.shape)
        
    # print("+++++++++++ TESTING NEW YOLOWorldPAFPN +++++++++++")
    # """Path Aggregation Network for YOLO-World, including text→image fusion.

    # This version modifies the default YOLOv8 PAFPN to:
    #   1) Use a text-aware CSP block (e.g. CSPLayerWithTwoConvText).
    #   2) Pass additional config (guide_channels, embed_channels, num_heads, etc.).
    #   3) Optionally enable multi-stage cross-attn (num_fusion_stages) or FiLM gating (use_film).

    # Args:
    #     in_channels (List[int]): Channels of backbone features (C3, C4, C5).
    #     out_channels (List[int] or int): Channels after each PAN stage.
    #     guide_channels (int): Dimension for image/text projections in cross-attn.
    #     embed_channels (List[int]): Dimension of text embeddings per scale.
    #     num_heads (List[int]): Number of attention heads per scale.
    #     num_fusion_stages (int): How many times to run cross-attn in each CSP block.
    #     use_film (bool): Whether to apply FiLM gating in each CSP block.
    #     deepen_factor (float): YOLOv8 param for controlling depth.
    #     widen_factor (float): YOLOv8 param for controlling width.
    #     num_csp_blocks (int): Number of DarknetBottleneck blocks in each CSP layer.
    #     freeze_all (bool): If True, freeze all parameters in this neck.
    #     block_cfg (dict): Base config for the CSP block. We override 'type' and text params.
    #     norm_cfg (dict): Normalization config (BN by default).
    #     act_cfg (dict): Activation config (SiLU by default).
    #     init_cfg: Initialization config.
    # """

    # def __init__(self,
    #              in_channels: List[int],
    #              out_channels: Union[List[int], int],
    #              guide_channels: int,
    #              embed_channels: List[int],
    #              num_heads: List[int],
    #              num_fusion_stages: int = 1,
    #              use_film: bool = False,
    #              deepen_factor: float = 1.0,
    #              widen_factor: float = 1.0,
    #              num_csp_blocks: int = 3,
    #              freeze_all: bool = False,
    #              block_cfg: dict = dict(type='CSPLayerWithTwoConvText'),
    #              norm_cfg: dict = dict(type='BN', momentum=0.03, eps=0.001),
    #              act_cfg: dict = dict(type='SiLU', inplace=True),
    #              init_cfg=None) -> None:

    #     self.guide_channels = guide_channels
    #     self.embed_channels = embed_channels
    #     self.num_heads = num_heads
    #     self.num_fusion_stages = num_fusion_stages
    #     self.use_film = use_film
    #     self.block_cfg = block_cfg

    #     super().__init__(in_channels=in_channels,
    #                      out_channels=out_channels,
    #                      deepen_factor=deepen_factor,
    #                      widen_factor=widen_factor,
    #                      num_csp_blocks=num_csp_blocks,
    #                      freeze_all=freeze_all,
    #                      norm_cfg=norm_cfg,
    #                      act_cfg=act_cfg,
    #                      init_cfg=init_cfg)

    # def build_top_down_layer(self, idx: int) -> nn.Module:
    #     """Build a top-down layer for the FPN path.

    #     Args:
    #         idx (int): Which layer index we’re building.

    #     Returns:
    #         nn.Module: The top-down CSP block with text fusion.
    #     """
    #     block_cfg = copy.deepcopy(self.block_cfg)
    #     # Ensure we use the text-aware CSP class
    #     block_cfg.update(dict(
    #         type='CSPLayerWithTwoConvText',  # or whatever your text-based CSP is named
    #         in_channels=make_divisible(self.in_channels[idx - 1] + self.in_channels[idx],
    #                                    self.widen_factor),
    #         out_channels=make_divisible(self.out_channels[idx - 1],
    #                                     self.widen_factor),
    #         guide_channels=256,
    #         embed_channels=512,
    #         num_heads=512//32,
    #         num_blocks=make_round(self.num_csp_blocks,
    #                               self.deepen_factor),
    #         num_fusion_stages=self.num_fusion_stages,
    #         use_film=self.use_film,
    #         add_identity=False,
    #         norm_cfg=self.norm_cfg,
    #         act_cfg=self.act_cfg
    #     ))
    #     return MODELS.build(block_cfg)

    # def build_bottom_up_layer(self, idx: int) -> nn.Module:
    #     """Build a bottom-up layer for the PAN path.

    #     Args:
    #         idx (int): Which layer index we’re building.

    #     Returns:
    #         nn.Module: The bottom-up CSP block with text fusion.
    #     """
    #     block_cfg = copy.deepcopy(self.block_cfg)
    #     # Same approach for bottom-up
    #     block_cfg.update(dict(
    #         type='CSPLayerWithTwoConvText',
    #         in_channels=make_divisible(self.out_channels[idx] + self.out_channels[idx + 1],
    #                                    self.widen_factor),
    #         out_channels=make_divisible(self.out_channels[idx + 1],
    #                                     self.widen_factor),
    #         guide_channels=256,
    #         embed_channels=512,
    #         num_heads=512//32,
    #         num_blocks=make_round(self.num_csp_blocks,
    #                               self.deepen_factor),
    #         num_fusion_stages=self.num_fusion_stages,
    #         use_film=self.use_film,
    #         add_identity=False,
    #         norm_cfg=self.norm_cfg,
    #         act_cfg=self.act_cfg
    #     ))
    #     return MODELS.build(block_cfg)

    # def forward(self, img_feats: List[torch.Tensor], txt_feats: torch.Tensor = None) -> tuple:
    #     """Forward pass with text embeddings.

    #     Args:
    #         img_feats (List[Tensor]): List of feature maps from backbone (C3, C4, C5).
    #         txt_feats (Tensor, optional): Text embeddings, shape [B, L, D] or [B, D].
    #                                       Defaults to None.

    #     Returns:
    #         tuple: multi-scale output feature maps from the final PAN layers.
    #     """
    #     assert len(img_feats) == len(self.in_channels)
    #     # 1. Reduce layers
    #     reduce_outs = []
    #     for idx in range(len(self.in_channels)):
    #         reduce_outs.append(self.reduce_layers[idx](img_feats[idx]))

    #     # 2. Top-Down path
    #     inner_outs = [reduce_outs[-1]]
    #     for idx in range(len(self.in_channels) - 1, 0, -1):
    #         feat_high = inner_outs[0]
    #         feat_low = reduce_outs[idx - 1]
    #         upsample_feat = self.upsample_layers[len(self.in_channels) - 1 - idx](feat_high)

    #         if self.upsample_feats_cat_first:
    #             top_down_layer_inputs = torch.cat([upsample_feat, feat_low], dim=1)
    #         else:
    #             top_down_layer_inputs = torch.cat([feat_low, upsample_feat], dim=1)

    #         # pass text feats to the CSP block
    #         inner_out = self.top_down_layers[len(self.in_channels) - 1 - idx](
    #             top_down_layer_inputs, txt_feats
    #         )
    #         inner_outs.insert(0, inner_out)

    #     # 3. Bottom-Up path
    #     outs = [inner_outs[0]]
    #     for idx in range(len(self.in_channels) - 1):
    #         feat_low = outs[-1]
    #         feat_high = inner_outs[idx + 1]
    #         downsample_feat = self.downsample_layers[idx](feat_low)

    #         out = self.bottom_up_layers[idx](
    #             torch.cat([downsample_feat, feat_high], dim=1),
    #             txt_feats
    #         )
    #         outs.append(out)

    #     # 4. Out Layers
    #     results = []
    #     for idx in range(len(self.in_channels)):
    #         results.append(self.out_layers[idx](outs[idx]))

    #     return tuple(results)

# This is the original
# class YOLOWorldPAFPN(YOLOv8PAFPN):
    # """Path Aggregation Network used in YOLO World
    # Following YOLOv8 PAFPN, including text to image fusion
    # """
    # def __init__(self,
    #              in_channels: List[int],
    #              out_channels: Union[List[int], int],
    #              guide_channels: int,
    #              embed_channels: List[int],
    #              num_heads: List[int],
    #              deepen_factor: float = 1.0,
    #              widen_factor: float = 1.0,
    #              num_csp_blocks: int = 3,
    #              freeze_all: bool = False,
    #              block_cfg: ConfigType = dict(type='CSPLayerWithTwoConv'),
    #              norm_cfg: ConfigType = dict(type='BN',
    #                                          momentum=0.03,
    #                                          eps=0.001),
    #              act_cfg: ConfigType = dict(type='SiLU', inplace=True),
    #              init_cfg: OptMultiConfig = None) -> None:
    #     self.guide_channels = guide_channels
    #     self.embed_channels = embed_channels
    #     self.num_heads = num_heads
    #     self.block_cfg = block_cfg
    #     super().__init__(in_channels=in_channels,
    #                      out_channels=out_channels,
    #                      deepen_factor=deepen_factor,
    #                      widen_factor=widen_factor,
    #                      num_csp_blocks=num_csp_blocks,
    #                      freeze_all=freeze_all,
    #                      norm_cfg=norm_cfg,
    #                      act_cfg=act_cfg,
    #                      init_cfg=init_cfg)

    # def build_top_down_layer(self, idx: int) -> nn.Module:
    #     """build top down layer.

    #     Args:
    #         idx (int): layer idx.

    #     Returns:
    #         nn.Module: The top down layer.
    #     """
    #     block_cfg = copy.deepcopy(self.block_cfg)
    #     block_cfg.update(
    #         dict(in_channels=make_divisible(
    #             (self.in_channels[idx - 1] + self.in_channels[idx]),
    #             self.widen_factor),
    #              out_channels=make_divisible(self.out_channels[idx - 1],
    #                                          self.widen_factor),
    #              guide_channels=self.guide_channels,
    #              embed_channels=make_round(self.embed_channels[idx - 1],
    #                                        self.widen_factor),
    #              num_heads=make_round(self.num_heads[idx - 1],
    #                                   self.widen_factor),
    #              num_blocks=make_round(self.num_csp_blocks,
    #                                    self.deepen_factor),
    #              add_identity=False,
    #              norm_cfg=self.norm_cfg,
    #              act_cfg=self.act_cfg))
    #     return MODELS.build(block_cfg)

    # def build_bottom_up_layer(self, idx: int) -> nn.Module:
    #     """build bottom up layer.

    #     Args:
    #         idx (int): layer idx.

    #     Returns:
    #         nn.Module: The bottom up layer.
    #     """
    #     block_cfg = copy.deepcopy(self.block_cfg)
    #     block_cfg.update(
    #         dict(in_channels=make_divisible(
    #             (self.out_channels[idx] + self.out_channels[idx + 1]),
    #             self.widen_factor),
    #              out_channels=make_divisible(self.out_channels[idx + 1],
    #                                          self.widen_factor),
    #              guide_channels=self.guide_channels,
    #              embed_channels=make_round(self.embed_channels[idx + 1],
    #                                        self.widen_factor),
    #              num_heads=make_round(self.num_heads[idx + 1],
    #                                   self.widen_factor),
    #              num_blocks=make_round(self.num_csp_blocks,
    #                                    self.deepen_factor),
    #              add_identity=False,
    #              norm_cfg=self.norm_cfg,
    #              act_cfg=self.act_cfg))
    #     return MODELS.build(block_cfg)

    # def forward(self, img_feats: List[Tensor], txt_feats: Tensor = None) -> tuple:
    #     """Forward function.
    #     including multi-level image features, text features: BxLxD
    #     """
    #     assert len(img_feats) == len(self.in_channels)
    #     # reduce layers
    #     reduce_outs = []
    #     for idx in range(len(self.in_channels)):
    #         reduce_outs.append(self.reduce_layers[idx](img_feats[idx]))

    #     # top-down path
    #     inner_outs = [reduce_outs[-1]]
    #     for idx in range(len(self.in_channels) - 1, 0, -1):
    #         feat_high = inner_outs[0]
    #         feat_low = reduce_outs[idx - 1]
    #         upsample_feat = self.upsample_layers[len(self.in_channels) - 1 -
    #                                              idx](feat_high)
    #         if self.upsample_feats_cat_first:
    #             top_down_layer_inputs = torch.cat([upsample_feat, feat_low], 1)
    #         else:
    #             top_down_layer_inputs = torch.cat([feat_low, upsample_feat], 1)
    #         inner_out = self.top_down_layers[len(self.in_channels) - 1 - idx](
    #             top_down_layer_inputs, txt_feats)
    #         inner_outs.insert(0, inner_out)

    #     # bottom-up path
    #     outs = [inner_outs[0]]
    #     for idx in range(len(self.in_channels) - 1):
    #         feat_low = outs[-1]
    #         feat_high = inner_outs[idx + 1]
    #         downsample_feat = self.downsample_layers[idx](feat_low)
    #         out = self.bottom_up_layers[idx](torch.cat(
    #             [downsample_feat, feat_high], 1), txt_feats)
    #         outs.append(out)

    #     # out_layers
    #     results = []
    #     for idx in range(len(self.in_channels)):
    #         results.append(self.out_layers[idx](outs[idx]))

    #     return tuple(results)


@MODELS.register_module()
class YOLOWorldDualPAFPN(YOLOWorldPAFPN):
    """Path Aggregation Network used in YOLO World v8.

    Adds an image-pooling attention module that refines the text features
    with multi-scale image context between the top-down and bottom-up paths.

    Args:
        text_enhancder (ConfigType): Config for the text enhancer module;
            ``image_channels``, ``text_channels`` and ``num_feats`` are filled
            in automatically from the neck configuration.
        (remaining args as in :class:`YOLOWorldPAFPN`)
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: Union[List[int], int],
                 guide_channels: int,
                 embed_channels: List[int],
                 num_heads: List[int],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 num_csp_blocks: int = 3,
                 freeze_all: bool = False,
                 text_enhancder: ConfigType = dict(
                     type='ImagePoolingAttentionModule',
                     embed_channels=256,
                     num_heads=8,
                     pool_size=3),
                 block_cfg: ConfigType = dict(type='CSPLayerWithTwoConv'),
                 norm_cfg: ConfigType = dict(type='BN',
                                             momentum=0.03,
                                             eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         guide_channels=guide_channels,
                         embed_channels=embed_channels,
                         num_heads=num_heads,
                         deepen_factor=deepen_factor,
                         widen_factor=widen_factor,
                         num_csp_blocks=num_csp_blocks,
                         freeze_all=freeze_all,
                         block_cfg=block_cfg,
                         norm_cfg=norm_cfg,
                         act_cfg=act_cfg,
                         init_cfg=init_cfg)

        # Copy before mutating: ``text_enhancder`` defaults to a shared
        # mutable dict, so updating it in place leaked this instance's
        # image_channels/text_channels/num_feats into every later
        # construction.  (A leftover debug ``print`` was removed as well.)
        text_enhancder = copy.deepcopy(text_enhancder)
        text_enhancder.update(
            dict(
                image_channels=[int(x * widen_factor) for x in out_channels],
                text_channels=guide_channels,
                num_feats=len(out_channels),
            ))
        self.text_enhancer = MODELS.build(text_enhancder)

    def forward(self, img_feats: List[Tensor], txt_feats: Tensor) -> tuple:
        """Forward pass: top-down path, text enhancement, bottom-up path.

        Args:
            img_feats (List[Tensor]): Multi-level image features (C3, C4, C5).
            txt_feats (Tensor): Text embeddings, shape (B, L, D).

        Returns:
            tuple: Multi-scale output feature maps from the out layers.
        """
        assert len(img_feats) == len(self.in_channels)
        # Reduce layers: project each backbone level.
        reduce_outs = []
        for idx in range(len(self.in_channels)):
            reduce_outs.append(self.reduce_layers[idx](img_feats[idx]))

        # Top-down path: upsample the deeper feature, concat with the
        # shallower one, and fuse with the text-aware CSP block.
        inner_outs = [reduce_outs[-1]]
        for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_high = inner_outs[0]
            feat_low = reduce_outs[idx - 1]
            upsample_feat = self.upsample_layers[len(self.in_channels) - 1 -
                                                 idx](feat_high)
            if self.upsample_feats_cat_first:
                top_down_layer_inputs = torch.cat([upsample_feat, feat_low], 1)
            else:
                top_down_layer_inputs = torch.cat([feat_low, upsample_feat], 1)
            inner_out = self.top_down_layers[len(self.in_channels) - 1 - idx](
                top_down_layer_inputs, txt_feats)
            inner_outs.insert(0, inner_out)

        # Refine the text features with multi-scale image context before the
        # bottom-up path consumes them.
        txt_feats = self.text_enhancer(txt_feats, inner_outs)

        # Bottom-up path.
        outs = [inner_outs[0]]
        for idx in range(len(self.in_channels) - 1):
            feat_low = outs[-1]
            feat_high = inner_outs[idx + 1]
            downsample_feat = self.downsample_layers[idx](feat_low)
            out = self.bottom_up_layers[idx](torch.cat(
                [downsample_feat, feat_high], 1), txt_feats)
            outs.append(out)

        # Out layers: one projection per scale.
        results = []
        for idx in range(len(self.in_channels)):
            results.append(self.out_layers[idx](outs[idx]))

        return tuple(results)
