class RepNCSPELAN4(nn.Module):
    """GELAN-style aggregation block (YOLOv9).

    Splits the 1x1-conv output into two channel halves, passes the second
    half through two cascaded RepNCSP+Conv branches, then fuses both halves
    and both branch outputs with a final 1x1 conv.
    """

    def __init__(self, c1, c2, c3, c4, c5=1):
        super().__init__()
        # The chunk(2, 1) in forward() requires an even hidden width.
        assert c3 % 2 == 0, "c3 must be even for channel splitting"
        self.c = c3 // 2

        self.cv1 = Conv(c1, c3, 1, 1)
        self.cv2 = nn.Sequential(RepNCSP(self.c, c4, c5), Conv(c4, c4, 3, 1))
        self.cv3 = nn.Sequential(RepNCSP(c4, c4, c5), Conv(c4, c4, 3, 1))
        # Fuses both halves (c3 total) plus the two c4-wide branch outputs.
        self.cv4 = Conv(c3 + 2 * c4, c2, 1, 1)

    def forward(self, x):
        halves = self.cv1(x).chunk(2, 1)
        feats = [halves[0], halves[1]]
        # Each branch consumes the most recent feature map.
        for branch in (self.cv2, self.cv3):
            feats.append(branch(feats[-1]))
        return self.cv4(torch.cat(feats, 1))

class ADown(nn.Module):
    """Average/max-pool downsampling block (YOLOv9 ADown).

    Halves spatial resolution while mapping ``c1`` -> ``c2`` channels:
    one channel half goes through a stride-2 3x3 conv, the other through
    a stride-2 max-pool followed by a 1x1 conv; outputs are concatenated.
    """

    def __init__(self, c1, c2):
        super().__init__()
        # chunk(2, 1) in forward() needs an even channel count.
        assert c1 % 2 == 0, "Input channels must be even"
        # Fix: also validate c2 — with an odd c2 the block would silently
        # emit 2 * (c2 // 2) = c2 - 1 channels, breaking downstream concat.
        assert c2 % 2 == 0, "Output channels must be even"
        self.c = c2 // 2

        # Stride-2 conv branch and 1x1 conv (applied after max-pool) branch.
        self.cv1 = Conv(c1 // 2, self.c, 3, 2, 1)
        self.cv2 = Conv(c1 // 2, self.c, 1, 1, 0)

    def forward(self, x):
        # avg_pool2d(kernel=2, stride=1, pad=0, ceil_mode=False,
        # count_include_pad=True): light smoothing before the split;
        # spatial size shrinks by 1 in each dimension here.
        x = F.avg_pool2d(x, 2, 1, 0, False, True)
        x1, x2 = x.chunk(2, 1)
        x1 = self.cv1(x1)               # stride-2 conv path
        x2 = F.max_pool2d(x2, 3, 2, 1)  # stride-2 max-pool path
        x2 = self.cv2(x2)
        return torch.cat((x1, x2), 1)


        def build_stem_layer(self) -> nn.Module:
    """Build stem layers as per YAML specification."""
    return nn.Sequential(
        ConvModule(
            self.input_channels,
            make_divisible(64, self.widen_factor),  # First stem to 64 channels
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg
        ),
        ConvModule(
            make_divisible(64, self.widen_factor),
            make_divisible(128, self.widen_factor),  # Second stem to 128 channels
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg
        )
    )


    # Per-stage architecture settings; each row is:
    # [in_channels, c2, c3, c4, num_blocks, use_adown]
    # Fix: the closing brace was at column 0, dedenting the attribute out of
    # the enclosing class scope; indentation normalized.
    arch_settings = {
        'P5': [
            [128, 256, 128, 64, 1, True],    # Stage3 (160x160 -> 80x80)
            [256, 512, 256, 128, 1, True],   # Stage5 (80x80 -> 40x40)
            [512, 512, 512, 256, 1, True],   # Stage7 (40x40 -> 20x20)
            [512, 512, 512, 256, 1, False],  # Stage9 (20x20, no downsample)
        ]
    }

    """Path Aggregation Network used in YOLO World
    Following YOLOv8 PAFPN, including text to image fusion
    """
    def __init__(self,
                 in_channels: List[int],
                 out_channels: Union[List[int], int],
                 guide_channels: int,
                 embed_channels: List[int],
                 num_heads: List[int],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 num_csp_blocks: int = 3,
                 freeze_all: bool = False,
                 block_cfg: ConfigType = dict(type='CSPLayerWithTwoConv'),
                 norm_cfg: ConfigType = dict(type='BN',
                                             momentum=0.03,
                                             eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None) -> None:
        """Initialize the text-guided PAFPN neck.

        Args:
            in_channels: per-level input channel counts from the backbone.
            out_channels: per-level (or single) output channel count.
            guide_channels: channel width of the text guidance features.
            embed_channels: per-level embedding widths for text fusion.
            num_heads: per-level attention head counts for text fusion.
            deepen_factor: depth multiplier applied to ``num_csp_blocks``.
            widen_factor: width multiplier applied to channel counts.
            num_csp_blocks: base number of CSP blocks per fusion layer.
            freeze_all: freeze all parameters if True.
            block_cfg: registry config of the fusion block to build.
            norm_cfg: normalization layer config.
            act_cfg: activation layer config.
            init_cfg: weight initialization config.
        """
        # NOTE(review): these attributes are deliberately assigned BEFORE
        # super().__init__() — the base-class constructor presumably invokes
        # the build_top_down_layer / build_bottom_up_layer hooks below,
        # which read them. Confirm against the base class before reordering.
        self.guide_channels = guide_channels
        self.embed_channels = embed_channels
        self.num_heads = num_heads
        self.block_cfg = block_cfg
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         deepen_factor=deepen_factor,
                         widen_factor=widen_factor,
                         num_csp_blocks=num_csp_blocks,
                         freeze_all=freeze_all,
                         norm_cfg=norm_cfg,
                         act_cfg=act_cfg,
                         init_cfg=init_cfg)

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """Build the top-down fusion layer at position ``idx``.

        The layer consumes the concatenation of the upsampled higher level
        and the lower-level feature, so its input width is the sum of both
        backbone widths (scaled by ``widen_factor``).

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        cfg = copy.deepcopy(self.block_cfg)
        concat_channels = self.in_channels[idx - 1] + self.in_channels[idx]
        cfg.update({
            'in_channels': make_divisible(concat_channels, self.widen_factor),
            'out_channels': make_divisible(self.out_channels[idx - 1],
                                           self.widen_factor),
            'guide_channels': self.guide_channels,
            'embed_channels': make_round(self.embed_channels[idx - 1],
                                         self.widen_factor),
            'num_heads': make_round(self.num_heads[idx - 1],
                                    self.widen_factor),
            'num_blocks': make_round(self.num_csp_blocks, self.deepen_factor),
            'add_identity': False,
            'norm_cfg': self.norm_cfg,
            'act_cfg': self.act_cfg,
        })
        return MODELS.build(cfg)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """Build the bottom-up fusion layer at position ``idx``.

        The layer consumes the concatenation of the downsampled lower level
        and the next inner output, so its input width is the sum of both
        output widths (scaled by ``widen_factor``).

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        cfg = copy.deepcopy(self.block_cfg)
        concat_channels = self.out_channels[idx] + self.out_channels[idx + 1]
        cfg.update({
            'in_channels': make_divisible(concat_channels, self.widen_factor),
            'out_channels': make_divisible(self.out_channels[idx + 1],
                                           self.widen_factor),
            'guide_channels': self.guide_channels,
            'embed_channels': make_round(self.embed_channels[idx + 1],
                                         self.widen_factor),
            'num_heads': make_round(self.num_heads[idx + 1],
                                    self.widen_factor),
            'num_blocks': make_round(self.num_csp_blocks, self.deepen_factor),
            'add_identity': False,
            'norm_cfg': self.norm_cfg,
            'act_cfg': self.act_cfg,
        })
        return MODELS.build(cfg)

    def forward(self, img_feats: List[Tensor], txt_feats: Tensor = None) -> tuple:
        """Run the PAFPN: reduce, top-down, bottom-up, then output layers.

        Args:
            img_feats: multi-level image features, finest first.
            txt_feats: text features (BxLxD) forwarded into each fusion layer.

        Returns:
            tuple: one refined feature map per input level.
        """
        num_levels = len(self.in_channels)
        assert len(img_feats) == num_levels

        # Per-level channel reduction.
        reduce_outs = [
            layer(feat) for layer, feat in zip(self.reduce_layers, img_feats)
        ]

        # Top-down path: fuse each level with the upsampled level above it.
        # `step` counts upsample/top-down layers from 0 while `idx` walks
        # the levels from coarsest to finest.
        inner_outs = [reduce_outs[-1]]
        for step, idx in enumerate(range(num_levels - 1, 0, -1)):
            feat_high = inner_outs[0]
            feat_low = reduce_outs[idx - 1]
            upsample_feat = self.upsample_layers[step](feat_high)
            if self.upsample_feats_cat_first:
                fused = torch.cat([upsample_feat, feat_low], 1)
            else:
                fused = torch.cat([feat_low, upsample_feat], 1)
            inner_outs.insert(0, self.top_down_layers[step](fused, txt_feats))

        # Bottom-up path: fuse each inner output with the downsampled
        # result from the level below.
        outs = [inner_outs[0]]
        for idx in range(num_levels - 1):
            downsampled = self.downsample_layers[idx](outs[-1])
            merged = torch.cat([downsampled, inner_outs[idx + 1]], 1)
            outs.append(self.bottom_up_layers[idx](merged, txt_feats))

        # Final per-level projection.
        return tuple(layer(out) for layer, out in zip(self.out_layers, outs))


          File "/home/wrf/anaconda3/envs/mark-y2/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/wrf/Dara/YOLO-World/yolo_world/models/necks/yolo_world_pafpn.py", line 301, in forward
    top_down_layer_inputs = torch.cat([upsample_feat, feat_low], 1)
RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 40 but got size 20 for tensor number 1 in the list.

            upsample_cfg=dict(
                scale_factor=2,
                # Fix: 'nearest' is not an interpolating mode, so passing
                # align_corners makes torch F.interpolate raise a ValueError;
                # the key is removed here.
                mode='nearest'
            ),


def forward(self, img_feats: List[Tensor], txt_feats: Tensor = None) -> tuple:
    """Modified forward with dimension validation.

    Fix: the pasted version elided the reduce/bottom-up/output stages behind
    a placeholder comment, referenced ``inner_outs`` before assignment, and
    never returned — the full pipeline is restored here.

    Args:
        img_feats: multi-level image features, finest first.
        txt_feats: text features (BxLxD) forwarded into each fusion layer.

    Returns:
        tuple: one refined feature map per input level.
    """
    assert len(img_feats) == len(self.in_channels)

    # Spatial dimension validation: square, stride-32-aligned features.
    for feat in img_feats:
        _, _, h, w = feat.shape
        assert h == w, f"Non-square features {h}x{w} not supported"
        assert h % 32 == 0, f"Feature size {h} must be divisible by 32"

    # Reduce layers (restored).
    reduce_outs = []
    for idx in range(len(self.in_channels)):
        reduce_outs.append(self.reduce_layers[idx](img_feats[idx]))

    # Top-down path with dimension check before every concatenation.
    inner_outs = [reduce_outs[-1]]
    for idx in range(len(self.in_channels) - 1, 0, -1):
        feat_high = inner_outs[0]
        feat_low = reduce_outs[idx - 1]

        # Ensure proper upsampling
        upsample_feat = F.interpolate(
            feat_high,
            scale_factor=2,
            mode='nearest'
        )

        # Validate dimensions before concatenation
        assert upsample_feat.shape[-2:] == feat_low.shape[-2:], \
            f"Dimension mismatch: {upsample_feat.shape} vs {feat_low.shape}"

        top_down_layer_inputs = torch.cat([upsample_feat, feat_low], 1)
        inner_out = self.top_down_layers[len(self.in_channels)-1-idx](
            top_down_layer_inputs, txt_feats)
        inner_outs.insert(0, inner_out)

    # Bottom-up path (restored).
    outs = [inner_outs[0]]
    for idx in range(len(self.in_channels) - 1):
        downsample_feat = self.downsample_layers[idx](outs[-1])
        out = self.bottom_up_layers[idx](
            torch.cat([downsample_feat, inner_outs[idx + 1]], 1), txt_feats)
        outs.append(out)

    # Output projection per level (restored).
    return tuple(self.out_layers[idx](outs[idx])
                 for idx in range(len(self.in_channels)))

  File "/home/wrf/anaconda3/envs/mark-y2/lib/python3.9/site-packages/mmengine/registry/build_functions.py", line 121, in build_from_cfg
    obj = obj_cls(**args)  # type: ignore
TypeError: __init__() got an unexpected keyword argument 'upsample_cfg'

def build_top_down_layer(self, idx: int) -> nn.Module:
    """Build top-down layer with proper configuration.

    NOTE: ``upsample_cfg`` must not be injected into this config — the
    block built via ``MODELS.build`` does not accept that keyword.

    Args:
        idx (int): layer idx.

    Returns:
        nn.Module: The top down layer.
    """
    cfg = copy.deepcopy(self.block_cfg)
    # Input is the concatenation of the two adjacent backbone levels.
    concat_channels = self.in_channels[idx - 1] + self.in_channels[idx]
    overrides = {
        'in_channels': make_divisible(concat_channels, self.widen_factor),
        'out_channels': make_divisible(self.out_channels[idx - 1],
                                       self.widen_factor),
        'guide_channels': self.guide_channels,
        'embed_channels': make_round(self.embed_channels[idx - 1],
                                     self.widen_factor),
        'num_heads': make_round(self.num_heads[idx - 1], self.widen_factor),
        'num_blocks': make_round(self.num_csp_blocks, self.deepen_factor),
        'add_identity': False,
        'norm_cfg': self.norm_cfg,
        'act_cfg': self.act_cfg,
    }
    cfg.update(overrides)
    return MODELS.build(cfg)

def forward(self, img_feats: List[Tensor], txt_feats: Tensor = None) -> tuple:
    """Modified forward with proper upsampling.

    Fix: the pasted version ended at the top-down loop with a placeholder
    comment ("rest of bottom-up path remains the same") and no return
    statement — the bottom-up path and output layers are restored here.

    Args:
        img_feats: multi-level image features, finest first.
        txt_feats: text features (BxLxD) forwarded into each fusion layer.

    Returns:
        tuple: one refined feature map per input level.
    """
    assert len(img_feats) == len(self.in_channels)

    # Reduce layers
    reduce_outs = []
    for idx in range(len(self.in_channels)):
        reduce_outs.append(self.reduce_layers[idx](img_feats[idx]))

    # Top-down path
    inner_outs = [reduce_outs[-1]]
    for idx in range(len(self.in_channels) - 1, 0, -1):
        feat_high = inner_outs[0]
        feat_low = reduce_outs[idx - 1]

        # Explicit upsampling with scale factor
        upsample_feat = F.interpolate(
            feat_high,
            scale_factor=2,
            mode='nearest'
        )

        # Dimension validation
        if upsample_feat.shape[-2:] != feat_low.shape[-2:]:
            raise ValueError(
                f"Feature size mismatch: {upsample_feat.shape} vs {feat_low.shape}. "
                "Check backbone output dimensions."
            )

        top_down_layer_inputs = torch.cat([upsample_feat, feat_low], 1)
        inner_out = self.top_down_layers[len(self.in_channels)-1-idx](
            top_down_layer_inputs, txt_feats
        )
        inner_outs.insert(0, inner_out)

    # Bottom-up path (restored)
    outs = [inner_outs[0]]
    for idx in range(len(self.in_channels) - 1):
        downsample_feat = self.downsample_layers[idx](outs[-1])
        out = self.bottom_up_layers[idx](
            torch.cat([downsample_feat, inner_outs[idx + 1]], 1), txt_feats)
        outs.append(out)

    # Output projection per level (restored)
    return tuple(self.out_layers[idx](outs[idx])
                 for idx in range(len(self.in_channels)))