@MODELS.register_module()
class DTIPABlock(nn.Module):
    """Dual Text-Image Prototype Attention block.

    Projects a global text embedding and a visual feature map, concatenates
    them per pixel, attends over a learned bank of 64 prototypes, and adds
    the projected attention map back onto the visual features (residual).

    Args:
        in_channels: Channels of the input visual feature map ``x``.
        out_channels: Channels of the returned feature map.
        guide_channels: Channel size of the raw text features.
        embed_channels: Dimension the text features are projected to.
        num_heads: Accepted for config compatibility; unused in this block.
        num_blocks: Accepted for config compatibility; unused in this block.
        add_identity: Accepted for config compatibility; unused in this block.
        norm_cfg: Norm config for the ConvModules. Defaults to BN.
        act_cfg: Activation config for the ConvModules. Defaults to SiLU.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 guide_channels: int,
                 embed_channels: int,
                 num_heads: int,
                 num_blocks: int,
                 add_identity: bool = False,
                 norm_cfg: dict = None,
                 act_cfg: dict = None):
        super().__init__()
        norm_cfg = norm_cfg or dict(type='BN', momentum=0.03, eps=0.001)
        act_cfg = act_cfg or dict(type='SiLU', inplace=True)

        # Text branch: global pool (for 4-D inputs) then project to
        # embed_channels.
        self._pool    = nn.AdaptiveAvgPool2d(1)
        self._flatten = nn.Flatten(1)
        self._linear  = nn.Linear(guide_channels, embed_channels)

        # Visual branch: 3x3 conv to out_channels.
        self.visual_proj = ConvModule(
            in_channels, out_channels, 3, padding=1,
            norm_cfg=norm_cfg, act_cfg=act_cfg
        )

        # Fused (text + visual) channel count. Fix: the original kept this
        # only as a local ``D`` in __init__, but forward() also needs it
        # (referencing ``D`` there raised NameError) — store it on self.
        self.fused_channels = embed_channels + out_channels

        # Bank of 64 learnable prototypes in the fused space.
        self.prototype = nn.Parameter(torch.randn(64, self.fused_channels))

        # Per-pixel MLP producing a softmax distribution over prototypes.
        self.fusion = nn.Sequential(
            nn.Linear(self.fused_channels, 4 * self.fused_channels),
            nn.GELU(),
            nn.Linear(4 * self.fused_channels, 64),
            nn.Softmax(dim=-1)
        )

        # Project the fused attention map back to out_channels
        # (accepts fused_channels, not 64).
        self.out_conv = ConvModule(
            self.fused_channels, out_channels, 1,
            norm_cfg=norm_cfg, act_cfg=act_cfg
        )

    def forward(self, x: torch.Tensor, txt_feats: torch.Tensor) -> torch.Tensor:
        """Fuse text guidance into visual features.

        Args:
            x: Visual features of shape ``(B, in_channels, H, W)``.
            txt_feats: Text features; 4-D maps are globally pooled, 3-D
                ``(B, L, guide_channels)`` are averaged over tokens, and
                2-D ``(B, guide_channels)`` are used as-is.

        Returns:
            Tensor of shape ``(B, out_channels, H, W)``.

        Raises:
            ValueError: If ``txt_feats`` is not 2-, 3-, or 4-dimensional.
        """
        B, _, H, W = x.shape

        # Normalize text input to (B, guide_channels).
        if txt_feats.ndim == 4:
            t = self._flatten(self._pool(txt_feats))
        elif txt_feats.ndim == 3:
            t = txt_feats.mean(1)
        elif txt_feats.ndim == 2:
            t = txt_feats
        else:
            raise ValueError(f"Unsupported txt_feats.dim()={txt_feats.ndim}")

        text = self._linear(t)  # (B, embed_channels)

        # Visual projection and per-pixel flattening.
        visual = self.visual_proj(x)
        vf     = visual.flatten(2).permute(0, 2, 1)      # (B, N, out_c)
        tf     = text.unsqueeze(1).expand(-1, H * W, -1)  # (B, N, embed_c)

        cat      = torch.cat([tf, vf], dim=-1)           # (B, N, D)
        weights  = self.fusion(cat)                      # (B, N, 64)
        attended = torch.einsum('bnp,pd->bnd', weights, self.prototype)  # (B, N, D)

        # Fix: use the stored fused channel count (``D`` was undefined
        # here), and use reshape — the permuted tensor is non-contiguous,
        # so .view() would raise a RuntimeError.
        attn_map = attended.permute(0, 2, 1).reshape(
            B, self.fused_channels, H, W)                # (B, D, H, W)
        out = self.out_conv(attn_map) + visual           # (B, out_c, H, W)
        return out


        class YOLOWorldPAFPN(YOLOv8PAFPN):
    """Original structure with DTIPA blocks"""
    def __init__(self,
                 in_channels: List[int],
                 out_channels: Union[List[int], int],
                 guide_channels: int,
                 embed_channels: List[int],
                 num_heads: List[int],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 num_csp_blocks: int = 3,
                 freeze_all: bool = False,
                 block_cfg: ConfigType = dict(type='DTIPABlock'),  # Changed default
                 norm_cfg: ConfigType = dict(type='BN',
                                             momentum=0.03,
                                             eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None) -> None:

        # Original initialization
        self.num_csp_blocks = num_csp_blocks
        self.guide_channels = guide_channels
        self.embed_channels = embed_channels
        self.num_heads = num_heads
        self.block_cfg = block_cfg
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         deepen_factor=deepen_factor,
                         widen_factor=widen_factor,
                         num_csp_blocks=num_csp_blocks,
                         freeze_all=freeze_all,
                         norm_cfg=norm_cfg,
                         act_cfg=act_cfg,
                         init_cfg=init_cfg)

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """Modified to use DTIPABlock with original parameters"""
        block_cfg = copy.deepcopy(self.block_cfg)
        block_cfg.update(
            dict(in_channels=make_divisible(
                (self.in_channels[idx - 1] + self.in_channels[idx]),
                self.widen_factor),
                 out_channels=make_divisible(self.out_channels[idx - 1],
                                             self.widen_factor),
                 guide_channels=self.guide_channels,
                 embed_channels=make_round(self.embed_channels[idx - 1],
                                           self.widen_factor),
                 num_heads=make_round(self.num_heads[idx - 1],
                                      self.widen_factor),
                 num_blocks=make_round(self.num_csp_blocks,
                                       self.deepen_factor),
                 add_identity=False,
                 norm_cfg=self.norm_cfg,
                 act_cfg=self.act_cfg))
        return MODELS.build(block_cfg)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """Modified to use DTIPABlock with original parameters"""
        block_cfg = copy.deepcopy(self.block_cfg)
        block_cfg.update(
            dict(in_channels=make_divisible(
                (self.out_channels[idx] + self.out_channels[idx + 1]),
                self.widen_factor),
                 out_channels=make_divisible(self.out_channels[idx + 1],
                                             self.widen_factor),
                 guide_channels=self.guide_channels,
                 embed_channels=make_round(self.embed_channels[idx + 1],
                                           self.widen_factor),
                 num_heads=make_round(self.num_heads[idx + 1],
                                      self.widen_factor),
                 num_blocks=make_round(self.num_csp_blocks,
                                       self.deepen_factor),
                 add_identity=False,
                 norm_cfg=self.norm_cfg,
                 act_cfg=self.act_cfg))
        return MODELS.build(block_cfg)

    # Original forward() unchanged
    def forward(self, img_feats: List[Tensor], txt_feats: Tensor = None) -> tuple:
        assert len(img_feats) == len(self.in_channels)
        reduce_outs = [layer(feat) for layer, feat in zip(self.reduce_layers, img_feats)]

        inner_outs = [reduce_outs[-1]]
        for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_high = inner_outs[0]
            feat_low = reduce_outs[idx - 1]
            upsample_feat = self.upsample_layers[len(self.in_channels) - 1 - idx](feat_high)
            
            if self.upsample_feats_cat_first:
                top_down_input = torch.cat([upsample_feat, feat_low], 1)
            else:
                top_down_input = torch.cat([feat_low, upsample_feat], 1)
                
            inner_out = self.top_down_layers[len(self.in_channels) - 1 - idx](top_down_input, txt_feats)
            inner_outs.insert(0, inner_out)

        outs = [inner_outs[0]]
        for idx in range(len(self.in_channels) - 1):
            feat_low = outs[-1]
            feat_high = inner_outs[idx + 1]
            downsample_feat = self.downsample_layers[idx](feat_low)
            out = self.bottom_up_layers[idx](torch.cat([downsample_feat, feat_high], 1), txt_feats)
            outs.append(out)

        return tuple([layer(feat) for layer, feat in zip(self.out_layers, outs)])

# NOTE(review): the following runtime traceback was accidentally pasted into
# this source file, making it un-importable. It is preserved below as a
# comment because it documents the shape-mismatch bug (text features reaching
# nn.Linear as (16, 1) instead of (16, 512)) that motivated this module.
#
#   File "/home/wrf/Dara/YOLO-World/yolo_world/models/necks/CSPLayerWithTwoConvText.py", line 73, in forward
#     text = self.text_proj(txt_feats)  # Now [B, embed_channels]
#   File "/home/wrf/anaconda3/envs/yolo_world/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
#     return forward_call(*input, **kwargs)
#   File "/home/wrf/anaconda3/envs/yolo_world/lib/python3.9/site-packages/torch/nn/modules/container.py", line 141, in forward
#     input = module(input)
#   File "/home/wrf/anaconda3/envs/yolo_world/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
#     return forward_call(*input, **kwargs)
#   File "/home/wrf/anaconda3/envs/yolo_world/lib/python3.9/site-packages/torch/nn/modules/linear.py", line 103, in forward
#     return F.linear(input, self.weight, self.bias)
#   RuntimeError: mat1 and mat2 shapes cannot be multiplied (16x1 and 512x256)