class FiLMLayer(nn.Module):
    """FiLM conditioning: per-channel scale/shift of image features driven by a text embedding."""

    def __init__(self, txt_dim, num_channels):
        super().__init__()
        # A single linear layer emits both the scale (gamma) and shift (beta)
        # parameters, one pair per feature channel.
        self.film_gen = nn.Linear(txt_dim, 2 * num_channels, bias=True)

    def forward(self, feats: torch.Tensor, txt_emb: torch.Tensor) -> torch.Tensor:
        """Modulate ``feats`` (B x C x H x W) with pooled text embedding ``txt_emb`` (B x txt_dim)."""
        batch, channels = feats.shape[:2]
        gamma, beta = self.film_gen(txt_emb).split(channels, dim=1)
        gamma = gamma.reshape(batch, channels, 1, 1)
        beta = beta.reshape(batch, channels, 1, 1)
        # Residual formulation: gamma == beta == 0 leaves feats unchanged.
        return feats * (1 + gamma) + beta

class CSPLayerWithTwoConvText(BaseModule):
    """CSP layer (two-conv variant) whose split branch is conditioned on text.

    One of the two split branches is modulated by FiLM and refined with
    image-to-text cross-attention before flowing through the Darknet
    bottleneck stack; all branches are concatenated and fused by a 1x1 conv.
    """

    def __init__(self, in_channels: int, out_channels: int, expand_ratio: float = 0.5, 
                 mid_channels: int = 256,  # Explicit mid_channels control
                 num_blocks: int = 1, add_identity: bool = True, guide_channels: int = 256,
                 embed_channels: int = 512, num_heads: int = 8,  # 256//32=8 heads
                 num_fusion_stages: int = 1, use_film: bool = True, 
                 conv_cfg: Optional[dict] = None, 
                 norm_cfg: dict = dict(type='BN'), 
                 act_cfg: dict = dict(type='SiLU', inplace=True),
                 init_cfg: Optional[dict] = None):
        super().__init__(init_cfg=init_cfg)

        self.mid_channels = mid_channels
        self.guide_channels = guide_channels
        self.num_fusion_stages = num_fusion_stages
        self.use_film = use_film

        # 1x1 conv producing both split branches at once (2 * mid_channels).
        self.main_conv = ConvModule(in_channels, 2 * self.mid_channels, 1,
                                   conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # Stack of Darknet bottlenecks applied to the second branch.
        self.blocks = nn.ModuleList([
            DarknetBottleneck(self.mid_channels, self.mid_channels, expansion=1,
                              kernel_size=(3, 3), padding=(1, 1), add_identity=add_identity)
            for _ in range(num_blocks)
        ])

        # 1x1 fusion conv over the concatenation of all branches.
        self.final_conv = ConvModule((2 + num_blocks) * self.mid_channels, out_channels, 1,
                                    conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # Text-fusion submodules: project image/text into guide_channels,
        # cross-attend, then project the attention output back.
        self.img_proj = nn.Conv2d(self.mid_channels, self.guide_channels, kernel_size=1)
        self.txt_proj = nn.Linear(embed_channels, self.guide_channels)
        self.cross_attn = nn.MultiheadAttention(self.guide_channels, num_heads, batch_first=True)
        self.attn_proj = nn.Conv2d(self.guide_channels, self.mid_channels, kernel_size=1)
        
        if use_film:
            self.film = FiLMLayer(embed_channels, self.mid_channels)

    def forward(self, x: torch.Tensor, txt_feats: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Run the CSP layer; ``txt_feats`` (optional) conditions the second branch."""
        branches = list(
            self.main_conv(x).split((self.mid_channels, self.mid_channels), dim=1))

        if txt_feats is not None:
            branches[1] = self._fuse_text(branches[1], txt_feats)

        # Chain the bottlenecks, keeping every intermediate output for concat.
        for bottleneck in self.blocks:
            branches.append(bottleneck(branches[-1]))

        return self.final_conv(torch.cat(branches, dim=1))

    def _fuse_text(self, feat: torch.Tensor, txt_feats: torch.Tensor) -> torch.Tensor:
        """Condition one branch on text via FiLM and cross-attention."""
        # Pool token-level text features into a single embedding per sample.
        txt_emb = txt_feats.mean(dim=1) if txt_feats.dim() == 3 else txt_feats

        guide = self.txt_proj(txt_emb)  # [B, guide_channels]

        if self.use_film:
            feat = self.film(feat, txt_emb)

        # Cross-attention: image locations query the (single) text token.
        batch, _, height, width = feat.shape
        queries = self.img_proj(feat).flatten(2).transpose(1, 2)  # [B, H*W, guide_channels]
        key_value = guide.unsqueeze(1)  # [B, 1, guide_channels]
        attended, _ = self.cross_attn(queries, key_value, key_value)
        attended = attended.transpose(1, 2).view(batch, self.guide_channels, height, width)
        # Residual add keeps the branch identity when attention contributes little.
        return feat + self.attn_proj(attended)

        0                -1  1         0  models.common.Silence                   []                            
  1                -1  1      1856  models.common.Conv                      [3, 64, 3, 2]                 
  2                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               
  3                -1  1    212864  models.common.RepNCSPELAN4              [128, 256, 128, 64, 1]        
  4                -1  1    164352  models.common.ADown                     [256, 256]                    
  5                -1  1    847616  models.common.RepNCSPELAN4              [256, 512, 256, 128, 1]       
  6                -1  1    656384  models.common.ADown                     [512, 512]                    
  7                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       
  8                -1  1    656384  models.common.ADown                     [512, 512]                    
  9                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       
 10                -1  1    656896  models.common.SPPELAN                   [512, 512, 256]               
 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          
 12           [-1, 7]  1         0  models.common.Concat                    [1]                           
 13                -1  1   3119616  models.common.RepNCSPELAN4              [1024, 512, 512, 256, 1]      
 14                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          
 15           [-1, 5]  1         0  models.common.Concat                    [1]                           
 16                -1  1    912640  models.common.RepNCSPELAN4              [1024, 256, 256, 128, 1]      
 17                -1  1    164352  models.common.ADown                     [256, 256]                    
 18          [-1, 13]  1         0  models.common.Concat                    [1]                           
 19                -1  1   2988544  models.common.RepNCSPELAN4              [768, 512, 512, 256, 1]       
 20                -1  1    656384  models.common.ADown                     [512, 512]                    
 21          [-1, 10]  1         0  models.common.Concat                    [1]                           
 22                -1  1   3119616  models.common.RepNCSPELAN4              [1024, 512, 512, 256, 1]      
 23                 5  1    131328  models.common.CBLinear                  [512, [256]]                  
 24                 7  1    393984  models.common.CBLinear                  [512, [256, 512]]             
 25                 9  1    656640  models.common.CBLinear                  [512, [256, 512, 512]]        
 26                 0  1      1856  models.common.Conv                      [3, 64, 3, 2]                 
 27                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               
 28                -1  1    212864  models.common.RepNCSPELAN4              [128, 256, 128, 64, 1]        
 29                -1  1    164352  models.common.ADown                     [256, 256]                    
 30  [23, 24, 25, -1]  1         0  models.common.CBFuse                    [[0, 0, 0]]                   
 31                -1  1    847616  models.common.RepNCSPELAN4              [256, 512, 256, 128, 1]       
 32                -1  1    656384  models.common.ADown                     [512, 512]                    
 33      [24, 25, -1]  1         0  models.common.CBFuse                    [[1, 1]]                      
 34                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       
 35                -1  1    656384  models.common.ADown                     [512, 512]                    
 36          [25, -1]  1         0  models.common.CBFuse                    [[2]]                         
 37                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       
 38[31, 34, 37, 16, 19, 22]  1  21725312  models.yolo.DualDDetect                 [80, [512, 512, 512, 256, 512, 512]]


    YOLOv9-style backbone built on MMDetection's BaseBackbone.
    Stem      : Focus
    Each stage: Conv(stride=2) → E-ELAN(depth=round(n*deep)) → optional SPP


    # Per-architecture stage settings. Each entry is one stage:
    # [in_channels, out_channels, num_blocks, add_identity, use_spp]
    # Channel counts are pre-widen; widen_factor is applied in
    # build_stage_layer.
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 6, True, False], [512, 1024, 3, True, True]],
    }

    def __init__(self,
                 arch: str = 'P5',
                 last_stage_out_channels: int = 1024,
                 plugins: Union[dict, List[dict]] = None,
                 deepen_factor: float = 1.25,
                 widen_factor: float = 1.25,
                 input_channels: int = 3,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 norm_eval: bool = False,
                 init_cfg: OptMultiConfig = None):
        """Initialize the backbone.

        Args:
            arch (str): Key into ``arch_settings``.
            last_stage_out_channels (int): Output channels of the final
                stage (overrides the table value for this instance only).
        """
        # Work on a per-instance copy of the stage table. The original code
        # mutated ``self.arch_settings[arch][-1][1]`` in place, which writes
        # into the *class-level* attribute and leaks this instance's
        # ``last_stage_out_channels`` into every other instance.
        arch_setting = [list(stage) for stage in self.arch_settings[arch]]
        arch_setting[-1][1] = last_stage_out_channels
        super().__init__(
            arch_setting,
            deepen_factor,
            widen_factor,
            input_channels=input_channels,
            out_indices=out_indices,
            plugins=plugins,
            frozen_stages=frozen_stages,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            init_cfg=init_cfg)

    def build_stem_layer(self) -> nn.Module:
        """Replace the vanilla stem with YOLOv5/V8/V9 Focus.

        Returns:
            nn.Module: The Focus stem module.
        """
        # arch_setting[0][0] is the first stage's in_channels before widen.
        # Pass widen_factor to make_divisible, matching every other call
        # site (e.g. build_stage_layer), instead of pre-multiplying and
        # relying on the default factor of 1.0 — numerically identical but
        # consistent.
        stem_ch = make_divisible(self.arch_setting[0][0], self.widen_factor)
        return Focus(
            self.input_channels,
            stem_ch,
            kernel_size=3, 
            stride=2, 
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg
        )

    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build one stage.

        Stage layout:
          1) ConvModule (stride=2) downsampling
          2) E-ELAN block (depth scaled by ``deepen_factor``)
          3) Optional SPPELAN

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer,
                ``[in_channels, out_channels, num_blocks, add_identity,
                use_spp]``.

        Returns:
            list: The modules making up this stage, in execution order.
        """
        in_channels, out_channels, num_blocks, add_identity, use_spp = setting

        in_channels = make_divisible(in_channels, self.widen_factor)
        out_channels = make_divisible(out_channels, self.widen_factor)
        # make_round already applies deepen_factor (and keeps the result
        # >= 1). The previous code additionally computed
        # ``round(num_blocks * deepen_factor)`` on the already-rounded
        # value, applying the depth multiplier twice for any
        # deepen_factor != 1.
        depth = make_round(num_blocks, self.deepen_factor)
        stage = []

        conv_layer = ConvModule(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage.append(conv_layer)

        # NOTE(review): ``add_identity`` from the setting is not forwarded
        # to EELAN — confirm whether the block should receive it.
        elan = EELAN(
            out_channels,
            out_channels,
            depth=depth,
            width=self.widen_factor
        )
        stage.append(elan)
        if use_spp:
            # ``out_channels`` is already widened above, so only round the
            # SPP hidden width to the divisor here; passing widen_factor
            # again (as before) scaled the mid channels twice.
            sppelan_layer = SPPELAN(
                out_channels,
                out_channels,
                make_divisible(out_channels * 0.5, 1.0)
            )
            stage.append(sppelan_layer)
        return stage


            hidden = make_divisible(out_channels * 0.5, 8)
            spp = nn.Sequential(
                nn.MaxPool2d(kernel_size=5, stride=1, padding=2),
                ConvModule(out_channels, hidden, 1, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg),
                ConvModule(hidden, out_channels, 3, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
            )
            stage.append(spp)


           return forward_call(*input, **kwargs)
  File "/home/wrf/Dara/YOLO-World/yolo_world/models/necks/yolo_world_pafpn.py", line 524, in forward
    top_down_layer_inputs = torch.cat([upsample_feat, feat_low], 1)
RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 16 but got size 20 for tensor number 1 in the list.

# neck_yolov9_fixed.py

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmdet.models.necks.base_yolo_neck import BaseYOLONeck
from mmdet.models.builder import NECKS
from mmdet.models.utils import make_divisible, make_round
from yolov9.neck import RepNCSPELAN4  # your ELAN fusion block

@NECKS.register_module()
class YOLOv9PAFPNFixed(BaseYOLONeck):
    """YOLOv9-style PAFPN neck built on ``BaseYOLONeck``.

    Uses ``RepNCSPELAN4`` as the fusion block for both the top-down and
    bottom-up paths, and upsamples with an explicit ``size=`` so
    odd-sized feature maps cannot trigger a concat shape mismatch.

    NOTE(review): ``BaseYOLONeck`` commonly declares
    ``build_upsample_layer`` / ``build_downsample_layer`` /
    ``build_out_layer`` as abstract; confirm this base class provides
    them (``forward`` below relies on ``self.downsample_layers``).
    """

    def __init__(self,
                 in_channels, out_channels,
                 deepen_factor=1.25, widen_factor=1.25,
                 num_csp_blocks=3, freeze_all=False,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='SiLU', inplace=True),
                 init_cfg=None):
        # Stored before super().__init__ because the build_* hooks run
        # inside the base constructor and read it.
        self.num_csp_blocks = num_csp_blocks
        # NOTE(review): confirm BaseYOLONeck.__init__ accepts
        # ``num_csp_blocks``; upstream BaseYOLONeck does not take it.
        super().__init__(
            in_channels=in_channels, out_channels=out_channels,
            deepen_factor=deepen_factor, widen_factor=widen_factor,
            num_csp_blocks=num_csp_blocks, freeze_all=freeze_all,
            norm_cfg=norm_cfg, act_cfg=act_cfg, init_cfg=init_cfg
        )

    def build_reduce_layer(self, idx: int) -> nn.Module:
        """No channel reduction: backbone features pass through unchanged."""
        return nn.Identity()

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """Fusion block for the top-down path at level ``idx`` (1-based from the top)."""
        in_ch  = make_divisible((self.in_channels[idx-1] + self.in_channels[idx]), self.widen_factor)
        out_ch = make_divisible(self.out_channels[idx-1], self.widen_factor)
        hidden = make_divisible(out_ch * 0.5, self.widen_factor)
        depth  = make_round(self.num_csp_blocks, self.deepen_factor)
        return RepNCSPELAN4(in_ch, out_ch, hidden*2, hidden, depth)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """Fusion block for the bottom-up path between levels ``idx`` and ``idx+1``."""
        in_ch  = make_divisible((self.out_channels[idx] + self.out_channels[idx+1]), self.widen_factor)
        out_ch = make_divisible(self.out_channels[idx+1], self.widen_factor)
        hidden = make_divisible(out_ch * 0.5, self.widen_factor)
        depth  = make_round(self.num_csp_blocks, self.deepen_factor)
        return RepNCSPELAN4(in_ch, out_ch, hidden*2, hidden, depth)

    def forward(self, inputs):
        """Top-down then bottom-up fusion over the backbone pyramid.

        Args:
            inputs: Tuple of backbone feature maps, shallow to deep.

        Returns:
            tuple: Fused feature maps, one per input level.
        """
        feats = list(inputs)
        num_levels = len(self.in_channels)
        # Top-down path. BaseYOLONeck appends top_down_layers while
        # iterating idx = num_levels-1 .. 1, so the layer for level ``i``
        # sits at list position ``num_levels - 1 - i``. The original code
        # indexed ``self.top_down_layers[i]`` directly, which is out of
        # range for the deepest level (only num_levels-1 layers exist).
        for i in range(num_levels - 1, 0, -1):
            up = F.interpolate(
                feats[i], size=feats[i-1].shape[2:], mode='nearest'
            )
            feats[i-1] = self.top_down_layers[num_levels - 1 - i](
                torch.cat([up, feats[i-1]], dim=1)
            )
        # Bottom-up path; these layers are built for idx = 0 .. n-2, so
        # direct indexing is correct.
        for i in range(num_levels - 1):
            down = self.downsample_layers[i](feats[i])
            feats[i+1] = self.bottom_up_layers[i](
                torch.cat([down, feats[i+1]], dim=1)
            )
        return tuple(feats)

