class FiLMLayer(nn.Module):
    """Feature-wise Linear Modulation (FiLM) driven by text embeddings.

    A single linear layer maps the pooled text embedding to per-channel
    scale (gamma) and shift (beta) parameters that modulate image features.
    """

    def __init__(self, txt_dim, num_channels):
        super().__init__()
        # One projection emits both gamma and beta (num_channels each).
        self.film_gen = nn.Linear(txt_dim, 2 * num_channels, bias=True)

    def forward(self, feats: torch.Tensor, txt_emb: torch.Tensor) -> torch.Tensor:
        """Modulate ``feats`` (B x C x H x W) with ``txt_emb`` (B x txt_dim)."""
        batch, channels = feats.shape[:2]
        # First half of the projection is gamma, second half is beta.
        gamma, beta = self.film_gen(txt_emb).chunk(2, dim=1)
        # Broadcast the per-channel parameters over the spatial dims.
        gamma = gamma.reshape(batch, channels, 1, 1)
        beta = beta.reshape(batch, channels, 1, 1)
        # Residual-style scaling: gamma == 0 gives the identity mapping.
        return feats * (1.0 + gamma) + beta

class CSPLayerWithTwoConvText(BaseModule):
    """CSP Layer with 2 convs + cross-attention + FiLM gating.

    The input is split into two ``mid_channels`` branches; the second branch
    is conditioned on text (optional FiLM gating, then one or more
    cross-attention passes in which image tokens attend to the pooled text
    embedding) before running through the Darknet bottleneck blocks.

    Args:
        in_channels (int): Input feature channels.
        out_channels (int): Output feature channels.
        expand_ratio (float): Kept for config compatibility; unused here
            because ``mid_channels`` is given explicitly.
        mid_channels (int): Channels of each CSP branch.
        num_blocks (int): Number of DarknetBottleneck blocks.
        add_identity (bool): Identity shortcut inside each bottleneck.
        guide_channels (int): Shared projection dim for cross-attention.
        embed_channels (int): Dim of the incoming text embeddings.
        num_heads (int): Attention heads; must divide ``guide_channels``.
        num_fusion_stages (int): Number of cross-attention fusion passes
            (the attention weights are shared across passes).
        use_film (bool): Whether to apply FiLM gating before the attention.
        conv_cfg, norm_cfg, act_cfg, init_cfg: Standard mm-style configs.
    """

    def __init__(self, in_channels: int, out_channels: int, expand_ratio: float = 0.5,
                 mid_channels: int = 256,  # Explicit mid_channels control
                 num_blocks: int = 1, add_identity: bool = True, guide_channels: int = 256,
                 embed_channels: int = 512, num_heads: int = 8,
                 num_fusion_stages: int = 1, use_film: bool = True,
                 conv_cfg: Optional[dict] = None,
                 norm_cfg: dict = dict(type='BN'),
                 act_cfg: dict = dict(type='SiLU', inplace=True),
                 init_cfg: Optional[dict] = None):
        super().__init__(init_cfg=init_cfg)

        self.mid_channels = mid_channels
        self.guide_channels = guide_channels
        self.num_fusion_stages = num_fusion_stages
        self.use_film = use_film

        # 1x1 conv producing both CSP branches at once (split in forward).
        self.main_conv = ConvModule(in_channels, 2 * self.mid_channels, 1,
                                    conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # Darknet bottlenecks, applied sequentially to the second branch.
        self.blocks = nn.ModuleList([
            DarknetBottleneck(self.mid_channels, self.mid_channels, expansion=1,
                              kernel_size=(3, 3), padding=(1, 1), add_identity=add_identity)
            for _ in range(num_blocks)
        ])

        # 1x1 conv fusing both branches plus every bottleneck output.
        self.final_conv = ConvModule((2 + num_blocks) * self.mid_channels, out_channels, 1,
                                     conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # Text fusion: project image/text into a shared space, attend, project back.
        self.img_proj = nn.Conv2d(self.mid_channels, self.guide_channels, kernel_size=1)
        self.txt_proj = nn.Linear(embed_channels, self.guide_channels)
        self.cross_attn = nn.MultiheadAttention(self.guide_channels, num_heads, batch_first=True)
        self.attn_proj = nn.Conv2d(self.guide_channels, self.mid_channels, kernel_size=1)

        if use_film:
            self.film = FiLMLayer(embed_channels, self.mid_channels)

    def forward(self, x: torch.Tensor, txt_feats: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Run the CSP layer, fusing ``txt_feats`` into the second branch.

        Args:
            x: Image features, shape (B, in_channels, H, W).
            txt_feats: Text embeddings, shape (B, L, embed_channels) or
                (B, embed_channels). ``None`` disables text fusion.
        """
        x_main = self.main_conv(x)
        x_main = list(x_main.split((self.mid_channels, self.mid_channels), dim=1))

        if txt_feats is not None:
            # Pool token-level embeddings into one vector per sample.
            if txt_feats.dim() == 3:
                txt_emb = txt_feats.mean(dim=1)  # [B, embed_channels]
            else:
                txt_emb = txt_feats  # [B, embed_channels]

            # Single text token in the shared attention space.
            txt_tokens = self.txt_proj(txt_emb).unsqueeze(1)  # [B, 1, guide_channels]

            if self.use_film:
                x_main[1] = self.film(x_main[1], txt_emb)

            B, _, H, W = x_main[1].shape
            # BUGFIX: ``num_fusion_stages`` was previously stored but never
            # used — exactly one fusion pass always ran. It now controls the
            # number of passes; the default of 1 preserves old behavior.
            for _ in range(self.num_fusion_stages):
                img_tokens = self.img_proj(x_main[1])  # [B, guide_channels, H, W]
                img_tokens = img_tokens.flatten(2).transpose(1, 2)  # [B, H*W, guide_channels]
                attn_out, _ = self.cross_attn(img_tokens, txt_tokens, txt_tokens)
                attn_out = attn_out.transpose(1, 2).view(B, self.guide_channels, H, W)
                # Residual add back in mid_channels space.
                x_main[1] = x_main[1] + self.attn_proj(attn_out)

        # Each bottleneck consumes the previous output; all outputs are kept
        # and concatenated (CSP-style dense aggregation).
        for block in self.blocks:
            x_main.append(block(x_main[-1]))

        out = torch.cat(x_main, dim=1)
        return self.final_conv(out)
        
class YOLOWorldPAFPN(YOLOv8PAFPN):
    """Path Aggregation Network for YOLO-World, including text→image fusion.

    This version modifies the default YOLOv8 PAFPN to:
      1) Use a text-aware CSP block (e.g. CSPLayerWithTwoConvText).
      2) Pass additional config (guide_channels, embed_channels, num_heads, etc.).
      3) Optionally enable multi-stage cross-attn (num_fusion_stages) or FiLM gating (use_film).

    Args:
        in_channels (List[int]): Channels of backbone features (C3, C4, C5).
        out_channels (List[int] or int): Channels after each PAN stage.
        guide_channels (int): Dimension for image/text projections in cross-attn.
        embed_channels (List[int] or int): Dimension of text embeddings
            (per scale when a list is given).
        num_heads (List[int] or int): Number of attention heads (per scale
            when a list is given).
        num_fusion_stages (int): How many times to run cross-attn in each CSP block.
        use_film (bool): Whether to apply FiLM gating in each CSP block.
        deepen_factor (float): YOLOv8 param for controlling depth.
        widen_factor (float): YOLOv8 param for controlling width.
        num_csp_blocks (int): Number of DarknetBottleneck blocks in each CSP layer.
        freeze_all (bool): If True, freeze all parameters in this neck.
        block_cfg (dict): Base config for the CSP block. We override 'type' and text params.
        norm_cfg (dict): Normalization config (BN by default).
        act_cfg (dict): Activation config (SiLU by default).
        init_cfg: Initialization config.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: Union[List[int], int],
                 guide_channels: int,
                 embed_channels: List[int],
                 num_heads: List[int],
                 num_fusion_stages: int = 1,
                 use_film: bool = False,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 num_csp_blocks: int = 3,
                 freeze_all: bool = False,
                 block_cfg: dict = dict(type='CSPLayerWithTwoConvText'),
                 norm_cfg: dict = dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg: dict = dict(type='SiLU', inplace=True),
                 init_cfg=None) -> None:

        # Store text-fusion settings BEFORE calling the parent constructor,
        # because super().__init__ invokes build_top_down_layer /
        # build_bottom_up_layer, which read these attributes.
        self.guide_channels = guide_channels
        self.embed_channels = embed_channels
        self.num_heads = num_heads
        self.num_fusion_stages = num_fusion_stages
        self.use_film = use_film
        self.block_cfg = block_cfg

        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         deepen_factor=deepen_factor,
                         widen_factor=widen_factor,
                         num_csp_blocks=num_csp_blocks,
                         freeze_all=freeze_all,
                         norm_cfg=norm_cfg,
                         act_cfg=act_cfg,
                         init_cfg=init_cfg)

    @staticmethod
    def _per_scale(value, scale_idx: int):
        """Return ``value[scale_idx]`` when ``value`` is per-scale (list/tuple), else ``value``."""
        return value[scale_idx] if isinstance(value, (list, tuple)) else value

    def _text_csp_cfg(self, in_channels: int, out_channels: int, scale_idx: int) -> dict:
        """Build the config dict for one text-aware CSP block.

        BUGFIX: ``guide_channels`` / ``embed_channels`` / ``num_heads`` were
        previously hard-coded (256 / 512 / 16) in both builder methods and
        the constructor arguments were silently ignored; the configured
        values are now used (indexed per scale when lists are given).
        """
        block_cfg = copy.deepcopy(self.block_cfg)
        block_cfg.update(dict(
            type='CSPLayerWithTwoConvText',  # ensure the text-aware CSP class
            in_channels=in_channels,
            out_channels=out_channels,
            guide_channels=self.guide_channels,
            embed_channels=self._per_scale(self.embed_channels, scale_idx),
            num_heads=self._per_scale(self.num_heads, scale_idx),
            num_blocks=make_round(self.num_csp_blocks,
                                  self.deepen_factor),
            num_fusion_stages=self.num_fusion_stages,
            use_film=self.use_film,
            add_identity=False,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg
        ))
        return block_cfg

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """Build a top-down layer for the FPN path.

        Args:
            idx (int): Which layer index we're building.

        Returns:
            nn.Module: The top-down CSP block with text fusion.
        """
        # The top-down block at `idx` produces the scale `idx - 1` output.
        return MODELS.build(self._text_csp_cfg(
            make_divisible(self.in_channels[idx - 1] + self.in_channels[idx],
                           self.widen_factor),
            make_divisible(self.out_channels[idx - 1], self.widen_factor),
            idx - 1))

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """Build a bottom-up layer for the PAN path.

        Args:
            idx (int): Which layer index we're building.

        Returns:
            nn.Module: The bottom-up CSP block with text fusion.
        """
        # The bottom-up block at `idx` produces the scale `idx + 1` output.
        return MODELS.build(self._text_csp_cfg(
            make_divisible(self.out_channels[idx] + self.out_channels[idx + 1],
                           self.widen_factor),
            make_divisible(self.out_channels[idx + 1], self.widen_factor),
            idx + 1))

    def forward(self, img_feats: List[torch.Tensor], txt_feats: torch.Tensor = None) -> tuple:
        """Forward pass with text embeddings.

        Args:
            img_feats (List[Tensor]): List of feature maps from backbone (C3, C4, C5).
            txt_feats (Tensor, optional): Text embeddings, shape [B, L, D] or [B, D].
                                          Defaults to None.

        Returns:
            tuple: multi-scale output feature maps from the final PAN layers.
        """
        assert len(img_feats) == len(self.in_channels)
        # 1. Reduce layers
        reduce_outs = []
        for idx in range(len(self.in_channels)):
            reduce_outs.append(self.reduce_layers[idx](img_feats[idx]))

        # 2. Top-Down path
        inner_outs = [reduce_outs[-1]]
        for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_high = inner_outs[0]
            feat_low = reduce_outs[idx - 1]
            upsample_feat = self.upsample_layers[len(self.in_channels) - 1 - idx](feat_high)

            if self.upsample_feats_cat_first:
                top_down_layer_inputs = torch.cat([upsample_feat, feat_low], dim=1)
            else:
                top_down_layer_inputs = torch.cat([feat_low, upsample_feat], dim=1)

            # pass text feats to the CSP block
            inner_out = self.top_down_layers[len(self.in_channels) - 1 - idx](
                top_down_layer_inputs, txt_feats
            )
            inner_outs.insert(0, inner_out)

        # 3. Bottom-Up path
        outs = [inner_outs[0]]
        for idx in range(len(self.in_channels) - 1):
            feat_low = outs[-1]
            feat_high = inner_outs[idx + 1]
            downsample_feat = self.downsample_layers[idx](feat_low)

            out = self.bottom_up_layers[idx](
                torch.cat([downsample_feat, feat_high], dim=1),
                txt_feats
            )
            outs.append(out)

        # 4. Out Layers
        results = []
        for idx in range(len(self.in_channels)):
            results.append(self.out_layers[idx](outs[idx]))

        return tuple(results)