import math

import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ("BiGatedFusion","LightBiGatedFusion",
)
"""
    通过 双向门控机制 动态控制特征融合权重 替代传统SE+轴向注意力的复杂结构
    门控模块同时考虑通道和空间维度，生成动态权重。
    双向信息流（前向+反向）增强特征互补性。
    完全去除轴向注意力计算 仅保留1x1卷积。
    
"""


class BiGatedFusion(nn.Module):
    """Fuse two feature maps with a bidirectional dynamic gate.

    Both inputs are projected to ``out_dim`` channels with 1x1 convs, a
    sigmoid gate pair (one gate per stream) reweights them, and the gated
    sum is fused together with the concatenated aligned features.

    Args:
        in_dim_q: channel count of the first input (``q``).
        in_dim_k: channel count of the second input (``k``).
        out_dim: channel count of the fused output.
    """

    def __init__(self, in_dim_q, in_dim_k, out_dim):
        super().__init__()

        # 1x1 conv + BN projection that aligns a stream to out_dim channels.
        def project(in_ch):
            return nn.Sequential(
                nn.Conv2d(in_ch, out_dim, 1),
                nn.BatchNorm2d(out_dim),
            )

        self.channel_align = nn.ModuleDict({
            'q': project(in_dim_q),
            'k': project(in_dim_k),
        })

        # Gate generator: a 3x3 mixing conv, global average pooling, then a
        # 1x1 conv + sigmoid producing one gate tensor per stream.
        self.gate_gen = nn.Sequential(
            nn.Conv2d(2 * out_dim, 2 * out_dim, 3, padding=1),
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(2 * out_dim, 2 * out_dim, 1),
            nn.Sigmoid(),
        )

        # Final fusion of [gated sum, aligned q, aligned k] -> out_dim.
        self.fusion = nn.Sequential(
            nn.Conv2d(3 * out_dim, out_dim, 3, padding=1),
            nn.BatchNorm2d(out_dim),
            nn.SiLU(inplace=True),
        )

    def forward(self, q, k):
        """Return the fused feature map, shape [B, out_dim, H, W]."""
        aligned_q = self.channel_align['q'](q)
        aligned_k = self.channel_align['k'](k)

        # Per-stream gates derived from the concatenated aligned features.
        stacked = torch.cat((aligned_q, aligned_k), dim=1)
        gate_q, gate_k = torch.chunk(self.gate_gen(stacked), 2, dim=1)

        # Gated combination, then fusion with the raw aligned features.
        gated = aligned_q * gate_q + aligned_k * gate_k
        return self.fusion(torch.cat((gated, stacked), dim=1))
    


class LightBiGatedFusion(nn.Module):
    """Lightweight bidirectional gated fusion of two feature maps.

    Aligns each input to ``out_dim`` channels (skipping the projection when
    the channel count already matches), reweights the streams with a
    bottlenecked grouped-conv gate, and refines the gated sum with a
    depthwise-separable conv plus a residual connection.

    Args:
        in_dim_q: channel count of the first input (``q``).
        in_dim_k: channel count of the second input (``k``).
        out_dim: channel count of the fused output.
        groups: requested group count for the gate's 3x3 conv; it is
            automatically reduced (via gcd) when it does not divide the
            conv's channel counts, so any ``out_dim`` constructs cleanly.
    """

    def __init__(self, in_dim_q, in_dim_k, out_dim, groups=8):
        super().__init__()
        self.out_dim = out_dim

        # Independent channel alignment; identity when already out_dim wide.
        self.align_q = nn.Sequential(
            nn.Conv2d(in_dim_q, out_dim, 1),
            nn.BatchNorm2d(out_dim)
        ) if in_dim_q != out_dim else nn.Identity()

        self.align_k = nn.Sequential(
            nn.Conv2d(in_dim_k, out_dim, 1),
            nn.BatchNorm2d(out_dim)
        ) if in_dim_k != out_dim else nn.Identity()

        # Bottleneck width; floor at 1 so tiny out_dim (< 4) still works.
        hidden = max(out_dim // 4, 1)
        # Conv groups must divide both in_channels and out_channels; clamp
        # with gcd so construction never fails.  Unchanged whenever the
        # requested value was already valid (e.g. out_dim=128, groups=8).
        gate_groups = math.gcd(groups, math.gcd(hidden, 2 * out_dim))

        # Lightweight gate generator: 1x1 squeeze -> grouped 3x3 expand.
        self.gate_gen = nn.Sequential(
            nn.Conv2d(2 * out_dim, hidden, 1),
            nn.Hardswish(inplace=True),
            nn.Conv2d(hidden, 2 * out_dim, 3, padding=1, groups=gate_groups),
            nn.Sigmoid()
        )

        # Depthwise-separable fusion (depthwise 3x3 + pointwise 1x1).
        self.fusion = nn.Sequential(
            nn.Conv2d(out_dim, out_dim, 3, padding=1, groups=out_dim),
            nn.BatchNorm2d(out_dim),
            nn.Conv2d(out_dim, out_dim, 1),
            nn.Hardswish(inplace=True)
        )

    def forward(self, q, k):
        """Return the fused feature map, shape [B, out_dim, H, W]."""
        # Independent channel alignment.
        q = self.align_q(q)
        k = self.align_k(k)

        # Per-stream gates from the concatenated features.
        cat_feat = torch.cat([q, k], dim=1)
        gate_q, gate_k = self.gate_gen(cat_feat).chunk(2, dim=1)
        fused = q * gate_q + k * gate_k

        # Depthwise-separable refinement with a residual connection.
        return self.fusion(fused) + fused




# 以下是测试 BiGatedFusion模块的代码



def test_module():
    """Smoke-test BiGatedFusion: output must keep the requested shape."""
    # Fixed seed so the run is reproducible.
    torch.manual_seed(0)

    # Tensor shape parameters.
    batch_size, channels, height, width = 1, 128, 64, 64

    # Two random input feature maps with identical shapes.
    q = torch.randn(batch_size, channels, height, width)
    k = torch.randn(batch_size, channels, height, width)

    # Build the fusion module with matching dimensions and run it.
    model = BiGatedFusion(in_dim_q=channels, in_dim_k=channels, out_dim=channels)
    output = model(q, k)

    # Fusion must preserve batch/spatial dims and emit out_dim channels.
    assert output.shape == (batch_size, channels, height, width)
    print(f"Shape of output: {output.shape}")


if __name__ == "__main__":
    test_module()