import torch
from torch import nn

class StarFFN(nn.Module):
    """Feed-forward block with two parallel branches whose activated
    outputs are fused by an element-wise product (the "star" operation)
    and then passed through a final activated projection.

    Input width is inferred lazily from the first batch.
    """

    def __init__(self, output_dim):
        super(StarFFN, self).__init__()
        # Two parallel branches projecting to the same width.
        self.left = nn.LazyLinear(output_dim)
        self.right = nn.LazyLinear(output_dim)
        # Projection applied after the element-wise product.
        self.concat = nn.LazyLinear(output_dim)
        self.act = nn.ReLU()

    def forward(self, x):
        gated = self.act(self.left(x)) * self.act(self.right(x))
        return self.act(self.concat(gated))

class ChannelAttention(nn.Module):
    """Squeeze-and-excitation style channel attention.

    Global-average-pools the spatial dimensions, squeezes channels to
    ``max(dim // 8, 1)`` through a 1x1 conv bottleneck, expands back to
    ``dim`` and gates with a sigmoid, so every channel of the input is
    rescaled by a weight in (0, 1).

    Args:
        dim: number of input channels.
        addResidualConnection: when True, return ``x + attention * x``
            instead of ``attention * x``.
    """

    def __init__(self, dim, addResidualConnection=False):
        super(ChannelAttention, self).__init__()
        self.addResidualConnection = addResidualConnection

        self.pool = nn.AdaptiveAvgPool2d(1)
        # Bottleneck: squeeze channels, then expand back to ``dim``.
        self.conv1 = nn.Conv2d(dim, max(dim // 8, 1), 1)
        self.act1 = nn.GELU()
        self.conv2 = nn.Conv2d(max(dim // 8, 1), dim, 1)
        self.act2 = nn.Sigmoid()

    def forward(self, x):
        # Pooling preserves the input's device, so the previous
        # redundant ``.to(x.device)`` on the pooled tensor was removed.
        attentionMap = self.pool(x)
        attentionMap = self.conv1(attentionMap)
        attentionMap = self.act1(attentionMap)
        attentionMap = self.conv2(attentionMap)
        attentionMap = self.act2(attentionMap)

        # Broadcast the (N, C, 1, 1) weights over the spatial dims.
        attentionX = attentionMap * x

        # ---------- optional residual connection ----------
        if self.addResidualConnection:
            x = x + attentionX
        else:
            x = attentionX
        return x

class PixelAttention(nn.Module):
    """Cross-channel attention over spatial positions.

    A 1x1-conv bottleneck collapses the channels to a single sigmoid
    map of shape (N, 1, H, W); each spatial position of the input is
    then rescaled by its weight in (0, 1).

    Args:
        dim: number of input channels.
        addResidualConnection: when True, return ``x + attention * x``
            instead of ``attention * x``.
    """

    def __init__(self, dim, addResidualConnection=False):
        super(PixelAttention, self).__init__()
        self.addResidualConnection = addResidualConnection
        hidden = max(dim // 8, 1)
        self.generatePixelAttention = nn.Sequential(
            nn.Conv2d(dim, hidden, 1),
            nn.GELU(),
            nn.Conv2d(hidden, 1, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        # (N, 1, H, W) weights broadcast across the channel dim.
        weighted = self.generatePixelAttention(x) * x
        # Optionally keep a skip connection around the attention branch.
        return x + weighted if self.addResidualConnection else weighted

class FFN(nn.Module):
    """Attention-gated feed-forward block.

    Runs the input through channel attention and pixel attention in
    parallel, concatenates the two gated results along the channel
    axis, and mixes them with a pointwise (1x1-conv) FFN.

    NOTE(review): the attention sub-modules are built lazily on the
    first forward pass, so they do not appear in ``parameters()`` (and
    cannot be handed to an optimizer) until the model has seen data.

    Args:
        hidden_channels: width of the hidden 1x1 conv layer.
        out_channels: number of output channels.
        addResidualConnection: forwarded to both attention modules.
    """

    def __init__(self, hidden_channels, out_channels, addResidualConnection=False):
        super(FFN, self).__init__()
        # Input channel count, inferred lazily from the first batch.
        self.dim = None
        self.addResidualConnection = addResidualConnection
        # Pointwise FFN: mixes channels per spatial position only,
        # avoiding the cost of a full spatial FFN.
        self.channel_ffn = nn.Sequential(
            nn.LazyConv2d(out_channels=hidden_channels, kernel_size=1),
            nn.GELU(),
            nn.LazyConv2d(out_channels=out_channels, kernel_size=1)
        )
        self.ca, self.pa = None, None

    def forward(self, x):
        # ---------- lazy construction of the attention branches ----------
        if self.ca is None and self.pa is None:
            self.dim = x.size(1)
            # Attribute assignment already registers sub-modules via
            # nn.Module.__setattr__, so the previous explicit
            # add_module() calls were redundant and have been removed.
            self.ca = ChannelAttention(self.dim, self.addResidualConnection).to(x.device)
            self.pa = PixelAttention(self.dim, self.addResidualConnection).to(x.device)

        x = torch.cat([self.ca(x), self.pa(x)], dim=1)
        return self.channel_ffn(x)
    

        
    
if __name__ == '__main__':
    # Smoke test: a batch of 8 RGB images at 224x224 resolution.
    batch = torch.rand([8, 3, 224, 224])
    net = FFN(hidden_channels=6, out_channels=12, addResidualConnection=True)
    print(net(batch).shape)