import torch
import torch.nn as nn
import numpy as np

class ChannelAttention(nn.Module):
    """CBAM-style channel attention.

    Max- and average-pooled channel descriptors are passed through a shared
    bottleneck MLP (implemented with 1x1 convs), summed, squashed with a
    sigmoid, and used to rescale the input per channel.
    """

    def __init__(self, in_channels, reduction: int = 4):
        super().__init__()
        self.maxpool = nn.AdaptiveMaxPool2d(1)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # 1x1 convs act as per-channel linear layers on the pooled
        # (b, c, 1, 1) maps, avoiding flatten/reshape around nn.Linear.
        squeezed = in_channels // reduction
        self.fc = nn.Sequential(
            nn.Conv2d(in_channels, squeezed, 1, bias=False),
            # inplace ReLU overwrites its input buffer to save memory
            nn.ReLU(inplace=True),
            nn.Conv2d(squeezed, in_channels, 1, bias=False),
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        batch, channels = x.shape[:2]
        # Shared MLP over both pooled descriptors, then element-wise sum.
        scores = sum(
            self.fc(pool(x)).view(batch, channels, 1, 1)
            for pool in (self.maxpool, self.avgpool)
        )
        return x * self.sigmoid(scores).expand_as(x)
class ECAttention(nn.Module):
    """Efficient Channel Attention (ECA-Net).

    Global-average-pools the input to one value per channel, runs a small
    1-D convolution across the channel axis for local cross-channel
    interaction, and rescales the input by the resulting sigmoid gate.
    """

    def __init__(self, kernel_size=3):
        super().__init__()
        self.gap = nn.AdaptiveAvgPool2d(1)
        # 1-D conv over the channel dimension; 'same' padding keeps length c.
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size,
                              padding=(kernel_size - 1) // 2)
        self.sigmoid = nn.Sigmoid()

    def init_weights(self):
        """Re-initialize conv/batch-norm/linear sub-modules in place.

        Bug fix: the previous revision referenced a bare ``init`` name that
        was never imported, so any matching sub-module raised ``NameError``.
        The initializers live in ``nn.init``, already in scope via the
        file-level ``import torch.nn as nn``.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            # NOTE(review): this module's own Conv1d matches none of the
            # branches, so it keeps its default initialization — confirm
            # whether that is intended.

    def forward(self, x):
        y = self.gap(x)                        # (b, c, 1, 1)
        y = y.squeeze(-1).permute(0, 2, 1)     # (b, 1, c): channels as a 1-D sequence
        y = self.conv(y)                       # (b, 1, c)
        y = self.sigmoid(y)                    # (b, 1, c), gate in (0, 1)
        y = y.permute(0, 2, 1).unsqueeze(-1)   # (b, c, 1, 1)
        return x * y.expand_as(x)


class SpatialAttention(nn.Module):
    """CBAM-style spatial attention.

    Builds a 2-channel descriptor (per-pixel channel max and channel mean),
    convolves it down to a single-channel sigmoid mask, and rescales the
    input by that mask at every spatial location.
    """

    def __init__(self, kernel_size: int = 7):
        super().__init__()
        # 'same' padding keeps the mask the size of the input feature map.
        self.conv = nn.Sequential(
            nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=True),
            nn.Sigmoid(),
        )

    def forward(self, x):
        channel_max = x.max(dim=1, keepdim=True).values
        channel_mean = x.mean(dim=1, keepdim=True)
        descriptor = torch.cat((channel_max, channel_mean), dim=1)  # (b, 2, h, w)
        mask = self.conv(descriptor)                                # (b, 1, h, w)
        return x * mask.expand_as(x)

class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel attention followed by
    spatial attention, applied sequentially to the input feature map.

    Bug fix: ``ChannelAttention``/``SpatialAttention`` already return
    ``input * mask``, so the previous ``attention(x) * x`` multiplied the
    input in a second time (effectively x^2 * mask). The sub-modules are now
    simply composed.
    """

    def __init__(self, channel):
        super(CBAM, self).__init__()
        self.channel_attention = ChannelAttention(channel)
        self.spatial_attention = SpatialAttention()

    def forward(self, x):
        # Each sub-module returns its input already scaled by its mask.
        out = self.channel_attention(x)
        return self.spatial_attention(out)

class PLA_CBAM(nn.Module):
    """Parallel CBAM variant: applies channel (ECA) and spatial attention in
    both orders and averages the two results.

    Bug fixes:
    * The sub-modules already return ``input * mask``, so the extra ``* x``
      multiplications squared the input contribution — removed.
    * The trailing ``.trunc()`` truncated every activation toward zero and
      has zero gradient almost everywhere, silently blocking training and
      discarding fractional attention values — removed.
    """

    def __init__(self, channel):
        super(PLA_CBAM, self).__init__()
        # ``channel`` is kept for interface compatibility; ECA's 1-D conv
        # does not depend on the channel count.
        self.channel_attention = ECAttention(3)
        self.spatial_attention = SpatialAttention()

    def forward(self, x):
        # Branch 1: channel-then-spatial; branch 2: spatial-then-channel.
        channel_first = self.channel_attention(x)
        spatial_first = self.spatial_attention(x)
        out_cs = self.spatial_attention(channel_first)
        out_sc = self.channel_attention(spatial_first)
        # Average the two orderings.
        return (out_cs + out_sc) / 2
if __name__ == '__main__':
    # Smoke test: ECA attention preserves the input tensor's shape.
    eca = ECAttention(3)
    sample = torch.randn(3, 24, 128, 128)
    attended = eca(sample)
    print(attended.shape)
