import torch
from torch import nn

class EMA(nn.Module):
    """Efficient Multi-scale Attention (EMA) followed by a transposed-conv decoder.

    The attention half folds the channels into ``factor`` groups, encodes
    per-direction context via strip pooling along H and W, fuses the two
    directions with a 1x1 conv, and combines a GroupNorm-ed gated branch with
    a 3x3-conv branch through cross softmax-weighted matmuls to produce a
    per-pixel gate. The gated feature map is then upsampled by three
    (optionally four) transposed convolutions and added to a skip feature.

    Args:
        channels: number of input channels of ``x`` in :meth:`forward`.
        c2: unused; kept for signature compatibility with callers that pass
            an output-channel hint (builder-style instantiation).
        factor: number of channel groups used by the attention computation.

    Raises:
        ValueError: if ``channels // factor`` is not positive (each group
            must own at least one channel).
    """

    def __init__(self, channels, c2=None, factor=1):
        super().__init__()
        self.groups = factor
        # Raise instead of assert so the check survives `python -O`.
        if channels // self.groups <= 0:
            raise ValueError(
                f"channels ({channels}) must be >= factor ({factor})"
            )
        self.softmax = nn.Softmax(-1)
        # Global average pool to (1, 1): produces a per-channel descriptor.
        self.agp = nn.AdaptiveAvgPool2d((1, 1))
        # Strip pooling: collapse W (keep H) and collapse H (keep W).
        self.pool_h = nn.AdaptiveAvgPool2d((None, 1))
        self.pool_w = nn.AdaptiveAvgPool2d((1, None))
        # NOTE(review): num_groups == num_channels here, i.e. per-channel
        # normalization — matches the reference EMA implementation.
        self.gn = nn.GroupNorm(channels // self.groups, channels // self.groups)
        self.conv1x1 = nn.Conv2d(channels // self.groups, channels // self.groups,
                                 kernel_size=1, stride=1, padding=0)
        self.conv3x3 = nn.Conv2d(channels // self.groups, channels // self.groups,
                                 kernel_size=3, stride=1, padding=1)
        # Decoder: three 2x upsampling stages (kernel 4, stride 2, pad 1
        # exactly doubles H and W), progressively reducing channels.
        self.deconv1 = nn.ConvTranspose2d(
            in_channels=channels,
            out_channels=channels // 2,
            kernel_size=4,
            stride=2,
            padding=1,
            output_padding=0,
            bias=False,
        )
        self.deconv2 = nn.ConvTranspose2d(
            in_channels=channels // 2,
            out_channels=channels // 4,
            kernel_size=4,
            stride=2,
            padding=1,
            output_padding=0,
            bias=False,
        )
        self.deconv3 = nn.ConvTranspose2d(
            in_channels=channels // 4,
            out_channels=channels // 4,
            kernel_size=4,
            stride=2,
            padding=1,
            output_padding=0,
            bias=False,
        )
        # Extra stage used only on the h == 32 path: keeps spatial size
        # (kernel 3, stride 1, pad 1) but halves the channels once more.
        self.deconv4 = nn.ConvTranspose2d(
            in_channels=channels // 4,
            out_channels=channels // 8,
            kernel_size=3,
            stride=1,
            padding=1,
            output_padding=0,
            bias=False,
        )

    def forward(self, x, feat_big):
        """Gate ``x`` with grouped attention, upsample, and fuse with ``feat_big``.

        Args:
            x: input feature map of shape ``(b, c, h, w)``.
            feat_big: skip feature added to the decoder output; its shape must
                match the decoder output (8x the spatial size of ``x`` with
                ``c // 4`` channels, or ``c // 8`` channels when ``h == 32``).

        Returns:
            Upsampled, attention-gated features plus ``feat_big``.
        """
        b, c, h, w = x.size()
        # Fold the channel groups into the batch dim: (b*g, c//g, h, w).
        group_x = x.reshape(b * self.groups, -1, h, w)
        x_h = self.pool_h(group_x)                       # (b*g, c//g, h, 1)
        x_w = self.pool_w(group_x).permute(0, 1, 3, 2)   # (b*g, c//g, w, 1)
        # Fuse both directions through one shared 1x1 conv, then split back.
        hw = self.conv1x1(torch.cat([x_h, x_w], dim=2))  # (b*g, c//g, h+w, 1)
        x_h, x_w = torch.split(hw, [h, w], dim=2)
        # Branch 1: directional sigmoid gates + per-channel normalization.
        x1 = self.gn(group_x * x_h.sigmoid() * x_w.permute(0, 1, 3, 2).sigmoid())
        # Branch 2: local 3x3 context.
        x2 = self.conv3x3(group_x)
        # Cross attention: channel-softmax of each branch's pooled descriptor
        # applied to the flattened features of the other branch.
        x11 = self.softmax(self.agp(x1).reshape(b * self.groups, -1, 1).permute(0, 2, 1))
        x12 = x2.reshape(b * self.groups, c // self.groups, -1)   # (b*g, c//g, h*w)
        x21 = self.softmax(self.agp(x2).reshape(b * self.groups, -1, 1).permute(0, 2, 1))
        x22 = x1.reshape(b * self.groups, c // self.groups, -1)   # (b*g, c//g, h*w)
        weights = (torch.matmul(x11, x12) + torch.matmul(x21, x22)).reshape(
            b * self.groups, 1, h, w)
        # Apply the spatial gate and restore the (b, c, h, w) layout.
        middle = (group_x * weights.sigmoid()).reshape(b, c, h, w)
        # Decoder: 2x upsampling three times.
        middle1 = self.deconv1(middle)
        middle2 = self.deconv2(middle1)
        middle3 = self.deconv3(middle2)
        if h == 32:
            # NOTE(review): hard-coded spatial size selects the extra
            # channel-reducing stage — presumably the specific pyramid level
            # fed by the caller; confirm against call sites before changing.
            middle4 = self.deconv4(middle3)
            return middle4 + feat_big
        return middle3 + feat_big