import torch
from torch import nn
from torch.nn import functional as F

class ASFFS(nn.Module):
    """Adaptively fuse two same-shaped feature maps with channel-wise
    softmax attention (squeeze-and-excitation style gating).

    Both inputs must have 2048 channels — the squeeze conv below is
    hard-wired to 2048 in / 1024 out.
    """

    def __init__(self):
        super().__init__()
        # Squeeze: bottleneck the pooled global descriptor to 1024 channels.
        self.excit_0 = nn.Sequential(
            nn.Conv2d(2048, 1024, 1),
            nn.ReLU(),
        )
        # Two excitation heads, one attention logit map per input branch.
        self.excit_1 = nn.Conv2d(1024, 2048, 1)
        self.excit_2 = nn.Conv2d(1024, 2048, 1)

    def forward(self, feat1, feat2):
        """Return the attention-weighted fusion of ``feat1`` and ``feat2``.

        Args:
            feat1, feat2: tensors of shape (B, 2048, H, W) — the channel
                count must match the hard-coded convs in ``__init__``.

        Returns:
            Tensor of the same shape as the inputs; each channel is a
            convex combination of the corresponding channels of the two
            inputs (the softmax weights below sum to 1).
        """
        x, y = feat1, feat2

        # Squeeze: global average pool of the summed features -> (B, 2048, 1, 1).
        f_squeeze = F.adaptive_avg_pool2d(x + y, 1)
        shared = self.excit_0(f_squeeze)
        logits_x = self.excit_1(shared)  # (B, 2048, 1, 1)
        logits_y = self.excit_2(shared)  # (B, 2048, 1, 1)

        # Per-channel softmax over the two branches (last dim has size 2).
        att = F.softmax(torch.cat([logits_x, logits_y], dim=-1), dim=-1)
        # Slicing with 0:1 / 1:2 keeps the trailing singleton dim, so the
        # weights broadcast over the spatial dims of x and y.
        return att[..., 0:1] * x + att[..., 1:2] * y

class ChannelAttentionModule(nn.Module):
    """CBAM-style channel attention.

    Gates each channel with a sigmoid of the sum of a shared bottleneck
    MLP applied to the global average- and max-pooled descriptors.
    """

    def __init__(self, channel, ratio=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Bottleneck MLP (as 1x1 convs) shared by both pooled descriptors;
        # `ratio` controls the reduction factor of the hidden layer.
        self.shared_MLP = nn.Sequential(
            nn.Conv2d(channel, channel // ratio, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(channel // ratio, channel, 1, bias=False),
        )

        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return per-channel attention weights of shape (B, C, 1, 1)."""
        avg_branch = self.shared_MLP(self.avg_pool(x))
        max_branch = self.shared_MLP(self.max_pool(x))
        combined = avg_branch + max_branch
        return self.sigmoid(combined)