import torch
from torch import nn
from torch.nn import functional as F


class UAFM(nn.Module):
    """Unified Attention Fusion Module (channel-attention variant).

    Produces a per-channel sigmoid attention map by fusing two feature
    maps ``x`` and ``y`` (element-wise averaged, then refined by a 1x1
    conv) with a third feature map ``z``: global average- and max-pooled
    descriptors of both branches are concatenated along channels and
    squeezed back to one 2048-channel descriptor.

    NOTE(review): all constructor arguments (``x_ch``, ``y_ch``,
    ``z_ch``, ``out_ch``, ``ksize``) are currently unused -- channel
    counts are hard-coded to 2048 (and 4 * 2048 = 8192 for the
    concatenated pooled descriptors). The signature is kept unchanged
    for compatibility with existing callers and checkpoints.
    """

    def __init__(self, x_ch, y_ch, z_ch, out_ch, ksize=3):
        super().__init__()
        # Squeezes the 4 concatenated 2048-channel pooled descriptors
        # (4 * 2048 = 8192 channels) down to one 2048-channel map.
        self.conv_cat = nn.Sequential(
            nn.Conv2d(8192, 2048, 1),
            nn.BatchNorm2d(2048),
            nn.ReLU(inplace=True))

        # Refines the averaged (x + y) / 2 feature map before pooling.
        self.conv_r = nn.Sequential(
            nn.Conv2d(2048, 2048, 1),
            nn.BatchNorm2d(2048),
            nn.ReLU(inplace=True))

    def forward(self, x, y, z):
        """Return a ``(N, 2048, 1, 1)`` sigmoid channel-attention map.

        Args:
            x, y: feature maps with 2048 channels and matching spatial
                size; averaged element-wise and refined by ``conv_r``.
            z: third 2048-channel feature map, pooled directly.
        """
        rest_f = self.conv_r((x + y) / 2)
        # Global average/max descriptors of each branch -> (N, C, 1, 1).
        avg_pool = F.adaptive_avg_pool2d(z, 1)
        max_pool = F.adaptive_max_pool2d(z, 1)
        avg_pool_r = F.adaptive_avg_pool2d(rest_f, 1)
        max_pool_r = F.adaptive_max_pool2d(rest_f, 1)

        res_t = torch.cat([avg_pool, max_pool, avg_pool_r, max_pool_r], dim=1)
        # torch.sigmoid: F.sigmoid is deprecated (since PyTorch 0.4.1).
        tal = torch.sigmoid(self.conv_cat(res_t))
        return tal
