import torch
from torch import nn
from torch.nn import functional as F


class ConvBNReLU(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU, applied in sequence.

    Args:
        in_channels (int): Input channels of the convolution.
        out_channels (int): Output channels (also the BN feature count).
        kernel_size (int or tuple): Convolution kernel size.
        padding (int, optional): Convolution padding. Default: 1.
        **kwargs: Extra keyword arguments forwarded to nn.Conv2d.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding=1,
                 **kwargs):
        super().__init__()
        self._conv = nn.Conv2d(
            in_channels, out_channels, kernel_size, padding=padding, **kwargs)
        self._batch_norm = nn.BatchNorm2d(out_channels)
        self._relu = nn.ReLU()

    def forward(self, x):
        # Apply the three stages in order: conv, batch norm, activation.
        return self._relu(self._batch_norm(self._conv(x)))

class UAFM(nn.Module):
    """
    The base of Unified Attention Fusion Module.
    Args:
        x_ch (int): The channel of x tensor, which is the low level feature.
        y_ch (int): The channel of y tensor, which is the high level feature.
        out_ch (int): The channel of output tensor.
        ksize (int, optional): The kernel size of the conv for x tensor. Default: 3.
        resize_mode (str, optional): The resize mode used when upsampling y. Default: bilinear.
    """

    def __init__(self, x_ch, y_ch, out_ch, ksize=3, resize_mode='bilinear'):
        super().__init__()
        # Project x to y's channel count; 'same' padding keeps spatial size.
        self.conv_x = ConvBNReLU(
            x_ch, y_ch, kernel_size=ksize, padding=ksize // 2)
        self.conv_out = ConvBNReLU(
            y_ch, out_ch, kernel_size=3, padding=1)
        self.resize_mode = resize_mode

    def check(self, x, y):
        # Both inputs must be NCHW, with x at least as large spatially as y.
        assert x.ndim == 4 and y.ndim == 4
        assert x.shape[2] >= y.shape[2] and x.shape[3] >= y.shape[3]

    def prepare(self, x, y):
        x = self.prepare_x(x, y)
        y = self.prepare_y(x, y)
        return x, y

    def prepare_x(self, x, y):
        # Align x's channels with y's via the projection conv.
        return self.conv_x(x)

    def prepare_y(self, x, y):
        # Upsample y to x's spatial size so the two can be fused.
        return F.interpolate(y, x.shape[2:], mode=self.resize_mode)

    def fuse(self, x, y):
        # Base fusion: element-wise sum followed by the output conv.
        return self.conv_out(x + y)

    def forward(self, x, y):
        """
        Args:
            x (Tensor): The low level feature.
            y (Tensor): The high level feature.
        """
        self.check(x, y)
        x, y = self.prepare(x, y)
        return self.fuse(x, y)


class ConvBNAct(nn.Module):
    """Conv2d -> BatchNorm2d -> optional activation.

    Args:
        in_channels (int): Input channels of the convolution.
        out_channels (int): Output channels (also the BN feature count).
        kernel_size (int or tuple): Convolution kernel size.
        padding (int, optional): Convolution padding. Default: 1.
        act_type (str, optional): Activation name, 'relu' or 'leakyrelu'
            (case-insensitive). None disables the activation. Any other
            non-None value falls back to LeakyReLU, which preserves the
            previous behavior where the string was ignored entirely.
        **kwargs: Extra keyword arguments forwarded to nn.Conv2d.
    """

    # Recognized activation names -> module constructors.
    _ACTS = {'relu': nn.ReLU, 'leakyrelu': nn.LeakyReLU}

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding=1,
                 act_type=None,
                 **kwargs):
        super().__init__()

        self._conv = nn.Conv2d(
            in_channels, out_channels, kernel_size, padding=padding, **kwargs)
        self._batch_norm = nn.BatchNorm2d(out_channels)

        self._act_type = act_type
        if act_type is not None:
            # Bug fix: previously any non-None act_type produced LeakyReLU,
            # so act_type='relu' silently built the wrong activation. Honor
            # known names; keep LeakyReLU as the backward-compatible fallback.
            self._act = self._ACTS.get(str(act_type).lower(), nn.LeakyReLU)()

    def forward(self, x):
        x = self._conv(x)
        x = self._batch_norm(x)
        if self._act_type is not None:
            x = self._act(x)
        return x


# class ConvBN(nn.Module):
#     def __init__(self,
#                  in_channels,
#                  out_channels,
#                  kernel_size,
#                  padding=1,
#                  **kwargs):
#         super().__init__()
#         self._conv = nn.Conv2d(
#             in_channels, out_channels, kernel_size, padding=padding, **kwargs)
#         # if 'data_format' in kwargs:
#         #     data_format = kwargs['data_format']
#         # else:
#         #     data_format = 'NCHW'
#         self._batch_norm = nn.BatchNorm2d(out_channels)    # , data_format=data_format

#     def forward(self, x):
#         x = self._conv(x)
#         x = self._batch_norm(x)
#         return x




def avg_max_reduce_hw_helper(x, is_training, use_concat=True):
    """Reduce the H and W dims of a single NCHW tensor by avg and max pooling.

    Args:
        x (Tensor): A single 4D tensor (lists/tuples are rejected).
        is_training (bool): Unused; kept for interface compatibility (it was
            only needed to work around a paddle.max training bug in the
            original framework this code was ported from).
        use_concat (bool, optional): If True, return cat([avg, max], dim=1);
            otherwise return the pair [avg, max]. Default: True.
    """
    assert not isinstance(x, (list, tuple))
    avg_pool = F.adaptive_avg_pool2d(x, 1)
    max_pool = F.adaptive_max_pool2d(x, 1)

    if not use_concat:
        return [avg_pool, max_pool]
    return torch.cat([avg_pool, max_pool], dim=1)

def avg_max_reduce_hw(x, is_training):
    """Reduce H and W of one or more NCHW tensors by avg and max pooling.

    For a single tensor (or a one-element list), returns
    cat([avg_pool, max_pool], dim=1). For a longer list, each element is
    reduced to (avg_pool + max_pool) and the per-element results are
    concatenated along the channel dim.
    """
    if not isinstance(x, (list, tuple)):
        return avg_max_reduce_hw_helper(x, is_training)
    if len(x) == 1:
        return avg_max_reduce_hw_helper(x[0], is_training)

    # sum() adds the [avg, max] pair element-wise for each input tensor.
    pooled = [
        sum(avg_max_reduce_hw_helper(xi, is_training, False)) for xi in x
    ]
    return torch.cat(pooled, dim=1)

class UAFM_ChAtten(UAFM):
    """
    The UAFM with channel attention, which uses mean and max values.
    Args:
        x_ch (int): The channel of x tensor, which is the low level feature.
        y_ch (int): The channel of y tensor, which is the high level feature.
        out_ch (int): The channel of output tensor.
        ksize (int, optional): The kernel size of the conv for x tensor. Default: 3.
        resize_mode (str, optional): The resize mode used when upsampling y. Default: bilinear.
    """

    def __init__(self, x_ch, y_ch, out_ch, ksize=3, resize_mode='bilinear'):
        super().__init__(x_ch, y_ch, out_ch, ksize, resize_mode)

        # Pooled x and pooled y are concatenated channel-wise, so the
        # attention conv sees 2 * y_ch input channels.
        self.conv_xy_atten = nn.Sequential(
            ConvBNAct(
                2 * y_ch,
                y_ch,
                kernel_size=3,
                act_type="leakyrelu"),
        )

    def fuse(self, x, y):
        """Blend x and y with a learned per-channel attention weight.

        Args:
            x (Tensor): The low level feature.
            y (Tensor): The high level feature.
        """
        pooled = avg_max_reduce_hw([x, y], self.training)
        atten = torch.sigmoid(self.conv_xy_atten(pooled))
        # Convex combination of the two features, then the output conv.
        return self.conv_out(x * atten + y * (1 - atten))


if __name__ == '__main__':
    # Smoke test: fuse a low-level (1024-ch) and a high-level (2048-ch)
    # feature map of the same spatial size and print the result shape.
    low_feat = torch.randn(128, 1024, 16, 16)
    high_feat = torch.randn(128, 2048, 16, 16)
    uafm = UAFM_ChAtten(1024, 2048, 2048)
    print(uafm(low_feat, high_feat).shape)
