from numpy import prod
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..bases.base_types import _size_T, _size_1_t, _size_2_t, _size_3_t, _size_any_t

__all__ = ['MaskAvgPool1d', 'MaskAvgPool2d', 'MaskAvgPool3d',
           'MaskAdaptiveAvgPool1d', 'MaskAdaptiveAvgPool2d', 'MaskAdaptiveAvgPool3d',
           'mask_avg_pool1d', 'mask_avg_pool2d', 'mask_avg_pool3d',
           'mask_adaptive_avg_pool1d', 'mask_adaptive_avg_pool2d', 'mask_adaptive_avg_pool3d']


def _raise_errors(x: torch.tensor, mask: torch.tensor, dimensions: int) -> None:
    if x.device != mask.device:
        raise TypeError(f"input and mask must on the same device, "
                        f"got input on {x.device} and mask on {mask.device}")
    if len(x.size()) != dimensions + 2:
        raise ValueError(f"excepted input as a {dimensions + 2}D tensor (BxCxHxW), "
                         f"but got a {len(x.size())}D tensor instead")
    if len(mask.size()) != dimensions + 1:
        raise ValueError(f"excepted mask as a {dimensions + 1}D tensor (BxCxHxW), "
                         f"but got a {len(x.size())}D tensor instead")
    if x.size(0) != mask.size(0):
        raise ValueError(f"input and mask must have same batch size (dim0), "
                         f"got {x.size(0)} for input and {mask.size(0)} for mask")
    if x.size()[2:] != mask.size()[1:]:
        raise ValueError(f"input and mask must have same shape, "
                         f"got {x.size()[2:]} for input and {mask.size()[1:]} for mask")


class _MaskAvgPool_Base_impl(nn.Module):
    '''
    Shared implementation behind MaskAvgPool{1,2,3}d.

    Wraps the matching nn.AvgPoolNd and renormalizes its output by the pooled
    mask, so that only positions where the mask is non-zero contribute to
    each window's average.
    '''

    def __init__(self, _impl_name: str, *args, **kwargs) -> None:
        super(_MaskAvgPool_Base_impl, self).__init__()

        # _impl_name is always supplied by the subclasses below,
        # so this lookup should never fail
        assert getattr(nn, _impl_name, None) is not None
        self.pool: nn.Module = getattr(nn, _impl_name)(*args, **kwargs)
        self.name: str = 'Mask' + _impl_name

        # mirror the wrapped pool's configuration on this module
        for attr in ('kernel_size', 'stride', 'padding',
                     'ceil_mode', 'count_include_pad'):
            setattr(self, attr, getattr(self.pool, attr))
        if _impl_name[-2] != '1':  # nn.AvgPool1d does not expose this attribute
            self.divisor_override = self.pool.divisor_override

    def forward(self, x: torch.tensor, mask: torch.tensor) -> torch.tensor:
        # the spatial dimensionality is the trailing digit of e.g. 'MaskAvgPool2d'
        _raise_errors(x, mask, int(self.name[-2]))
        mask = mask.unsqueeze(dim=1)  # (B, *spatial) -> (B, 1, *spatial) for broadcasting

        # number of elements in one pooling window
        k = self.pool.kernel_size
        kernel_area = k ** (x.dim() - 2) if isinstance(k, _size_T) else prod(k)

        # average of the masked input, renormalized by the mask coverage;
        # the 1/kernel_area term keeps the denominator strictly positive for
        # windows where the mask is entirely zero
        pooled_x: torch.tensor = self.pool(x * mask)
        pooled_mask: torch.tensor = self.pool(mask) + 1. / kernel_area

        return pooled_x / pooled_mask


class MaskAvgPool1d(_MaskAvgPool_Base_impl):
    '''
    Mask Average Pooling 1d.
    Forward args:
        x    - torch.tensor - input tensor (BxCxL)
        mask - torch.tensor - mask tensor (BxL)
    Forward output:
        y    - torch.tensor - pooled tensor (B x C x L_out)
    '''

    def __init__(self, *args, **kwargs) -> None:
        # all standard nn.AvgPool1d arguments are forwarded unchanged
        super().__init__('AvgPool1d', *args, **kwargs)


class MaskAvgPool2d(_MaskAvgPool_Base_impl):
    '''
    Mask Average Pooling 2d.
    Forward args:
        x    - torch.tensor - input tensor (BxCxHxW)
        mask - torch.tensor - mask tensor (BxHxW)
    Forward output:
        y    - torch.tensor - pooled tensor (B x C x H_out x W_out)
    '''

    def __init__(self, *args, **kwargs) -> None:
        # all standard nn.AvgPool2d arguments are forwarded unchanged
        super().__init__('AvgPool2d', *args, **kwargs)


class MaskAvgPool3d(_MaskAvgPool_Base_impl):
    '''
    Mask Average Pooling 3d.
    Forward args:
        x    - torch.tensor - input tensor (BxCxDxHxW)
        mask - torch.tensor - mask tensor (BxDxHxW)
    Forward output:
        y    - torch.tensor - pooled tensor (B x C x D_out x H_out x W_out)
    '''

    def __init__(self, *args, **kwargs) -> None:
        # all standard nn.AvgPool3d arguments are forwarded unchanged
        super().__init__('AvgPool3d', *args, **kwargs)


class _MaskAdaptiveAvgPool_Base_impl(nn.Module):
    '''
    Shared implementation behind MaskAdaptiveAvgPool{1,2,3}d.

    Wraps the matching nn.AdaptiveAvgPoolNd and renormalizes its output by
    the pooled mask, so that only positions where the mask is non-zero
    contribute to each output cell.
    '''

    def __init__(self, _impl_name: str,
                 output_size: _size_any_t) -> None:
        super(_MaskAdaptiveAvgPool_Base_impl, self).__init__()

        assert getattr(nn, _impl_name, None) is not None  # should never assert this
        self.pool: nn.Module = getattr(nn, _impl_name)(output_size)
        self.name: str = 'Mask' + _impl_name

        # save arguments
        self.output_size: _size_any_t = self.pool.output_size

    def forward(self, x: torch.tensor, mask: torch.tensor) -> torch.tensor:
        _raise_errors(x, mask, int(self.name[-2]))
        mask = mask.unsqueeze(dim=1)

        spatial = x.size()[2:]
        # Normalize output_size to one entry per spatial dimension: a scalar
        # applies to every dimension, and a None entry (allowed by the 2d/3d
        # adaptive pools) keeps that dimension's input size.
        if isinstance(self.output_size, _size_T):
            out_sizes = [self.output_size] * len(spatial)
        else:
            out_sizes = [o if o is not None else size
                         for o, size in zip(self.output_size, spatial)]

        # approximate calculation of the kernel size used by adaptive pooling
        # reference [https://blog.csdn.net/u013382233/article/details/85948695]
        stride = [size // o for size, o in zip(spatial, out_sizes)]
        # BUGFIX: use the per-dimension output size here; the previous code
        # computed (self.output_size - 1) and raised a TypeError whenever
        # output_size was a tuple.
        kernel_size = [size - (o - 1) * s
                       for size, o, s in zip(spatial, out_sizes, stride)]
        kernel_area = prod(kernel_size)

        # the 1/kernel_area term keeps the denominator strictly positive for
        # cells whose mask coverage is entirely zero
        x_PoolRes: torch.tensor = self.pool(x * mask)
        mask_PoolRes: torch.tensor = self.pool(mask) + 1. / kernel_area

        return x_PoolRes / mask_PoolRes


class MaskAdaptiveAvgPool1d(_MaskAdaptiveAvgPool_Base_impl):
    '''
    Mask Adaptive Average Pooling 1d.
    Forward args:
        x    - torch.tensor - input tensor (BxCxL)
        mask - torch.tensor - mask tensor (BxL)
    Forward output:
        y    - torch.tensor - pooled tensor (B x C x output_size)
    '''

    def __init__(self, output_size: _size_1_t) -> None:
        super().__init__('AdaptiveAvgPool1d', output_size)


class MaskAdaptiveAvgPool2d(_MaskAdaptiveAvgPool_Base_impl):
    '''
    Mask Adaptive Average Pooling 2d.
    Forward args:
        x    - torch.tensor - input tensor (BxCxHxW)
        mask - torch.tensor - mask tensor (BxHxW)
    Forward output:
        y    - torch.tensor - pooled tensor (B x C x output_size)
    '''

    def __init__(self, output_size: _size_2_t) -> None:
        super().__init__('AdaptiveAvgPool2d', output_size)


class MaskAdaptiveAvgPool3d(_MaskAdaptiveAvgPool_Base_impl):
    '''
    Mask Adaptive Average Pooling 3d.
    Forward args:
        x    - torch.tensor - input tensor (BxCxDxHxW)
        mask - torch.tensor - mask tensor (BxDxHxW)
    Forward output:
        y    - torch.tensor - pooled tensor (B x C x output_size)
    '''

    def __init__(self, output_size: _size_3_t) -> None:
        super().__init__('AdaptiveAvgPool3d', output_size)


def mask_avg_pool1d(x: torch.tensor, mask: torch.tensor, *args, **kwargs):
    """Functional interface for MaskAvgPool1d: build the module, apply it once."""
    module = MaskAvgPool1d(*args, **kwargs)
    return module(x, mask)


def mask_avg_pool2d(x: torch.tensor, mask: torch.tensor, *args, **kwargs):
    """Functional interface for MaskAvgPool2d: build the module, apply it once."""
    module = MaskAvgPool2d(*args, **kwargs)
    return module(x, mask)


def mask_avg_pool3d(x: torch.tensor, mask: torch.tensor, *args, **kwargs):
    """Functional interface for MaskAvgPool3d: build the module, apply it once."""
    module = MaskAvgPool3d(*args, **kwargs)
    return module(x, mask)


def mask_adaptive_avg_pool1d(x: torch.tensor, mask: torch.tensor, output_size: _size_1_t):
    """Functional interface for MaskAdaptiveAvgPool1d: build the module, apply it once."""
    module = MaskAdaptiveAvgPool1d(output_size)
    return module(x, mask)


def mask_adaptive_avg_pool2d(x: torch.tensor, mask: torch.tensor, output_size: _size_2_t):
    """Functional interface for MaskAdaptiveAvgPool2d: build the module, apply it once."""
    module = MaskAdaptiveAvgPool2d(output_size)
    return module(x, mask)


def mask_adaptive_avg_pool3d(x: torch.tensor, mask: torch.tensor, output_size: _size_3_t):
    """Functional interface for MaskAdaptiveAvgPool3d: build the module, apply it once."""
    module = MaskAdaptiveAvgPool3d(output_size)
    return module(x, mask)
