import torch
import torch.nn as nn
import torch.nn.functional as F

from ..tools.valid_tools import isconsistentinstance

__all__ = ['MaskGlobalCoVariancePool', 'mask_global_covariance_pool']


def _raise_errors(x: torch.tensor, mask: torch.tensor) -> None:
    if x.device != mask.device:
        raise TypeError(f"input and mask must on the same device, "
                        f"got input on {x.device} and mask on {mask.device}")
    if len(x.size()) != 4:
        raise ValueError(f"excepted input as a 4D tensor (BxCxHxW), "
                         f"but got a {len(x.size())}D tensor instead")    
    if len(mask.size()) != 3:
        raise ValueError(f"excepted mask as a 3D tensor (BxCxHxW), "
                         f"but got a {len(x.size())}D tensor instead")
    if x.size(0) != mask.size(0):
        raise ValueError(f"input and mask must have same batch size (dim0), "
                         f"got {x.size(0)} for input and {mask.size(0)} for mask")
    if x.size(2) != mask.size(1) or x.size(3) != mask.size(2):
        raise ValueError(f"input and mask must have same shape, "
                         f"got {x.size()[2:]} for input and {mask.size()[1:]} for mask")


class _mgcvp_c_impl(nn.Module):
    """Masked covariance pooling with the channel dimension as samples.

    For each batch element, treats the C channel values at every spatial
    position as samples and returns an (HW x HW) covariance-style matrix
    between spatial positions, zeroed wherever either position is masked
    out.
    """

    def __init__(self) -> None:
        super(_mgcvp_c_impl, self).__init__()

    def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """Return the (B x HW x HW) masked channel-wise covariance of ``x``.

        Args:
            x: input features, BxCxHxW.
            mask: spatial mask, BxHxW (presumably binary 0/1 — confirm).
        """
        _raise_errors(x, mask)
        # Flatten spatial dims: x -> (B, C, HW), mask -> (B, HW).
        x = x.view(x.size(0), x.size(1), -1)
        mask = mask.view(x.size(0), -1)

        # Outer product of the mask with itself: (B, HW, HW) table that is
        # non-zero only where BOTH spatial positions are kept.
        mask_table: torch.Tensor = torch.bmm(mask.unsqueeze(dim=2), mask.unsqueeze(dim=1))
        # Unbiased normalization over the C channel samples.
        fact: float = 1. / (x.size(1) - 1)
        # Center each spatial position across channels.
        x = x - torch.mean(x, dim=1, keepdim=True)

        return fact * torch.bmm(x.permute(0, 2, 1), x) * mask_table


class _mgcvp_hw_impl(nn.Module):
    """Masked covariance pooling with the spatial positions as samples.

    For each batch element, treats the HW spatial positions as samples and
    returns a (C x C) covariance matrix between channels, where masked-out
    positions contribute nothing to the mean or the products.
    """

    def __init__(self) -> None:
        super(_mgcvp_hw_impl, self).__init__()

    def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """Return the (B x C x C) masked spatial covariance of ``x``.

        Args:
            x: input features, BxCxHxW.
            mask: spatial mask, BxHxW (presumably binary 0/1 — confirm).
        """
        _raise_errors(x, mask)
        # Flatten spatial dims: x -> (B, C, HW), mask -> (B, HW).
        x = x.view(x.size(0), x.size(1), -1)
        mask = mask.view(x.size(0), -1)

        # NOTE(review): normalizes by HW - 1 even when the mask drops
        # positions; 1 / (mask.sum(dim=1) - 1) may be the intent — confirm.
        fact: float = 1. / (x.size(2) - 1)
        # Masked per-channel mean: sum(x * mask) / sum(mask), written as a
        # ratio of means so the 1/HW factors cancel.
        x = x - torch.mean(x * mask.unsqueeze(dim=1), dim=2, 
                           keepdim=True) / torch.mean(mask, dim=1, 
                                                      keepdim=True).unsqueeze(dim=1)
        # Zero out masked positions so they do not contribute to the products.
        x = x * mask.unsqueeze(dim=1)
        return fact * torch.bmm(x, x.permute(0, 2, 1))


class MaskGlobalCoVariancePool(nn.Module):
    """Global covariance pooling restricted to positions kept by a mask.

    Args:
        mode: ``'channel'`` for an (HW x HW) covariance across spatial
            positions, or ``'spatial'`` for a (C x C) covariance across
            channels.

    Raises:
        NotImplementedError: for any other ``mode``.
    """

    def __init__(self, mode: str = 'channel') -> None:
        super(MaskGlobalCoVariancePool, self).__init__()
        self.mode = mode

        if mode == 'channel':
            self.pool: nn.Module = _mgcvp_c_impl()
        elif mode == 'spatial':
            self.pool: nn.Module = _mgcvp_hw_impl()
        else:
            raise NotImplementedError(f"this module supports 'channel' and 'spatial' mode so far, got {mode}")

    def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """Pool ``x`` (BxCxHxW) under ``mask`` (BxHxW) via the chosen impl."""
        return self.pool(x, mask)

def mask_global_covariance_pool(x: torch.Tensor, mask: torch.Tensor, mode: str = 'channel') -> torch.Tensor:
    """Functional form of :class:`MaskGlobalCoVariancePool`.

    Args:
        x: input features, BxCxHxW.
        mask: spatial mask, BxHxW.
        mode: ``'channel'`` or ``'spatial'``; defaults to ``'channel'``,
            matching the module class.

    Returns:
        The masked covariance pool of ``x`` (shape depends on ``mode``).
    """
    return MaskGlobalCoVariancePool(mode)(x, mask)