import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.resnet as resnet
import torchvision.models.shufflenetv2 as shufflenet

from typing import Tuple, Union, Optional, List
from collections import OrderedDict
from collections.abc import Iterable

from .sam import SelfAttentionModule

__all__ = [ 'PerspectiveTransformNetworks' ]


class PerspectiveTransformNetworks(nn.Module):
    '''Regress one 3x3 spatial-transform matrix per mask "block".

    Pipeline: an optional torchvision CNN head (resnet/shufflenet) extracts a
    feature vector per block, a bidirectional GRU embeds relations among the
    blocks, a self-attention module refines them (with a residual shortcut),
    and one small fully-connected regressor per block outputs affine (6) or
    perspective (8) parameters which are completed to 3x3 matrices in `stn`.
    '''

    def __check_integer(self, _x: Iterable) -> bool:
        '''Return True when every element of `_x` is an int (empty iterables pass).'''
        return all(isinstance(__x, int) for __x in _x)

    def __build_ptn_head(self, head: str, pretrained: bool = True, device: torch.device = 'cuda', 
                         freeze: Union[Iterable, int, None] = None) -> Tuple[ nn.Module, int ]:
        '''Build the feature-extraction head from a torchvision constructor.

        Args:
            head:       constructor name inside torchvision ('resnet*' or 'shufflenet*')
            pretrained: load the ImageNet-pretrained weights
            device:     device the head is moved to
            freeze:     int n -> freeze the first n children; iterable of ints ->
                        freeze the children at those indices; None -> freeze nothing
        Returns:
            Tuple of (head with the final classification fc removed, its output channels).
        Raises:
            TypeError/ValueError for a malformed `freeze`, ValueError for an
            unsupported architecture family, NotImplementedError when the named
            constructor does not exist in torchvision.
        '''
        if not isinstance(freeze, (Iterable, int, type(None))):
            raise TypeError("expected freeze options as an integer or a collection of integers, "
                            "if you do not want to specify the depth, pass NoneType instead")
        if isinstance(freeze, Iterable) and not self.__check_integer(freeze):
            raise ValueError("expected freeze options as a collection of integers, specifying layer indices to be frozen")
        if isinstance(freeze, int):
            freeze = list(range(freeze))

        # Resolve the constructor BEFORE calling it: the original called
        # getattr(module, head, None)(...) directly, which raised
        # "'NoneType' object is not callable" before the None check could run.
        if head.startswith('resnet'):
            builder = getattr(resnet, head, None)
        elif head.startswith('shufflenet'):
            builder = getattr(shufflenet, head, None)
        else:
            raise ValueError(f"expected head as resnet or shufflenet, "
                             f"but got {head} instead")
        if builder is None:
            raise NotImplementedError("failed to build specified head")
        pre_build_net: nn.Module = builder(pretrained=pretrained, progress=False).to(device)

        # remove the last fc layer used for classification
        ptn_head: nn.Module = nn.Sequential(*list(pre_build_net.children())[:-1])
        if pretrained:
            # NOTE(review): the Sequential reuses pre_build_net's child modules,
            # so the weights are already pretrained; this copy is a harmless
            # belt-and-braces step (keys rarely overlap after re-prefixing).
            pretrained_dict: OrderedDict = pre_build_net.state_dict()
            ptn_head_dict: OrderedDict = ptn_head.state_dict()
            pretrained_dict = { k: v for k, v in pretrained_dict.items() if k in ptn_head_dict}
            ptn_head_dict.update(pretrained_dict)
            ptn_head.load_state_dict(ptn_head_dict)
        if freeze is not None:
            for index, child in enumerate(ptn_head):
                if index not in freeze:
                    continue
                # Freeze everything except batch-norm layers. The original
                # matched "BatchNorm2d"/"BatchNorm1d" against *parameter names*
                # (e.g. "0.conv1.weight"), which never contain a class name, so
                # batch norms ended up frozen as well; check module types instead.
                for module in child.modules():
                    if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
                        continue
                    for param in module.parameters(recurse=False):
                        param.requires_grad = False

        out_channels = list(ptn_head.named_parameters())[-1][1].size(0)
        assert out_channels > 0, "there should be some output channels " # should never assert this
        return ptn_head, out_channels


    def __init__(self, in_shape: Tuple, 
                 mode: str = 'affine',
                 head: Optional[str] = None,
                 align_corners: bool = False, 
                 interpolate: str = 'bilinear',
                 dropout_rate: float = 0.,
                 device: torch.device = 'cpu') -> None:
        '''Args:
        Input:
            in_shape:      Tuple        - (B, C, H, W) when `head` is given (raw masks),
                                          or (B, C) for pre-extracted features
            mode:          str          - work mode of this block ('affine' or 'perspective')
            head:          Optional[str]- torchvision head name, or None to skip the head
            align_corners: bool         - stored for downstream grid sampling
            interpolate:   str          - mode of interpolate ('nearest' or 'bilinear')
            dropout_rate:  float        - dropout rate of the self-attention module
            device:        torch.device - device every submodule is created on
        '''
        super(PerspectiveTransformNetworks, self).__init__()
        if not isinstance(in_shape, tuple):
            raise ValueError(f"expected input shape as a Tuple, "
                             f"but got {type(in_shape)} instead")
        if len(in_shape) not in (2, 4):
            raise ValueError(f"expected input shape as Tuple (B,C,H,W) or (B,C) with length of 4 or 2, "
                             f"but got a Tuple {in_shape} with length of {len(in_shape)} instead")
        if not self.__check_integer(in_shape):
            raise ValueError("expected input shape as a Tuple of integers")

        self.mode = mode
        self.device = device
        if mode == 'affine':
            self.out_channels = 6
            # bias initialises the regressor at the identity affine transform
            self.out_bias: torch.Tensor = torch.tensor([1, 0, 0, 0, 1, 0], 
                                                        dtype=torch.float, device=self.device)
        elif mode == 'perspective':
            self.out_channels = 8
            # identity homography; the fixed bottom-right 1 is appended in stn()
            self.out_bias: torch.Tensor = torch.tensor([1, 0, 0, 0, 1, 0, 0, 0], 
                                                        dtype=torch.float, device=self.device)
        else:
            raise NotImplementedError
        self.interpolate: str = interpolate
        self.align_corners: bool = align_corners

        if head is not None: # build a localisation network feature can pass through
            if len(in_shape) != 4:
                raise ValueError(f"expected input shape as (N, C, H, W) with length of 4, "
                                 f"but got a Tuple {in_shape} with length of {len(in_shape)} instead")
            n, c, h, w = in_shape
            if c not in (1, 3):
                raise ValueError("number of input image/mask channels must be 1 or 3")
            self.blocks: int = n
            self.in_channels: int = c
            self.width: int = w
            self.height: int = h
            # BUG FIX: the original unpacked into (self.loc_out_channels,
            # self.loc_out_channels), so self.localisation_net was never set and
            # stn()/forward() crashed; also forward self.device so the head is
            # not silently built on the default 'cuda'.
            self.localisation_net, self.loc_out_channels = self.__build_ptn_head(head, device=self.device)
        else: # assume feature has passed through a localisation network
            if len(in_shape) != 2:
                raise ValueError(f"expected input shape as (N, C) with length of 2, "
                                 f"but got a Tuple {in_shape} with length of {len(in_shape)} instead")
            n, c = in_shape
            self.blocks: int = n
            self.in_channels: int = c
            self.loc_out_channels: int = c
            self.localisation_net = None

        # gru module embeds relations among masks
        self.gru = nn.GRU(input_size = self.loc_out_channels, 
                          hidden_size = self.loc_out_channels,
                          num_layers = self.blocks,
                          batch_first = True,
                          bidirectional = True)
        # out shape: (b, n, hidden_size * 2) ~ (b, n, 2, hidden_size)
        self.gru_out_channels: int = self.loc_out_channels
        # gru weight/bias initialization: cover every layer and direction — the
        # original only touched *_ih_l0 / *_hh_l0 although num_layers=self.blocks
        # and bidirectional=True create many more parameter tensors
        for name, param in self.gru.named_parameters():
            if name.startswith('weight'):
                nn.init.orthogonal_(param, gain=1)
            elif name.startswith('bias'):
                param.data.zero_()
        self.gru.to(self.device) # move gru to specified device

        # self-attention module
        self.sam = SelfAttentionModule(t_dim=self.blocks, 
                                       d_dim=self.gru_out_channels,
                                       dropout_rate=dropout_rate).to(self.device)
        # out shape: (b, n, d)
        self.sam_out_channels: int = self.gru_out_channels

        # batch normalize; moved to the target device like every other submodule
        self.normalizer = nn.BatchNorm1d(self.sam_out_channels).to(self.device)

        # one fully-connected regressor per block, initialised to output the
        # identity transform at the start of training (zero weight + fixed bias)
        fc_list: list = []
        for _ in range(self.blocks):
            fc_module = nn.Sequential(nn.Linear(in_features = self.sam_out_channels,
                                                out_features = 32),
                                      nn.Tanh(),
                                      nn.Linear(in_features = 32,
                                                out_features = self.out_channels)).to(self.device)
            fc_module[2].weight.data.zero_()
            fc_module[2].bias.data.copy_(self.out_bias) # (n, 6: affine | 8: perspective)
            fc_list.append(fc_module)
        self.fc_list = nn.ModuleList(fc_list)


    def stn(self, x: torch.Tensor) -> torch.Tensor:
        '''Regress per-block 3x3 transform matrices.

        Args:
            x: (B, N, C, H, W) raw blocks when a localisation head exists,
               otherwise (B, N, C) pre-extracted features.
        Returns:
            (B, N, 3, 3) transform matrices.
        '''
        if self.localisation_net is not None: # there is a localisation network
            # generate localisation features, one block at a time
            loc_feat: torch.Tensor = torch.zeros((x.size(0), self.blocks, self.loc_out_channels), 
                                                device=self.device, dtype=torch.float)
            for i in range(self.blocks):
                if self.in_channels == 1:
                    # replicate the single mask channel to the 3 channels the head expects
                    loc_input: torch.Tensor = x[:, i].expand(-1, 3, -1, -1)
                elif self.in_channels == 3:
                    loc_input: torch.Tensor = x[:, i]
                else:
                    raise ValueError("number of input tensor channel should be 1 or 3") # should never raise this
                loc_output: torch.Tensor = self.localisation_net(loc_input) # (b, C, 1, 1)
                # flatten(1) keeps the batch dim; .squeeze() collapsed it when b == 1
                loc_feat[:, i] = loc_output.flatten(1) # (b, C)
        else: # there is not a localisation network
            loc_feat: torch.Tensor = x.clone()

        # embed relations among blocks with the bidirectional gru
        gru_feat: torch.Tensor
        gru_feat, __ = self.gru(loc_feat)
        gru_feat = gru_feat.view(-1, self.blocks, 2, self.gru_out_channels) # (b, n, 2, d)
        gru_feat_sum: torch.Tensor = torch.sum(gru_feat, dim=2) # sum both directions -> (b, n, d)

        # calculate attention
        gru_feat_attn: torch.Tensor
        gru_feat_attn, __ = self.sam(gru_feat_sum)
        # residual shortcut around the attention module
        gru_feat_res: torch.Tensor = torch.add(gru_feat_sum, gru_feat_attn) # (b, n, d)

        # batch norm expects channels at dim 1
        gru_feat_res = gru_feat_res.permute([0, 2, 1]) # (b, d, n)
        gru_feat_bn: torch.Tensor = self.normalizer(gru_feat_res).permute([0, 2, 1]) # (b, n, d)

        # regression for transform matrix, one regressor per block
        fc_feat: torch.Tensor = torch.zeros((x.size(0), self.blocks, self.out_channels), 
                                device=self.device, dtype=torch.float) # (b, n, 6: affine | 8: perspective)
        for i in range(self.blocks):
            fc_feat[:, i] = self.fc_list[i](gru_feat_bn[:, i])
        # complete the 3x3 matrix: perspective appends the fixed bottom-right 1,
        # affine appends the fixed bottom row [0, 0, 1]
        if self.mode == 'perspective':
            expand: torch.Tensor = torch.ones((fc_feat.size(0) * self.blocks, 1), 
                                              device=self.device, dtype=torch.float) # (b*n, 1)
        else:
            expand: torch.Tensor = torch.tensor([0, 0, 1], \
                device=self.device, dtype=torch.float).repeat((fc_feat.size(0) * self.blocks), 1) # (b*n, 3)
        fc_feat = fc_feat.view(-1, self.out_channels) # (b*n, 6: affine | 8: perspective)
        stn_mat = torch.hstack((fc_feat, expand)) # (b*n, 9)
        stn_mat = stn_mat.view(-1, self.blocks, 3, 3) # (b, n, 3, 3)

        return stn_mat


    def forward(self, x: torch.Tensor) -> torch.Tensor:
        '''Args:
        input:
            x:      3D tensor - feature vector (BxNxC)
                or  5D tensor - feature map (BxNxCxHxW)
                or  4D tensor - mask stack (BxNxHxW), promoted to BxNx1xHxW
        output:
            theta: 4D tensor - transform matrix (BxNx3x3)
        '''
        if len(x.size()) == 4: # shape of (B, N, H, W): insert a channel dim
            x = x.unsqueeze(dim=2)
        if x.device != self.device:
            x = x.to(self.device)

        if len(x.size()) == 5: # raw blocks: stn() runs them through the head itself
            if self.localisation_net is None:
                raise ValueError("got a 5D input but no localisation head was built; "
                                 "pass pre-extracted (B, N, C) features instead")
            __, n, c, h, w = x.shape
            assert n == self.blocks and w == self.width and h == self.height and c == self.in_channels, \
                    f"assuming input shape consistent with initialized parameters, "\
                    f"but got input feature map shape of {x.shape[1:]} and "\
                    f"initialized shape of {(self.blocks, self.in_channels, self.height, self.width)}."
            # BUG FIX: the original called self.localisation_net(x) here, feeding
            # a 5D tensor to a head that expects 4D input — and stn() would then
            # have applied the head a second time. stn() already performs the
            # per-block feature extraction, so nothing to do here.
        elif len(x.size()) == 3:
            __, n, c = x.shape
            assert n == self.blocks and c == self.in_channels, \
                    f"assuming input shape consistent with initialized parameters, "\
                    f"but got input feature map shape of {x.shape[1:]} and "\
                    f"initialized shape of {(self.blocks, self.in_channels)}."
        else:
            raise ValueError(f"expected input tensor 'x' as 3D or 5D tensor, "\
                             f"but got a {len(x.size())}D tensor instead")

        theta = self.stn(x)
        theta = theta.view(-1, n, 3, 3) # (b, n, 3, 3)

        return theta