import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fixed_kornia import warp_perspective
from typing import Tuple
from torchvision.models.resnet import resnet18

# class STN(nn.Module):
#     def __init__(self, in_shape, N_blocks=1):
#         super(STN, self).__init__()
#         self.in_shape = in_shape # (C, h, w)
#         self.N_blocks = N_blocks
#         __, h, w = in_shape
#         #print(in_shape)
#         self.fc_loc = []
#         for i in range(N_blocks):
#             self.fc_loc.append(nn.Sequential(
#                 nn.Linear(h*w, int(np.sqrt(h*w*6))),
#                 nn.ReLU(True),
#                 nn.Linear(int(np.sqrt(h*w*6)), 6)
#             ))
#             self.fc_loc[i][2].weight.data.zero_()
#             self.fc_loc[i][2].bias.data.copy_(torch.tensor([1,0,0,0,1,0], dtype=torch.float))
#         self.fc_loc = nn.ModuleList(self.fc_loc)

#     def forward(self, x, with_theta=False):
#         __, N, C, h, w = x.shape
#         res = torch.zeros_like(x)
#         xs = F.adaptive_avg_pool3d(x, output_size=(1, h, w)) # channel-wise pooling to (B, N, 1, h, w)
#         xs = xs.view(-1, self.N_blocks, h*w) # to (B, N, h*w)
#         thetas = torch.ones((xs.shape[0], self.N_blocks, 2, 3))
#         for i in range(self.N_blocks):
#             theta = self.fc_loc[i](xs[:,i])
#             theta = theta.view(-1,2,3)
#             thetas[:,i] = theta
#             grid = F.affine_grid(theta, x[:,i].size(), align_corners=False)
#             res[:,i] = F.grid_sample(x[:,i], grid, align_corners=False)
#         if with_theta:
#             return res, thetas.clone().detach()
#         else:
#             return res


class SelfAttentionModule(nn.Module):
    """Self-attention over the middle (token) dimension of a 3D tensor.

    A learned (t_dim, d_dim) weight matrix element-wise scales the input to
    form the query; key and value are the raw input. The attention map is
    softmax(Q @ K^T) with optional dropout, then applied to the value.
    """

    def __init__(self, t_dim: int, d_dim: int, dropout_rate: float = 0.):
        """Args:
            t_dim: size of the token dimension (H of a BxHxW input).
            d_dim: size of the feature dimension (W of a BxHxW input).
            dropout_rate: dropout probability applied to the attention map.
        """
        super(SelfAttentionModule, self).__init__()
        self.dropout = nn.Dropout(p=dropout_rate)
        self.weights = nn.Parameter(torch.empty(t_dim, d_dim), requires_grad=True)
        nn.init.xavier_uniform_(self.weights)  # xavier init for the scaling weights

    def forward(self, M: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        '''Args:
        Input:
            M: 3D tensor - matrix to compute self-attention (BxHxW)
        Output:
            result: 3D tensor - result matrix (BxHxW)
            attention: 3D tensor - attention matrix (BxHxH)
        '''
        # self attention: query is the weighted input, key = value = input
        query = M * self.weights           # (b, h, w)
        key_t = M.transpose(1, 2)          # (b, w, h)
        scores = torch.bmm(query, key_t)   # (b, h, h)
        attention = self.dropout(F.softmax(scores, dim=-1))
        result = torch.bmm(attention, M)   # (b, h, w)
        return result, attention



class maskSTN(nn.Module):
    """Mask-driven spatial transformer network.

    Pipeline: a partially-frozen pretrained ResNet-18 turns each of the N
    masks into a 512-d descriptor, a bidirectional GRU embeds relations among
    the N descriptors, a residual self-attention block refines them, and a
    per-mask fully-connected head (initialised to the identity transform)
    regresses one 3x3 affine/perspective matrix. forward() then warps both
    image and mask with the regressed matrices.
    """

    def __init__(self, in_shape: Tuple[int, int, int, int], 
                 mode: str = 'affine',
                 align_corners: bool = False, 
                 interpolate: str = 'bilinear',
                 dropout_rate: float = 0.,
                 device: torch.device = 'cuda') -> None:
        '''Args:
        Input:
            in_shape:      tuple        - expected per-sample input shape (N, C, H, W)
            mode:          str          - work mode of this block ('affine' or 'perspective')
            align_corners: bool         - passed through to warp_perspective
            interpolate:   str          - mode of interpolate ('nearest' or 'bilinear')
            dropout_rate:  float        - dropout probability on the attention map
            device:        torch.device - device all submodules and tensors live on
        '''
        super(maskSTN, self).__init__()

        self.mode = mode
        self.device = device
        # the regressor's bias initialises every transform to the identity
        if mode == 'affine':
            self.out_channels: int = 6
            self.out_bias: torch.Tensor = torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float, device=self.device)
        elif mode == 'perspective':
            self.out_channels: int = 8
            self.out_bias: torch.Tensor = torch.tensor([1, 0, 0, 0, 1, 0, 0, 0], dtype=torch.float, device=self.device)
        else:
            raise NotImplementedError

        n, c, h, w = in_shape
        self.interpolate: str = interpolate
        self.align_corners: bool = align_corners
        self.blocks: int = n
        self.width: int = w
        self.height: int = h

        # localisation net: pretrained ResNet-18 without its final fc layer.
        # FIX: build the Sequential directly from the pretrained model so its
        # weights are actually used. The previous state_dict copy was a no-op:
        # Sequential keys ("0.weight", ...) never match ResNet keys
        # ("conv1.weight", ...), so the filtered dict was always empty.
        resnet: nn.Module = resnet18(pretrained=True, progress=False)  # input channels: 3
        self.localisation_net: nn.Module = nn.Sequential(*list(resnet.children())[:-1]).to(self.device)
        for index, child in enumerate(self.localisation_net.children()):
            if index < 7:  # freeze top 7 blocks, finetune the last block
                # FIX: test module types instead of parameter names — the old
                # '"BatchNorm2d" in name' check never matched named_parameters()
                # names, so batchnorm layers were frozen despite the stated intent.
                for module in child.modules():
                    if not isinstance(module, nn.BatchNorm2d):  # do not freeze batchnorm layers
                        for param in module.parameters(recurse=False):
                            param.requires_grad = False
        # out shape: (b, 512, 1, 1)
        self.loc_out_channels: int = 512

        # gru module embeds relations among masks
        self.gru: nn.Module = nn.GRU(input_size = self.loc_out_channels, 
                                     hidden_size = self.loc_out_channels,
                                     num_layers = self.blocks,
                                     batch_first = True,
                                     bidirectional = True)
        # out shape: (b, n, hidden_size: 512 * 2) ~ (b, n, 2, 512)
        self.gru_out_channels: int = self.loc_out_channels
        # gru weight/bias initialization
        # NOTE(review): only layer 0 is explicitly initialised; deeper layers
        # keep PyTorch's default init — confirm this is intended.
        nn.init.orthogonal_(self.gru.weight_ih_l0, gain=1)
        nn.init.orthogonal_(self.gru.weight_hh_l0, gain=1)
        self.gru.bias_ih_l0.data.zero_()
        self.gru.bias_hh_l0.data.zero_()
        self.gru.to(self.device)  # move gru to specified device

        # self-attention module
        self.sam: nn.Module = SelfAttentionModule(t_dim=self.blocks, 
                                                  d_dim=self.gru_out_channels,
                                                  dropout_rate=dropout_rate).to(self.device)
        # out shape: (b, n, 512)
        self.sam_out_channels: int = self.gru_out_channels

        # one small fc regressor per mask; last layer zero-weighted so its
        # initial output is exactly `out_bias` (the identity transform)
        fc_list: list = []
        for i in range(self.blocks):
            fc_module: nn.Module = nn.Sequential(nn.Linear(in_features = self.sam_out_channels,
                                                        out_features = 32),
                                                 nn.Tanh(),
                                                 nn.Linear(in_features = 32,
                                                           out_features = self.out_channels)).to(self.device)
            fc_module[2].weight.data.zero_()
            fc_module[2].bias.data.copy_(self.out_bias)  # (n, 6: affine | 8: perspective)
            fc_list.append(fc_module)
        self.fc_list: nn.ModuleList = nn.ModuleList(fc_list)


    def stn(self, mask: torch.Tensor) -> torch.Tensor:
        '''Regress one 3x3 transform matrix per mask.
        Input:
            mask: 4D tensor - 0/1 mask (BxNxHxW)
        Output:
            stn_mask: 4D tensor - transform matrices (BxNx3x3)
        '''
        # generate mask features
        loc_mask: torch.Tensor = torch.zeros((mask.size(0), self.blocks, self.loc_out_channels), device=self.device, dtype=torch.float)
        for i in range(self.blocks):
            # replicate the single-channel mask across the 3 input channels resnet expects
            loc_output = self.localisation_net(mask[:, i].unsqueeze(dim=1).expand(-1, 3, -1, -1)) # (b, 512, 1, 1)
            # FIX: flatten(1) keeps the batch dim; .squeeze() dropped it when b == 1
            loc_mask[:, i] = loc_output.flatten(start_dim=1) # (b, 512)

        # embeds relations
        gru_mask: torch.Tensor
        gru_mask, __ = self.gru(loc_mask)
        gru_mask = gru_mask.view(-1, self.blocks, 2, self.gru_out_channels) # (b, n, 2, 512)
        # sum the forward and backward directions of the bidirectional GRU
        gru_mask_sum: torch.Tensor = torch.sum(gru_mask, dim=2) # (b, n, 512)

        # calculate attention
        gru_mask_attn: torch.Tensor
        gru_mask_attn, __ = self.sam(gru_mask_sum)
        # residual shortcut around the attention block
        gru_mask_res: torch.Tensor = torch.add(gru_mask_sum, gru_mask_attn) # (b, n, 512)

        # regression for transform matrix
        fc_mask: torch.Tensor = torch.zeros((mask.size(0), self.blocks, self.out_channels), 
                                device=self.device, dtype=torch.float) # (b, n, 6: affine | 8: perspective)
        for i in range(self.blocks):
            fc_mask[:, i] = self.fc_list[i](gru_mask_res[:, i])
        # complete the 3x3 matrix: perspective regresses 8 params and needs only
        # the bottom-right 1; affine regresses 6 and needs the whole row [0, 0, 1]
        if self.mode == 'perspective':
            expand: torch.Tensor = torch.ones((fc_mask.size(0) * self.blocks, 1), 
                                              device=self.device, dtype=torch.float) # (b*n, 1)
        else:
            expand: torch.Tensor = torch.tensor([0, 0, 1], \
                device=self.device, dtype=torch.float).repeat((fc_mask.size(0) * self.blocks), 1) # (b*n, 3)
        fc_mask = fc_mask.view(-1, self.out_channels) # (b*n, 6: affine | 8: perspective)
        stn_mask = torch.hstack((fc_mask, expand)) # (b*n, 9)
        stn_mask = stn_mask.view(-1, self.blocks, 3, 3) # (b, n, 3, 3)

        return stn_mask


    def forward(self, image: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        '''Args:
        input:
            image: 5D tensor - feature map or image (BxNxCxHxW)
            mask: 4D tensor - 0/1 mask (BxNxHxW)
        output:
            image: 5D tensor - feature map or image after transform (BxNxCxHxW)
            mask: 4D tensor - 0/1 mask after transform (BxNxHxW)
            theta: 4D tensor - transform matrix (BxNx3x3)
        '''
        b, n, c, h, w = image.shape
        B, N, H, W = mask.shape

        # move inputs onto the module's device if needed
        if image.device != self.device:
            image = image.to(self.device)
        if mask.device != self.device:
            mask = mask.to(self.device)

        assert b == B and n == N and h == H and w == W, f"assuming image and mask have same shape, "\
                                                        f"but got image of shape {image.shape} and "\
                                                        f"mask of shape{mask.shape}."
        assert n == self.blocks and w == self.width and h == self.height, \
                f"assuming input shape consistant with initilizated parameters, "\
                f"but got input shape of {mask.shape[1:]} and "\
                f"initilized shape of {(self.blocks, self.height, self.width)}."
        assert image.device == mask.device, f"assuming image and mask on same device, "\
                                            f"but got image on {str(image.device)} and "\
                                            f"mask on {str(mask.device)}."

        theta = self.stn(mask)
        # flatten the (B, N) grouping so each mask/image pair is warped independently
        theta = theta.view(-1, 3, 3) # (b*n, 3, 3)
        image = image.view(-1, c, h, w) # (b*n, c, h, w)
        mask = mask.view(-1, h, w).unsqueeze(dim=1) # (b*n, 1, h, w)
        res: torch.Tensor = warp_perspective(image, theta, flags=self.interpolate,
                                             dsize=[h, w], align_corners=self.align_corners)
        mx: torch.Tensor = warp_perspective(mask, theta, flags=self.interpolate,
                                            dsize=[h, w], align_corners=self.align_corners)
        res = res.view(-1, n, c, h, w) # (b, n, c, h, w)
        # FIX: view directly from (b*n, 1, h, w); the old .squeeze() before view
        # was redundant and fragile for other size-1 dims
        mx = mx.view(-1, n, h, w) # (b, n, h, w)
        theta = theta.view(-1, n, 3, 3) # (b, n, 3, 3)

        return res, mx, theta



class mPTN(nn.Module):
    """Mask perspective-transform network (no CNN localisation stage).

    Like maskSTN but the GRU consumes the input directly: a bidirectional GRU
    embeds relations among the N inputs, a residual self-attention block
    refines them, and a per-block fully-connected head (initialised to the
    identity transform) regresses one 3x3 affine/perspective matrix.
    forward() then warps both image and mask with the regressed matrices.
    """

    def __init__(self, in_shape: Tuple[int, int, int, int], 
                 mode: str = 'affine',
                 align_corners: bool = False, 
                 interpolate: str = 'bilinear',
                 dropout_rate: float = 0.,
                 device: torch.device = 'cuda') -> None:
        '''Args:
        Input:
            in_shape:      tuple        - expected per-sample input shape (N, C, H, W)
            mode:          str          - work mode of this block ('affine' or 'perspective')
            align_corners: bool         - passed through to warp_perspective
            interpolate:   str          - mode of interpolate ('nearest' or 'bilinear')
            dropout_rate:  float        - dropout probability on the attention map
            device:        torch.device - device all submodules and tensors live on
        '''
        super(mPTN, self).__init__()

        self.mode = mode
        self.device = device
        # the regressor's bias initialises every transform to the identity
        if mode == 'affine':
            self.out_channels: int = 6
            self.out_bias: torch.Tensor = torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float, device=self.device)
        elif mode == 'perspective':
            self.out_channels: int = 8
            self.out_bias: torch.Tensor = torch.tensor([1, 0, 0, 0, 1, 0, 0, 0], dtype=torch.float, device=self.device)
        else:
            raise NotImplementedError

        n, c, h, w = in_shape
        self.interpolate: str = interpolate
        self.align_corners: bool = align_corners
        self.blocks: int = n
        self.width: int = w
        self.height: int = h
        self.in_channels: int = c

        # gru module embeds relations among masks
        self.gru: nn.Module = nn.GRU(input_size = self.in_channels, 
                                     hidden_size = self.in_channels // 2,
                                     num_layers = self.blocks,
                                     batch_first = True,
                                     bidirectional = True)
        # out shape: (b, n, c) ~ (b, n, 2, c//2)
        self.gru_out_channels: int = self.in_channels // 2
        # gru weight/bias initialization
        # NOTE(review): only layer 0 is explicitly initialised; deeper layers
        # keep PyTorch's default init — confirm this is intended.
        nn.init.orthogonal_(self.gru.weight_ih_l0, gain=1)
        nn.init.orthogonal_(self.gru.weight_hh_l0, gain=1)
        self.gru.bias_ih_l0.data.zero_()
        self.gru.bias_hh_l0.data.zero_()
        self.gru.to(self.device)  # move gru to specified device

        # self-attention module
        self.sam: nn.Module = SelfAttentionModule(t_dim=self.blocks, 
                                                  d_dim=self.gru_out_channels,
                                                  dropout_rate=dropout_rate).to(self.device)
        # out shape: (b, n, c//2)
        self.sam_out_channels: int = self.gru_out_channels

        # one small fc regressor per block; last layer zero-weighted so its
        # initial output is exactly `out_bias` (the identity transform)
        fc_list: list = []
        for i in range(self.blocks):
            fc_module: nn.Module = nn.Sequential(nn.Linear(in_features = self.sam_out_channels,
                                                        out_features = 32),
                                                 nn.Tanh(),
                                                 nn.Linear(in_features = 32,
                                                           out_features = self.out_channels)).to(self.device)
            fc_module[2].weight.data.zero_()
            fc_module[2].bias.data.copy_(self.out_bias)  # (n, 6: affine | 8: perspective)
            fc_list.append(fc_module)
        self.fc_list: nn.ModuleList = nn.ModuleList(fc_list)


    def stn(self, mask: torch.Tensor) -> torch.Tensor:
        '''Regress one 3x3 transform matrix per block.
        Input:
            mask: 3D tensor - per-block feature vectors (BxNxC), C == in_channels
        Output:
            stn_mask: 4D tensor - transform matrices (BxNx3x3)
        '''
        # embeds relations
        # FIX: the original referenced an undefined name `loc_mask` here
        # (NameError at runtime — copied from maskSTN.stn); feed the argument
        # directly to the GRU instead.
        # NOTE(review): the GRU expects a 3D (b, n, c) input while forward()
        # passes the 4D (b, n, h, w) mask — confirm the intended feature
        # extraction step upstream.
        gru_mask: torch.Tensor
        gru_mask, __ = self.gru(mask)
        gru_mask = gru_mask.view(-1, self.blocks, 2, self.gru_out_channels) # (b, n, 2, c//2)
        # sum the forward and backward directions of the bidirectional GRU
        gru_mask_sum: torch.Tensor = torch.sum(gru_mask, dim=2) # (b, n, c//2)

        # calculate attention
        gru_mask_attn: torch.Tensor
        gru_mask_attn, __ = self.sam(gru_mask_sum)
        # residual shortcut around the attention block
        gru_mask_res: torch.Tensor = torch.add(gru_mask_sum, gru_mask_attn) # (b, n, c//2)

        # regression for transform matrix
        fc_mask: torch.Tensor = torch.zeros((mask.size(0), self.blocks, self.out_channels), 
                                device=self.device, dtype=torch.float) # (b, n, 6: affine | 8: perspective)
        for i in range(self.blocks):
            fc_mask[:, i] = self.fc_list[i](gru_mask_res[:, i])
        # complete the 3x3 matrix: perspective regresses 8 params and needs only
        # the bottom-right 1; affine regresses 6 and needs the whole row [0, 0, 1]
        if self.mode == 'perspective':
            expand: torch.Tensor = torch.ones((fc_mask.size(0) * self.blocks, 1), 
                                              device=self.device, dtype=torch.float) # (b*n, 1)
        else:
            expand: torch.Tensor = torch.tensor([0, 0, 1], \
                device=self.device, dtype=torch.float).repeat((fc_mask.size(0) * self.blocks), 1) # (b*n, 3)
        fc_mask = fc_mask.view(-1, self.out_channels) # (b*n, 6: affine | 8: perspective)
        stn_mask = torch.hstack((fc_mask, expand)) # (b*n, 9)
        stn_mask = stn_mask.view(-1, self.blocks, 3, 3) # (b, n, 3, 3)

        return stn_mask


    def forward(self, image: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        '''Args:
        input:
            image: 5D tensor - feature map or image (BxNxCxHxW)
            mask: 4D tensor - 0/1 mask (BxNxHxW)
        output:
            image: 5D tensor - feature map or image after transform (BxNxCxHxW)
            mask: 4D tensor - 0/1 mask after transform (BxNxHxW)
            theta: 4D tensor - transform matrix (BxNx3x3)
        '''
        b, n, c, h, w = image.shape
        B, N, H, W = mask.shape

        # move inputs onto the module's device if needed
        if image.device != self.device:
            image = image.to(self.device)
        if mask.device != self.device:
            mask = mask.to(self.device)

        assert b == B and n == N and h == H and w == W, f"assuming image and mask have same shape, "\
                                                        f"but got image of shape {image.shape} and "\
                                                        f"mask of shape{mask.shape}."
        assert n == self.blocks and w == self.width and h == self.height, \
                f"assuming input shape consistant with initilizated parameters, "\
                f"but got input shape of {mask.shape[1:]} and "\
                f"initilized shape of {(self.blocks, self.height, self.width)}."
        assert image.device == mask.device, f"assuming image and mask on same device, "\
                                            f"but got image on {str(image.device)} and "\
                                            f"mask on {str(mask.device)}."

        theta = self.stn(mask)
        # flatten the (B, N) grouping so each mask/image pair is warped independently
        theta = theta.view(-1, 3, 3) # (b*n, 3, 3)
        image = image.view(-1, c, h, w) # (b*n, c, h, w)
        mask = mask.view(-1, h, w).unsqueeze(dim=1) # (b*n, 1, h, w)
        res: torch.Tensor = warp_perspective(image, theta, flags=self.interpolate,
                                             dsize=[h, w], align_corners=self.align_corners)
        mx: torch.Tensor = warp_perspective(mask, theta, flags=self.interpolate,
                                            dsize=[h, w], align_corners=self.align_corners)
        res = res.view(-1, n, c, h, w) # (b, n, c, h, w)
        # FIX: view directly from (b*n, 1, h, w); the old .squeeze() before view
        # was redundant and fragile for other size-1 dims
        mx = mx.view(-1, n, h, w) # (b, n, h, w)
        theta = theta.view(-1, n, 3, 3) # (b, n, 3, 3)

        return res, mx, theta