'''
Description: TPS-based spatial transformer (TransNet): predicts an affine + thin-plate-spline warp from an image and resamples it via grid_sample.
Author: Guan Xiongjun
Date: 2022-09-16 13:21:27
LastEditTime: 2022-09-20 12:43:05
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Function, Variable
import itertools

def compute_partial_repr(input_points, control_points):
    """Compute the TPS radial basis between two point sets.

    Entry (i, j) of the result is U(d) = 0.5 * d2 * log(d2), where d2 is the
    squared Euclidean distance between input point i and control point j
    (equivalent to r^2 * log(r)).  The kernel is defined as 0 at zero
    distance, where 0 * log(0) would otherwise produce NaN.

    Args:
        input_points: (N, 2) tensor of 2-D points.
        control_points: (M, 2) tensor of 2-D control points.

    Returns:
        (N, M) tensor of radial-basis values.
    """
    n = input_points.size(0)
    m = control_points.size(0)
    diff = input_points.view(n, 1, 2) - control_points.view(1, m, 2)
    # Elementwise square + sum over the coordinate axis; kept as explicit
    # multiply/add (faster here than torch.sum over dim=2).
    diff_sq = diff * diff
    dist_sq = diff_sq[:, :, 0] + diff_sq[:, :, 1]
    kernel = 0.5 * dist_sq * torch.log(dist_sq)
    # Replace the NaNs produced by 0 * log(0) with the defined value 0.
    return torch.where(torch.isnan(kernel), torch.zeros_like(kernel), kernel)

class TPSGridGen(nn.Module):
    """Thin-plate-spline sampling-grid generator.

    Precomputes, for a fixed set of target control points, the inverse TPS
    kernel and the TPS representation of every target pixel coordinate.
    ``forward`` then maps a batch of source control points to per-pixel
    source sampling coordinates in [-1, 1], suitable for ``F.grid_sample``.
    """

    def __init__(self, target_height, target_width, target_control_points, device):
        """
        Args:
            target_height: output grid height in pixels.
            target_width: output grid width in pixels.
            target_control_points: (N, 2) tensor of control points in
                normalized (x, y) coordinates.
            device: torch device the precomputed buffers are moved to.
        """
        super(TPSGridGen, self).__init__()
        assert target_control_points.ndimension() == 2
        assert target_control_points.size(1) == 2
        N = target_control_points.size(0)
        self.num_points = N
        target_control_points = target_control_points.float()

        # Build the padded (N+3) x (N+3) TPS system matrix:
        #   [ K   1  P ]
        #   [ 1^T 0  0 ]
        #   [ P^T 0  0 ]
        # where K is the pairwise radial-basis matrix and P the control points.
        forward_kernel = torch.zeros(N + 3, N + 3)
        target_control_partial_repr = compute_partial_repr(target_control_points, target_control_points)
        forward_kernel[:N, :N].copy_(target_control_partial_repr)
        forward_kernel[:N, -3].fill_(1)
        forward_kernel[-3, :N].fill_(1)
        forward_kernel[:N, -2:].copy_(target_control_points)
        forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1))
        # Invert once at construction; forward() then only needs two matmuls.
        inverse_kernel = torch.inverse(forward_kernel)

        # TPS representation of every target pixel, normalized to [-1, 1].
        HW = target_height * target_width
        target_coordinate = list(itertools.product(range(target_height), range(target_width)))
        target_coordinate = torch.Tensor(target_coordinate)  # HW x 2, rows are (y, x)
        Y, X = target_coordinate.split(1, dim=1)
        Y = Y * 2 / (target_height - 1) - 1
        X = X * 2 / (target_width - 1) - 1
        target_coordinate = torch.cat([X, Y], dim=1)  # convert from (y, x) to (x, y)
        target_coordinate_partial_repr = compute_partial_repr(target_coordinate, target_control_points)
        target_coordinate_repr = torch.cat([
            target_coordinate_partial_repr, torch.ones(HW, 1), target_coordinate
        ], dim=1)

        # Register as buffers so they move with the module and are saved
        # in its state_dict (but are not trainable parameters).
        self.register_buffer('inverse_kernel', inverse_kernel.to(device))
        self.register_buffer('padding_matrix', torch.zeros(3, 2).to(device))
        self.register_buffer('target_coordinate_repr', target_coordinate_repr.to(device))

    def forward(self, source_control_points):
        """Map (B, N, 2) source control points to (B, H*W, 2) sampling coords.

        Returns:
            (B, target_height * target_width, 2) tensor of (x, y) source
            coordinates for every target pixel.
        """
        assert source_control_points.ndimension() == 3
        assert source_control_points.size(1) == self.num_points
        assert source_control_points.size(2) == 2
        batch_size = source_control_points.size(0)

        # NOTE: the deprecated torch.autograd.Variable wrappers (no-ops since
        # PyTorch 0.4) were removed; plain tensors track gradients themselves.
        Y = torch.cat([source_control_points, self.padding_matrix.expand(batch_size, 3, 2)], 1)
        mapping_matrix = torch.matmul(self.inverse_kernel, Y)
        source_coordinate = torch.matmul(self.target_coordinate_repr, mapping_matrix)
        return source_coordinate

class ConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    With the default kernel_size=3, stride=1, padding=1 the spatial size of
    the input is preserved.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super().__init__()
        # Kept under the attribute name `conv` so state_dict keys match
        # existing checkpoints.
        stages = [
            nn.Conv2d(in_channels, out_channels,
                      kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        """Apply conv -> batch-norm -> ReLU to a (B, C_in, H, W) tensor."""
        return self.conv(x)

class TransNet(nn.Module):
    """Regress and apply an affine + TPS deformation to a 1-channel image.

    ``forward`` returns a (B, 2*n*n + 4) parameter vector: 2*n*n TPS
    control-point offsets followed by 4 similarity parameters
    (scale s, rotation theta, translation tx, ty).  ``apply`` warps an image
    with such a vector: similarity transform first, then the TPS warp.

    NOTE: the conv/pool stack and the ``linear1`` input size (13 * 14 * 256)
    are derived from a 480 x 480 input (see class docstring in the original:
    "input 480 x 480"); other input resolutions will break the flatten step.
    """

    def __init__(self, device, image_height, image_width, n=4):
        """
        Args:
            device: torch device used for the TPS buffers and warps.
            image_height / image_width: size of the TPS sampling grid.
            n: control points per side (n*n total).
        """
        super(TransNet, self).__init__()
        self.conv1 = ConvLayer(1, 32, 3, 2, 1)
        self.conv2 = ConvLayer(32, 64, 3, 2, 1)
        self.conv3 = ConvLayer(64, 128, 3, 2, 1)
        self.conv4 = ConvLayer(128, 256, 3, 2, 1)
        self.pool = nn.MaxPool2d(kernel_size=(6, 4), stride=2, padding=0)
        # 480x480 -> 30x30 after four stride-2 convs -> 13x14 after the pool.
        self.linear1 = nn.Linear(13 * 14 * 256, 1024)
        self.linear2 = nn.Linear(1024, 2 * n * n + 4)

        self.device = device
        # BUG FIX: was `self.n = 4`, silently ignoring the constructor
        # argument whenever n != 4.
        self.n = n
        self.image_height = image_height
        self.image_width = image_width
        r1 = r2 = 0.9  # control grid spans [-0.9, 0.9] in normalized coords

        # n x n grid of target control points; the epsilon makes np.arange
        # include the right endpoint despite float rounding.
        target_control_points = torch.Tensor(list(itertools.product(
            np.arange(-r1, r1 + 0.00001, 2.0 * r1 / (n - 1)),
            np.arange(-r2, r2 + 0.00001, 2.0 * r2 / (n - 1)),
        )))
        Y, X = target_control_points.split(1, dim=1)
        target_control_points = torch.cat([X, Y], dim=1)  # (y, x) -> (x, y)
        self.tps = TPSGridGen(image_height, image_width, target_control_points, device)
        self.target_control_points = target_control_points.to(device)

    def forward(self, x):
        """Predict deformation parameters for a batch of (B, 1, H, W) images."""
        batch_size = x.size(0)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.pool(x)
        x = x.view(batch_size, -1)
        x = self.linear1(x)
        x = self.linear2(x)

        return x

    # NOTE(review): this shadows nn.Module.apply(fn), which callers commonly
    # use for weight initialization; renaming would be safer but would change
    # the public API, so it is only flagged here.
    def apply(self, img, x):
        """Warp `img` with parameters `x` as produced by forward().

        Args:
            img: (B, C, H, W) image batch to warp.
            x: (B, 2*n*n + 4) parameter vector.

        Returns:
            The warped image batch (same shape as `img`).
        """
        batch_size = x.size(0)
        Ad = x[:, :-4]  # TPS control-point offsets

        s, theta, tx, ty = x[:, -4], x[:, -3], x[:, -2], x[:, -1]
        # Assemble the 2x3 similarity matrix (uniform scale s, rotation
        # theta, translation tx/ty) expected by F.affine_grid.
        As = torch.zeros((batch_size, 2, 3)).to(self.device)
        As[:, 0, 0] = s * torch.cos(theta)
        As[:, 0, 1] = -s * torch.sin(theta)
        As[:, 0, 2] = tx
        As[:, 1, 0] = s * torch.sin(theta)
        As[:, 1, 1] = s * torch.cos(theta)
        As[:, 1, 2] = ty

        grid = (F.affine_grid(As, img.size(), align_corners=False)).float()
        img = F.grid_sample(img, grid, align_corners=False)

        # target_control_points is already on self.device (moved in
        # __init__), so the redundant .to(self.device) was dropped.
        source_control_points = Ad.view(x.size(0), -1, 2) + self.target_control_points
        source_coordinate = self.tps(source_control_points)
        grid = source_coordinate.view(batch_size, self.image_height, self.image_width, 2)
        img = F.grid_sample(img, grid, align_corners=False)

        return img





