import torch 
import torch.nn as nn
import torch.nn.functional as F 
import fixed_kornia as kornia

from typing import Tuple
from torch.autograd import Function

class Suppressor(Function):
    '''
        if x < threshold:
            x = 0
        else:
            x = 1
    '''
    @staticmethod
    def forward(ctx, input, threshold: float):
        ctx.save_for_backward(input)
        ctx.threshold = threshold
        return (input > threshold).type(torch.float)
    
    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad_input[input < ctx.threshold] = 0
        return grad_input, None # return as many parameters as inputs


class WarpLoss(nn.Module):
    '''

    '''

    def __init__(self, alpha: float = 1., threshold: float = 0., interpolate: str = 'bilinear', dist: bool = False):
        '''Args:
            alpha:         float - weight of Dist Loss 
            threshold:     float - 
            interpolate:   str   - mode of interpolate ('nearest' or 'bilinear')
            dist:          bool  - return integrated or individual losses
        '''
        super(WarpLoss, self).__init__()
        self.alpha: float = alpha
        self.threshold: float = threshold
        self.interpolate: str = interpolate
        self.dist: bool = dist
        self.__suppressor = Suppressor.apply
    
    def forward(self, x: torch.Tensor, theta: torch.Tensor, align_corners: bool = False):
        '''Args:
        input:
            mask:          3D tensor - 0/1 mask (BxHxW)
            theta:         3D tensor - perspective matrix (Bx3x3) or affine matrix (Bx2x3)
            align_corners: bool      - 
        output:
            loss: tensor - warp loss
        '''
        B, H, W = x.size()
        b, h, w = theta.size()
        assert B == b, 'mask and theta should have same batch size'
        assert (h == 2 or h == 3) and (w == 3), 'theta should be perspective matrix(3x3) or affine matrix(2x3)'
        # print(x.requires_grad, theta.requires_grad) # False, True

        if h == 2: # convert affine matrix to homography
            theta = F.pad(theta, [0,0, 0,1], "constant", value=0.)
            theta[..., -1, -1] += 1.0
        theta_inv: torch.Tensor = torch.inverse(theta.cpu()).to(theta.device) # avoid runtime: kernel image error
        # print(theta_inv.requires_grad) # True
        # calculate transformed masks
        mask_dst: torch.Tensor = kornia.warp_perspective(x.unsqueeze(dim=1), theta,
                                                         flags=self.interpolate,
                                                         dsize=(H,W),
                                                         align_corners=align_corners).squeeze()
        # calculate inversed projected masks
        mask_src: torch.Tensor = kornia.warp_perspective(mask_dst.unsqueeze(dim=1), theta_inv,
                                                         flags=self.interpolate,
                                                         dsize=(H,W),
                                                         align_corners=align_corners).squeeze()
        # print(mask_dst.requires_grad, mask_src.requires_grad) # True, True
        # suppress small scalar
        mask_src = self.__suppressor(mask_src, self.threshold)
        mask_dst = self.__suppressor(mask_dst, self.threshold)
        # print(mask_dst.requires_grad, mask_src.requires_grad) # True, True

        ''' IoU loss '''
        # calculate overlaps
        intersection_area: torch.Tensor = torch.sum(torch.mul(mask_src, x), dim = [1, 2])
        x_area: torch.Tensor = torch.sum(x, dim = [1, 2])
        projected_area: torch.Tensor = torch.sum(mask_src, dim = [1, 2])
        union_area: torch.Tensor = torch.sub(torch.add(x_area, projected_area), intersection_area)
        # calculate IoU
        IoU: torch.Tensor = torch.div(intersection_area + 1, union_area + 1) # laplace smoothing
        # print(IoU.requires_grad) # True
        # calculate IoU Loss
        iou_loss: torch.Tensor = -torch.mean(torch.log(IoU))

        ''' area loss '''
        # calculate area of transformed mask
        transformed_area: torch.Tensor = torch.sum(mask_dst, dim = [1, 2])
        # calculate area occupancy
        Area: torch.Tensor = torch.div(transformed_area + 1, H*W + 1) # laplace smoothing
        # print(Area.requires_grad) # True
        # calculate Area Loss
        area_loss: torch.Tensor = -torch.mean(torch.log(Area))

        ''' center loss '''
        # calculate centers
        # w_r = torch.arange(start = .5, end = W) # (W, )
        # w_r = w_r.repeat(B, H, 1).to(mask_src.device) # (B, H, W)
        # h_r = torch.arange(start = .5, end = H).unsqueeze(dim = 1) # (H, 1)
        # h_r = h_r.repeat(B, 1, W).to(mask_src.device) # (B, H, W)
        # mask_center_w = torch.mean(torch.mul(w_r, x), dim = [1, 2]).unsqueeze(dim=1) # (B, 1)
        # mask_center_h = torch.mean(torch.mul(h_r, x), dim = [1, 2]).unsqueeze(dim=1) # (B, 1)
        # mask_center_1 = torch.ones((B,1), device=mask_src.device)
        # mask_center_wh1 = torch.hstack((mask_center_w, mask_center_h, mask_center_1)).unsqueeze(dim=2) # (B, 3, 1)
        # mask_dst_center_wh1 = torch.bmm(theta, mask_center_wh1) # (B, 3, 3) x (B, 3, 1) = (B, 3, 1)
        # # print(mask_dst_center_wh1.requires_grad) # True
        # # mask_dst_center_w = torch.div(mask_dst_center_wh1[:, 0], mask_dst_center_wh1[:, 2]) # (B, 1) :: x_ = x / z
        # # mask_dst_center_h = torch.div(mask_dst_center_wh1[:, 1], mask_dst_center_wh1[:, 2]) # (B, 1) :: y_ = y / z
        # mask_dst_center_w = mask_dst_center_wh1[:, 0]
        # mask_dst_center_h = mask_dst_center_wh1[:, 1]
        # # print(mask_dst_center_w.requires_grad, mask_dst_center_h.requires_grad) # True, True
        # # calculate euclidean distance between transformed mask center and feature map center
        # distance: torch.Tensor = torch.add(torch.pow(torch.sub(mask_dst_center_w, W//2), 2.),
        #                                    torch.pow(torch.sub(mask_dst_center_h, H//2), 2.))
        # # calculate euclidean distance between mask center and feature map center
        # factor: torch.Tensor = torch.add(torch.pow(torch.sub(mask_center_w, W//2), 2.),
        #                                  torch.pow(torch.sub(mask_center_h, H//2), 2.))
        # factor = torch.clamp(factor, 1e-6) # avoid DividedByZero error
        # # normalize distance
        # Dist: torch.Tensor = torch.div(distance, factor)
        # # calculate Dist Loss
        # dist_loss: torch.Tensor = torch.mean(torch.mul(Dist, mask_weights))
        # # print(dist_loss.requires_grad) # True
        dist_loss = torch.tensor([0.], device=x.device, dtype=torch.float)

        # calculate warp loss
        if self.dist:
            return iou_loss, area_loss, dist_loss
        else:
            return iou_loss + area_loss + dist_loss * self.alpha