import torch
from torch.autograd import Function
from torch.nn.modules.module import Module
import os
from torch.autograd.function import once_differentiable
from torch.utils.cpp_extension import load

# Resolve all paths relative to this file so the extension loads no matter
# what the process's current working directory is.  (The original used
# cwd-relative 'lib/cppcuda/...' sources, which only worked when run from
# the repo root; the layout '../cppcuda' matches the old build_directory.)
_cppcuda_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../cppcuda'))
build_path = os.path.join(_cppcuda_dir, 'build')

print('compiling/loading roi_align')
# JIT-compiles the C++/CUDA sources on first import and caches the result
# in build_path; subsequent imports just load the cached module.
roialign = load(name='roialign',
                sources=[os.path.join(_cppcuda_dir, 'roi_align_binding.cpp'),
                         os.path.join(_cppcuda_dir, 'roi_align_forward_cuda.cu'),
                         os.path.join(_cppcuda_dir, 'roi_align_backward_cuda.cu')],
                build_directory=build_path, verbose=True)


class RoIAlignFunction(Function):
    """Autograd function wrapping the `roialign` C++/CUDA extension.

    Dispatches to the CUDA kernels when the tensors live on the GPU and to
    the CPU implementation otherwise.  Only `features` receives a gradient.
    """

    @staticmethod
    def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale, sampling_ratio):
        """Pool each RoI from `features` into a fixed-size grid.

        Args:
            features: BxCxHxW feature tensor.
            rois: Nx5 tensor of (batch_idx, x1, y1, x2, y2) boxes
                (see `preprocess_rois` for the expected layout).
            pooled_height, pooled_width: output grid size per RoI.
            spatial_scale: factor mapping RoI coordinates onto the feature map.
            sampling_ratio: bilinear sampling points per bin.

        Returns:
            The pooled output tensor produced by the extension.
        """
        # NOTE(review): rois is stashed on ctx directly rather than via
        # ctx.save_for_backward; that is tolerable here because backward is
        # wrapped in @once_differentiable, but save_for_backward would let
        # autograd detect in-place modification of rois.
        ctx.rois = rois
        ctx.features_size = features.size()
        ctx.pooled_height = pooled_height
        ctx.pooled_width = pooled_width
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio

        # Pick the device-appropriate kernel (replaces the fragile
        # `cond and a or b` pre-ternary idiom).
        if features.is_cuda:
            forward_fun = roialign.roi_align_forward_cuda
        else:
            forward_fun = roialign.roi_align_forward_cpu
        return forward_fun(features,
                           rois,
                           pooled_height,
                           pooled_width,
                           spatial_scale,
                           sampling_ratio)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Propagate `grad_output` back to `features`; rois get no gradient."""
        rois = ctx.rois
        features_size = ctx.features_size

        if rois.is_cuda:
            backward_fun = roialign.roi_align_backward_cuda
        else:
            backward_fun = roialign.roi_align_backward_cpu
        grad_input = backward_fun(rois,
                                  grad_output,
                                  features_size[0],
                                  features_size[1],
                                  features_size[2],
                                  features_size[3],
                                  ctx.pooled_height,
                                  ctx.pooled_width,
                                  ctx.spatial_scale,
                                  ctx.sampling_ratio)
        # One gradient slot per forward input; only `features` is differentiable.
        return grad_input, None, None, None, None, None


class RoIAlign(Module):
    """nn.Module wrapper around :class:`RoIAlignFunction`.

    Args:
        pooled_height: output grid height per RoI.
        pooled_width: output grid width per RoI.
        spatial_scale: factor mapping RoI coordinates onto the feature map.
        sampling_ratio: bilinear sampling points per bin (default 0;
            presumably "adaptive" in the kernel — confirm against the CUDA code).
    """

    def __init__(self, pooled_height, pooled_width, spatial_scale, sampling_ratio=0):
        super(RoIAlign, self).__init__()

        self.pooled_height = int(pooled_height)
        self.pooled_width = int(pooled_width)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)

    def forward(self, features, rois):
        """Pool RoIs from a feature map.

        Args:
            features: BxCxHxW tensor.
            rois: tensor (or list of tensors, for multiscale) of boxes
                IDX,Xmin,Ymin,Xmax,Ymax; normalized to Nx5 by
                `preprocess_rois` before the op is applied.
        """
        rois = preprocess_rois(rois)
        return RoIAlignFunction.apply(features,
                                      rois,
                                      self.pooled_height,
                                      self.pooled_width,
                                      self.spatial_scale,
                                      self.sampling_ratio)

    def extra_repr(self):
        # Shown by print(module); standard nn.Module debuggability hook.
        return ('pooled_height={}, pooled_width={}, spatial_scale={}, '
                'sampling_ratio={}'.format(self.pooled_height, self.pooled_width,
                                           self.spatial_scale, self.sampling_ratio))


def preprocess_rois(rois):
    """Normalize `rois` into a single Nx5 tensor (batch_idx, x1, y1, x2, y2).

    Accepted input forms:
      * list of tensors (multiscale) -> concatenated along dim 0;
      * 1xNxK tensor                 -> leading batch dimension squeezed away;
      * NxK tensor with K == 4       -> zero batch-index column prepended.
    Anything that is not a list or Tensor is returned unchanged.
    """
    if isinstance(rois, list):
        # multiscale: merge the per-scale boxes into one tensor
        rois = torch.cat(rois, dim=0)
    if not isinstance(rois, torch.Tensor):
        return rois
    if rois.dim() == 3:
        # only a singleton leading batch dimension is supported
        assert rois.size(0) == 1
        rois = rois.squeeze(0)
    if rois.size(1) == 4:
        # prepend a zero batch-index column to obtain the Nx5 layout
        batch_idx = rois.new_zeros((rois.size(0), 1))
        rois = torch.cat((batch_idx, rois), dim=1).contiguous()
    return rois
