from __future__ import absolute_import, division

import torch
import torch.nn as nn
from einops import rearrange

from custom.ops.deform_utils import th_batch_map_offsets_torch, th_generate_grid

from mmcv.cnn import UPSAMPLE_LAYERS
@UPSAMPLE_LAYERS.register_module("conv_offset_2d_upsample")
class ConvOffset2DUpSample(nn.Conv2d):
    """Offset-predicting deformable upsampling layer (modified from ConvOffset2D).

    A 3x3 convolution predicts ``scale_factor**2`` 2-D offset maps per spatial
    location; the input feature map is resampled (bilinear interpolation, done
    by ``th_batch_map_offsets_torch``) at the offset positions, and the
    ``scale_factor**2`` resampled copies are merged into a ``scale_factor``-times
    larger map with ``nn.PixelShuffle``.

    Note that this layer does not perform convolution on the deformed feature
    map itself. See get_deform_cnn in cnn.py for usage.
    """

    def __init__(self, in_channels, scale_factor=2, up_padding_mode="zeros", res_mode=False, **kwargs):
        """Init.

        Parameters
        ----------
        in_channels : int
            Number of channels of the input feature map.
        scale_factor : int
            Spatial upsampling factor; the layer predicts ``scale_factor**2``
            (two-component) offset maps per location.
        up_padding_mode : str
            Padding mode forwarded to ``th_batch_map_offsets_torch``.
        res_mode : bool
            If True, offsets 2-4 are predicted as residuals on top of the
            first offset (this mode assumes scale_factor == 2, i.e. exactly
            4 offset pairs).
        **kwargs :
            Passed through to the nn.Conv2d base class.
        """
        self.in_channels = in_channels
        self.scale_factor = scale_factor
        # Cache key for the lazily built sampling grid (see _get_grid).
        self._grid_param = None
        self.up_padding_mode = up_padding_mode
        self.res_mode = res_mode
        # bias=True follows the convention of deformable-conv offset layers.
        super(ConvOffset2DUpSample, self).__init__(
            self.in_channels, self.scale_factor ** 2 * 2, 3, padding=1, bias=True, **kwargs)
        self.ps = nn.PixelShuffle(scale_factor)
        # NOTE(review): init_weights() (zero init, the usual deformable-conv
        # choice) is intentionally not called here; the offsets start from the
        # default Conv2d initialization.

    def forward(self, x):
        """Return the deformed, upsampled feature map.

        Parameters
        ----------
        x : Tensor
            Input feature map of shape (b, c, h, w).

        Returns
        -------
        Tensor of shape (b, c, scale_factor * h, scale_factor * w).
        """
        x_shape = x.size()
        # offsets_res: (b, 2 * scale**2, h, w)
        offsets_res = super(ConvOffset2DUpSample, self).forward(x)
        if self.res_mode:
            # Residual mode (assumes scale_factor == 2 -> 4 offset pairs):
            #   offset_2 = base + res_2
            #   offset_3 = base + res_3
            #   offset_4 = (offset_2 + offset_3) / 2 + res_4
            base = offsets_res[:, :2, :, :]
            res_2 = offsets_res[:, 2:4, :, :]
            res_3 = offsets_res[:, 4:6, :, :]
            res_4 = offsets_res[:, 6:8, :, :]
            offset_2 = base + res_2
            offset_3 = base + res_3
            offset_4 = (offset_2 + offset_3) / 2 + res_4
            offsets = torch.cat((base, offset_2, offset_3, offset_4), dim=1)
        else:
            offsets = offsets_res
        # offsets: (b * scale**2, h, w, 2)
        offsets = self._to_bs_h_w_2(offsets, x_shape, self.scale_factor)

        # x: (b * scale**2, c, h, w), batch-reordered so each sample's
        # scale**2 copies line up with its offset maps.
        x = x.repeat(self.scale_factor ** 2, 1, 1, 1)
        x = self.shuffle_batch(x, self.scale_factor)

        # x_offset: (b * scale**2 * c, h * w) -- bilinear sampling at offsets
        # (flat layout per th_batch_map_offsets_torch).
        x_offset = th_batch_map_offsets_torch(
            x, offsets, grid=self._get_grid(x), up_padding_mode=self.up_padding_mode)

        # x_offset: (b, scale**2 * c, h, w)
        x_offset = self._to_b_s_h_w(x_offset, x_shape, self.scale_factor)
        # Reorder channels from 1..c,1..c,... (copy-major) to 1,1,..,2,2,..
        # (channel-major) so PixelShuffle groups the scale**2 copies of each
        # channel together.
        x_offset = self.shuffle_channels(x_offset, self.scale_factor ** 2)
        output = self.ps(x_offset)

        return output

    def _get_grid(self, x):
        """Return the sampling grid for x, rebuilt only when x's batch size,
        spatial size, dtype or device changes (otherwise the cached grid is
        reused).

        Originally declared @staticmethod while still taking ``self`` and
        invoked as ``self._get_grid(self, x)``; fixed to a normal instance
        method.
        """
        batch_size, input_height, input_width = x.size(0), x.size(2), x.size(3)
        dtype, cuda = x.data.type(), x.data.is_cuda
        if self._grid_param == (batch_size, input_height, input_width, dtype, cuda):
            return self._grid
        self._grid_param = (batch_size, input_height, input_width, dtype, cuda)
        self._grid = th_generate_grid(batch_size, input_height, input_width, dtype, cuda)
        return self._grid

    def init_weights(self):
        """Zero-initialize the offset conv (standard deformable-conv init)."""
        self.weight.data.zero_()
        self.bias.data.zero_()

    @staticmethod
    def _to_bc_h_w_2(x, x_shape):
        """(b, 2c, h, w) -> (b*c, h, w, 2). Not called within this class."""
        x = x.contiguous()
        x = x.reshape(x_shape[0] * x_shape[1], 2, x_shape[2], x_shape[3])
        # Same as einops rearrange 'b c h w -> b h w c'.
        x = x.permute(0, 2, 3, 1)
        return x

    @staticmethod
    def _to_bc_h_w(x, x_shape):
        """(b, c, h, w) -> (b*c, h, w). Not called within this class."""
        x = x.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]))
        return x

    @staticmethod
    def _to_bs_h_w_2(x, x_shape, scale_factor):
        """(b, 2 * scale**2, h, w) -> (b * scale**2, h, w, 2)."""
        x = x.contiguous()
        x = x.reshape(x_shape[0] * (scale_factor ** 2), 2, x_shape[2], x_shape[3])
        # Same as einops rearrange 'b c h w -> b h w c'.
        x = x.permute(0, 2, 3, 1)
        return x

    @staticmethod
    def _to_b_c_h_w(x, x_shape):
        """(b*c, h*w) -> (b, c, h, w). Not called within this class."""
        x = x.contiguous().view(-1, int(x_shape[1]), int(x_shape[2]), int(x_shape[3]))
        return x

    @staticmethod
    def _to_b_s_h_w(x, x_shape, scale_factor):
        """(b * scale**2 * c, h*w) -> (b, scale**2 * c, h, w)."""
        x = x.contiguous().view(x_shape[0], scale_factor ** 2 * x_shape[1], x_shape[2], x_shape[3])
        return x

    def shuffle_batch(self, x, scale):
        """Batch shuffle: [N, C, H, W] -> [s**2, N/s**2, C, H, W] ->
        [N/s**2, s**2, C, H, W] -> [N, C, H, W].

        Moves the scale**2 repeated copies of each sample next to each other
        in the batch dimension (analogous to a channel shuffle, applied to
        the batch axis).
        """
        N, C, H, W = x.size()
        # contiguous() is required: permute leaves the tensor non-contiguous.
        return x.view(int(scale ** 2), int(N / scale ** 2), C, H, W).permute(1, 0, 2, 3, 4).contiguous().view(N, C, H, W)

    def shuffle_channels(self, x, groups):
        """Channel shuffle: [N, C, H, W] -> [N, g, C/g, H, W] ->
        [N, C/g, g, H, W] -> [N, C, H, W].

        Reshapes C channels into (g, C/g), transposes to (C/g, g) and
        flattens back, interleaving the groups (cf. ShuffleNet).
        """
        N, C, H, W = x.size()
        # contiguous() is required: permute leaves the tensor non-contiguous.
        return x.view(N, groups, C // groups, H, W).permute(0, 2, 1, 3, 4).contiguous().view(N, C, H, W)

if __name__ == "__main__":
    # Smoke test: a (b, c, h, w) map should come out (b, c, 2h, 2w)
    # with the default scale_factor=2.
    b = 4
    c = 8
    h, w = 8, 8
    inputs = torch.randn(b, c, h, w)
    conv_offset_2d = ConvOffset2DUpSample(c, res_mode=True)
    outputs = conv_offset_2d(inputs)
    print(outputs.shape)


