from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn.modules.utils import _pair

from mmcv.ops.deform_conv import DeformConv2dFunction, ext_module
from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2dFunction

from mmdeploy.core import FUNCTION_REWRITER, SYMBOLIC_REWRITER


@SYMBOLIC_REWRITER.register_symbolic(
    'mmcv.ops.deform_conv.DeformConv2dFunction', backend='ascend')
def deformconv2dfunction_symbolic_ascend(g,
                                         input,
                                         offset,
                                         weight,
                                         stride,
                                         padding,
                                         dilation,
                                         groups,
                                         deform_groups,
                                         bias=False,
                                         im2col_step=32):
    """Rewrite the ONNX symbolic of ``DeformConv2dFunction`` for ascend.

    Emits a single custom ``mmdeploy::DeformableConv2D`` node. Note the
    node's positional input order is (input, weight, offset), which differs
    from the (input, offset, weight) order of the Python-side call.
    """
    # Attribute suffixes follow torch.onnx conventions:
    # `_i` = int(s) attribute, `_s` = string attribute.
    node_attrs = dict(
        strides_i=stride,
        pads_i=padding,
        dilations_i=dilation,
        groups_i=groups,
        deformable_groups_i=deform_groups,
        bias_i=bias,
        im2col_step_i=im2col_step,
        data_format_s="NCHW")
    return g.op('mmdeploy::DeformableConv2D', input, weight, offset,
                **node_attrs)


@FUNCTION_REWRITER.register_rewriter(
    func_name='mmcv.ops.deform_conv.DeformConv2dFunction.forward',
    backend='ascend')
def deformconv2dfunction_forward_ascend(
        ctx,
        input: Tensor,
        offset: Tensor,
        weight: Tensor,
        stride: Union[int, Tuple[int, ...]] = 1,
        padding: Union[int, Tuple[int, ...]] = 0,
        dilation: Union[int, Tuple[int, ...]] = 1,
        groups: int = 1,
        deform_groups: int = 1,
        bias: bool = False,
        im2col_step: int = 32) -> Tensor:
    """Rewrite ``DeformConv2dFunction.forward`` for the ascend backend.

    Mirrors mmcv's forward, with one export-time difference: during ONNX
    export a dummy tensor of the correct output shape is returned instead of
    running the native kernel, so tracing can proceed without the extension.

    Args:
        ctx: Autograd/function context; conv hyper-parameters are stashed on
            it for the backward pass.
        input (Tensor): Input feature map, shape (B, C_in, H, W).
        offset (Tensor): Sampling offsets produced by the offset conv.
        weight (Tensor): Convolution weight.
        stride, padding, dilation: Int or pair; normalized via ``_pair``.
        groups (int): Convolution groups.
        deform_groups (int): Deformable groups.
        bias (bool): Must be False; DeformConv2d has no bias.
        im2col_step (int): im2col batch chunk size.

    Returns:
        Tensor: Convolution output (random placeholder during ONNX export).

    Raises:
        ValueError: If ``input`` is not a 4D tensor.
    """
    if input is not None and input.dim() != 4:
        # NOTE: message previously contained a run of spaces leaked in by a
        # backslash line continuation inside the f-string; now single-line.
        raise ValueError(
            f'Expected 4D tensor as input, got {input.dim()}D tensor instead.')
    assert bias is False, 'Only support bias is False.'
    ctx.stride = _pair(stride)
    ctx.padding = _pair(padding)
    ctx.dilation = _pair(dilation)
    ctx.groups = groups
    ctx.deform_groups = deform_groups
    ctx.im2col_step = im2col_step
    ctx.device = input.device.type

    # When pytorch version >= 1.6.0, amp is adopted for fp16 mode;
    # amp won't cast the type of model (float32), but "offset" is cast
    # to float16 by nn.Conv2d automatically, leading to the type
    # mismatch with input (when it is float32) or weight.
    # The flag for whether to use fp16 or amp is the type of "offset",
    # we cast weight and input to temporarily support fp16 and amp
    # whatever the pytorch version is.
    input = input.type_as(offset)
    weight = weight.type_as(input)
    if ctx.device == 'npu':
        # NPU kernel only implements the modulated variant; emulate plain
        # deform conv by feeding an all-ones mask and an empty bias.
        # NOTE(review): `offset` is passed straight through to
        # ModulatedDeformConv2dFunction._npu_forward — assumes the NPU op
        # accepts this offset layout; confirm against mmcv's npu support.
        mask_shape, _ = torch.chunk(offset, 2, dim=1)
        mask = torch.ones_like(mask_shape).to(input.device)
        bias = input.new_empty(0)
        output = ModulatedDeformConv2dFunction._npu_forward(
            ctx, input, offset, mask, weight, bias)
        return output
    ctx.save_for_backward(input, offset, weight)

    output = input.new_empty([
        int(i)
        for i in DeformConv2dFunction._output_size(ctx, input, weight)
    ])

    ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]  # columns, ones

    cur_im2col_step = min(ctx.im2col_step, input.size(0))
    assert (input.size(0) % cur_im2col_step
            ) == 0, 'batch size must be divisible by im2col_step'
    if torch.onnx.is_in_onnx_export():
        # Shape-only placeholder: the real computation is delegated to the
        # backend op emitted by the symbolic rewrite, not traced here.
        return torch.rand(output.shape)
    ext_module.deform_conv_forward(
        input,
        weight,
        offset,
        output,
        ctx.bufs_[0],
        ctx.bufs_[1],
        kW=weight.size(3),
        kH=weight.size(2),
        dW=ctx.stride[1],
        dH=ctx.stride[0],
        padW=ctx.padding[1],
        padH=ctx.padding[0],
        dilationW=ctx.dilation[1],
        dilationH=ctx.dilation[0],
        group=ctx.groups,
        deformable_group=ctx.deform_groups,
        im2col_step=cur_im2col_step)
    return output


@FUNCTION_REWRITER.register_rewriter(
    func_name='mmcv.ops.deform_conv.DeformConv2d.forward', 
    backend='ascend')
def deformconv2d_forward_ascend(self, x: Tensor, offset: Tensor) -> Tensor:
    """Deformable Convolutional forward function.

    Args:
        x (Tensor): Input feature, shape (B, C_in, H_in, W_in)
        offset (Tensor): Offset for deformable convolution, shape
            (B, deform_groups*kernel_size[0]*kernel_size[1]*2,
            H_out, W_out), H_out, W_out are equal to the output's.

            An offset is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`.
            The spatial arrangement is like:

            .. code:: text

                (x0, y0) (x1, y1) (x2, y2)
                (x3, y3) (x4, y4) (x5, y5)
                (x6, y6) (x7, y7) (x8, y8)

    Returns:
        Tensor: Output of the layer.
    """
    # To fix an assert error in deform_conv_cuda.cpp:128
    # input image is smaller than kernel
    input_pad = (x.size(2) < self.kernel_size[0]) or (x.size(3) <
                                                        self.kernel_size[1])
    if input_pad:
        # Zero-pad bottom/right so the spatial size is at least the kernel
        # size; the extra output rows/cols are cropped off again below.
        pad_h = max(self.kernel_size[0] - x.size(2), 0)
        pad_w = max(self.kernel_size[1] - x.size(3), 0)
        x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous()
        offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant', 0)
        offset = offset.contiguous()
    if torch.onnx.is_in_onnx_export():
        # Export-time repack for the ascend DeformableConv2D op: split the
        # interleaved (y, x) offset channels into separate y- and x-offset
        # halves and append an all-ones mask, i.e. convert to the
        # modulated-deform-conv channel layout (x-offsets, y-offsets, mask).
        # NOTE(review): the `.numpy()` calls on `offset.shape[i]` assume each
        # shape entry is a traced 0-d Tensor during ONNX export (a plain int
        # has no `.numpy()`) — confirm against the export mode in use.
        offset_y = offset.reshape(1, -1, 2, offset.shape[2].numpy(), 
            offset.shape[3].numpy())[:, :, 0, ...].reshape(1, 
            offset.shape[1].numpy() // 2, offset.shape[2].numpy(), 
            offset.shape[3].numpy())
        offset_x = offset.reshape(1, -1, 2, offset.shape[2].numpy(), 
            offset.shape[3].numpy())[:, :, 1, ...].reshape(1, 
            offset.shape[1].numpy() // 2, offset.shape[2].numpy(), 
            offset.shape[3].numpy())
        # Mask of ones: plain (non-modulated) deform conv applies no
        # per-sample modulation. NOTE(review): created on the default
        # device/dtype, not `offset`'s — presumably fine for tracing; verify.
        mask = torch.ones(offset.shape[0].numpy(), offset.shape[1].numpy() // 2, 
            offset.shape[2].numpy(), offset.shape[3].numpy())
        offset = torch.cat((offset_x, offset_y, mask), 1)

    out = DeformConv2dFunction.apply(
        x, offset, self.weight, 
        self.stride, self.padding, self.dilation, self.groups, 
        self.deform_groups, False, self.im2col_step
    )
    if input_pad:
        # Crop the rows/cols that only exist because of the padding above.
        out = out[:, :, :out.size(2) - pad_h, :out.size(3) -
                    pad_w].contiguous()
    return out

