import torch
from torch import nn


# class LineConv2d(nn.Module):
#     # modulation 是可选参数，为卷积核的每个位置分配一个权重
#     def __init__(self, inc, outc, kernel_size=3, padding=1, stride=1, bias=None, modulation=False):
#         """
#         Args:
#             modulation (bool, optional): If True, Modulated Defomable Convolution (Deformable ConvNets v2).
#         """
#         super(LineConv2d, self).__init__()
#         self.kernel_size = kernel_size
#         self.padding = padding
#         self.stride = stride
#         self.zero_padding = nn.ZeroPad2d(padding)
#
#         self.conv = nn.Conv2d(inc, outc, kernel_size=kernel_size, stride=kernel_size, bias=bias)
#
#         self.modulation = modulation
#         if modulation:
#             self.m_conv = nn.Conv2d(inc, kernel_size*kernel_size, kernel_size=3, padding=1, stride=stride)
#             nn.init.constant_(self.m_conv.weight, 0)
#             self.m_conv.register_backward_hook(self._set_lr)
#
#     @staticmethod
#     def _set_lr(module, grad_input, grad_output):
#         grad_input = (grad_input[i] * 0.1 for i in range(len(grad_input)))
#         grad_output = (grad_output[i] * 0.1 for i in range(len(grad_output)))
#
#     def forward(self, x, angle):
#         if self.modulation:
#             m = torch.sigmoid(self.m_conv(x))
#
#         dtype = angle.data.type()
#         ks = self.kernel_size
#         N = ks * ks
#
#         if self.padding:
#             x = self.zero_padding(x)
#             angle = self.zero_padding(angle)
#
#         # (b, 2N, h, w)
#         # 中心坐标 + 旋转后直线相对坐标/普通相对坐标
#         p = self._get_p(angle, dtype)
#
#         # (b, h, w, 2N)
#         p = p.contiguous().permute(0, 2, 3, 1)
#         q_lt = p.detach().floor()
#         q_rb = q_lt + 1
#
#         # 双线性插值坐标
#         q_lt = torch.cat([torch.clamp(q_lt[..., :N], 0, x.size(2)-1), torch.clamp(q_lt[..., N:], 0, x.size(3)-1)], dim=-1).long()
#         q_rb = torch.cat([torch.clamp(q_rb[..., :N], 0, x.size(2)-1), torch.clamp(q_rb[..., N:], 0, x.size(3)-1)], dim=-1).long()
#         q_lb = torch.cat([q_lt[..., :N], q_rb[..., N:]], dim=-1)
#         q_rt = torch.cat([q_rb[..., :N], q_lt[..., N:]], dim=-1)
#
#         # clip p
#         p = torch.cat([torch.clamp(p[..., :N], 0, x.size(2)-1), torch.clamp(p[..., N:], 0, x.size(3)-1)], dim=-1)
#
#         # bilinear kernel (b, h, w, N) 双线性插值权重
#         g_lt = (1 + (q_lt[..., :N].type_as(p) - p[..., :N])) * (1 + (q_lt[..., N:].type_as(p) - p[..., N:]))
#         g_rb = (1 - (q_rb[..., :N].type_as(p) - p[..., :N])) * (1 - (q_rb[..., N:].type_as(p) - p[..., N:]))
#         g_lb = (1 + (q_lb[..., :N].type_as(p) - p[..., :N])) * (1 - (q_lb[..., N:].type_as(p) - p[..., N:]))
#         g_rt = (1 - (q_rt[..., :N].type_as(p) - p[..., :N])) * (1 + (q_rt[..., N:].type_as(p) - p[..., N:]))
#
#         # (b, c, h, w, N) 双线性插值取值
#         x_q_lt = self._get_x_q(x, q_lt, N)
#         x_q_rb = self._get_x_q(x, q_rb, N)
#         x_q_lb = self._get_x_q(x, q_lb, N)
#         x_q_rt = self._get_x_q(x, q_rt, N)
#
#         # (b, c, h, w, N) 双线性插值结果
#         x_offset = g_lt.unsqueeze(dim=1) * x_q_lt + \
#                    g_rb.unsqueeze(dim=1) * x_q_rb + \
#                    g_lb.unsqueeze(dim=1) * x_q_lb + \
#                    g_rt.unsqueeze(dim=1) * x_q_rt
#
#         # modulation
#         if self.modulation:
#             m = m.contiguous().permute(0, 2, 3, 1)
#             m = m.unsqueeze(dim=1)
#             m = torch.cat([m for _ in range(x_offset.size(1))], dim=1)
#             x_offset *= m
#
#         # 将每个卷积核的值恢复成方形
#         x_offset = self._reshape_x_offset(x_offset, ks)
#         # 卷积处理，即将卷积核的值加权相加处理
#         out = self.conv(x_offset)
#
#         return out
#
#     def _get_p_o(self, N, dtype):
#         # 卷积核内部的坐标网格，以卷积核中心为原点
#         p_o_x, p_o_y = torch.meshgrid(
#             torch.arange(-(self.kernel_size - 1) // 2, (self.kernel_size - 1) // 2 + 1),
#             torch.arange(-(self.kernel_size - 1) // 2, (self.kernel_size - 1) // 2 + 1))
#         # (2N, 1)
#         p_o = torch.cat([torch.flatten(p_o_x), torch.flatten(p_o_y)], 0)
#         p_o = p_o.view(1, 2 * N, 1, 1).type(dtype)
#
#         return p_o
#     def _get_p_n(self, N, dtype):
#         # 卷积核内部的坐标网格，以卷积核中心为原点，水平方向排列
#         p_n_x, p_n_y = torch.meshgrid(
#             torch.arange(0, 1),
#             torch.arange(-(self.kernel_size * self.kernel_size -1)//2, (self.kernel_size * self.kernel_size-1)//2+1))
#         # (2N, 1)
#         p_n = torch.cat([torch.flatten(p_n_x), torch.flatten(p_n_y)], 0)
#         p_n = p_n.view(1, 2*N, 1, 1).type(dtype)
#
#         return p_n
#
#     def _get_p_0(self, N, angle, dtype):
#         # 已经是padding后的尺寸
#         H, W = angle.size(2), angle.size(3)
#         # 从 1 开始，即对原始 x padding 1
#         p_0_x, p_0_y = torch.meshgrid(
#             torch.arange((self.kernel_size - 1) // 2, H - (self.kernel_size - 1) // 2, self.stride),
#             torch.arange((self.kernel_size - 1) // 2, W - (self.kernel_size - 1) // 2, self.stride))
#         row = p_0_x.size(0)
#         col = p_0_x.size(1)
#         angle = angle[:, :, p_0_x, p_0_y]
#         theta = torch.deg2rad(angle)
#         p_0_x = torch.flatten(p_0_x).view(1, 1, row, col).repeat(1, N, 1, 1)
#         p_0_y = torch.flatten(p_0_y).view(1, 1, row, col).repeat(1, N, 1, 1)
#         # 1 x 2N x h x w
#         p_0 = torch.cat([p_0_x, p_0_y], 1).type(dtype)
#
#         return p_0, theta, angle
#
#     def _get_p(self, angle, dtype):
#         b = angle.size(0)
#         N = self.kernel_size * self.kernel_size
#         # (1, 2N, 1, 1) 直线卷积相对位置
#         p_n = self._get_p_n(N, dtype)
#         # 普通卷积核相对位置
#         p_o = self._get_p_n(N, dtype)
#
#         # (1, 2N, h, w) 原始图像中卷积核中心坐标位置
#         # theta, angle_n 大小为 b x 1 x h x w
#         p_0, theta, angle_n = self._get_p_0(N, angle,  dtype)
#         h, w = p_0.size(2), p_0.size(3)
#
#         # 1, 2N, 1, 1 -> b, 2N, h, w
#         p_n = p_n.repeat(b, 1, h, w)
#         p_o = p_o.repeat(b, 1, h, w)
#
#         # 获取坐标张量的行坐标和列坐标部分
#         row_coordinates = p_n[:, :N, :, :]
#         col_coordinates = p_n[:, N:, :, :]
#
#         # 将角度张量扩展到与坐标张量相同的维度（b x N x h x w）
#         theta = theta.expand(-1, N, -1, -1)
#
#         # 计算旋转后的坐标
#         col_rotated = col_coordinates * torch.cos(theta) + row_coordinates * torch.sin(theta)
#         row_rotated = -col_coordinates * torch.sin(theta) + row_coordinates * torch.cos(theta)
#
#         # 将旋转后的坐标组合成大小为 b x 2N x h x w 的张量
#         rotated_coordinates = torch.cat([row_rotated, col_rotated], dim=1)
#
#         # 将角度小于-3.的位置认为不存在角度，则使用普通卷积处理
#         bo, _, ho, wo = torch.where(angle_n < -3.0)
#         rotated_coordinates[bo, :, ho, wo] = p_o[bo, :, ho, wo]
#
#         # b 2N h w + 1 2N h w
#         p = rotated_coordinates + p_0
#
#         return p
#
#     def _get_x_q(self, x, q, N):
#         b, h, w, _ = q.size()
#         padded_w = x.size(3)
#         c = x.size(1)
#         # (b, c, H*W)
#         x = x.contiguous().view(b, c, -1)
#
#         # (b, h, w, N)
#         index = q[..., :N]*padded_w + q[..., N:]  # offset_x*w + offset_y
#         # (b, c, h*w*N)
#         index = index.contiguous().unsqueeze(dim=1).expand(-1, c, -1, -1, -1).contiguous().view(b, c, -1)
#
#         # b c h w N
#         x_offset = x.gather(dim=-1, index=index).contiguous().view(b, c, h, w, N)
#
#         return x_offset
#
#     @staticmethod
#     def _reshape_x_offset(x_offset, ks):
#         b, c, h, w, N = x_offset.size()
#         x_offset = torch.cat([x_offset[..., s:s+ks].contiguous().view(b, c, h, w*ks) for s in range(0, N, ks)], dim=-1)
#         x_offset = x_offset.contiguous().view(b, c, h*ks, w*ks)
#
#         return x_offset

class LineConv2d(nn.Module):
    """Line-shaped (rotatable) convolution.

    Instead of a square k x k neighbourhood, this layer samples
    N = kernel_size**2 points laid out on a straight line through each
    output position, rotated by a per-pixel angle map (degrees).
    Positions whose angle value is below -2.0 are treated as having no
    angle and fall back to the ordinary square k x k grid.  Samples are
    gathered with bilinear interpolation, unfolded into an
    (h*k, w*k) plane and reduced by a stride-k convolution — the same
    layout trick used by common deformable-convolution reference
    implementations.
    """

    # `modulation` optionally assigns a learned weight to every kernel tap.
    def __init__(self, inc, outc, kernel_size=3, padding=4, stride=1, bias=None, modulation=False):
        """
        Args:
            inc (int): input channels.
            outc (int): output channels.
            kernel_size (int): taps per side; the line kernel uses
                kernel_size**2 taps in a single row.
            padding (int): zero padding applied to both the input and the
                angle map.  NOTE(review): the default 4 equals
                (kernel_size**2 - 1) // 2 only for kernel_size=3 — pass a
                matching value for other kernel sizes (see _get_p_0).
            stride (int): stride of the output sampling grid.
            bias: forwarded to the reduction nn.Conv2d (None -> no bias).
            modulation (bool, optional): If True, Modulated Deformable
                Convolution (Deformable ConvNets v2) style per-tap weights.
        """
        super(LineConv2d, self).__init__()
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride
        self.zero_padding = nn.ZeroPad2d(padding)

        # stride == kernel_size: the conv consumes one k x k tile of the
        # unfolded sample plane per output position.
        self.conv = nn.Conv2d(inc, outc, kernel_size=kernel_size, stride=kernel_size, bias=bias)

        self.modulation = modulation
        if modulation:
            # Predicts one modulation scalar per kernel tap.
            self.m_conv = nn.Conv2d(inc, kernel_size*kernel_size, kernel_size=3, padding=1, stride=stride)
            nn.init.constant_(self.m_conv.weight, 0)
            # NOTE(review): register_backward_hook is deprecated in recent
            # PyTorch (register_full_backward_hook is the replacement);
            # kept as-is for compatibility with the original design.
            self.m_conv.register_backward_hook(self._set_lr)

    @staticmethod
    def _set_lr(module, grad_input, grad_output):
        # Scale the modulation branch's gradients by 0.1 (an effective
        # reduced learning rate for m_conv, as in Deformable ConvNets v2).
        # BUGFIX: the previous version built generator expressions, bound
        # them to local names and returned None — a no-op.  A backward
        # hook must *return* the replacement grad_input for the scaling
        # to take effect.
        return tuple(g * 0.1 if g is not None else None for g in grad_input)

    def forward(self, x, angle):
        # angle: per-pixel rotation map in degrees, expected shape
        # (b, 1, h, w) — it is indexed with a single channel in _get_p_0.
        if self.modulation:
            m = torch.sigmoid(self.m_conv(x))

        dtype = angle.data.type()
        ks = self.kernel_size
        N = ks * ks

        if self.padding:
            x = self.zero_padding(x)
            angle = self.zero_padding(angle)

        # (b, 2N, h, w): absolute sampling coordinates = grid centres plus
        # either the rotated line offsets or the plain square offsets.
        p = self._get_p(angle, dtype)

        # (b, h, w, 2N)
        p = p.contiguous().permute(0, 2, 3, 1)
        q_lt = p.detach().floor()
        q_rb = q_lt + 1

        # Integer corner coordinates for bilinear interpolation,
        # clamped to the padded image bounds.
        q_lt = torch.cat([torch.clamp(q_lt[..., :N], 0, x.size(2)-1), torch.clamp(q_lt[..., N:], 0, x.size(3)-1)], dim=-1).long()
        q_rb = torch.cat([torch.clamp(q_rb[..., :N], 0, x.size(2)-1), torch.clamp(q_rb[..., N:], 0, x.size(3)-1)], dim=-1).long()
        q_lb = torch.cat([q_lt[..., :N], q_rb[..., N:]], dim=-1)
        q_rt = torch.cat([q_rb[..., :N], q_lt[..., N:]], dim=-1)

        # clip p
        p = torch.cat([torch.clamp(p[..., :N], 0, x.size(2)-1), torch.clamp(p[..., N:], 0, x.size(3)-1)], dim=-1)

        # bilinear kernel (b, h, w, N): interpolation weights per corner
        g_lt = (1 + (q_lt[..., :N].type_as(p) - p[..., :N])) * (1 + (q_lt[..., N:].type_as(p) - p[..., N:]))
        g_rb = (1 - (q_rb[..., :N].type_as(p) - p[..., :N])) * (1 - (q_rb[..., N:].type_as(p) - p[..., N:]))
        g_lb = (1 + (q_lb[..., :N].type_as(p) - p[..., :N])) * (1 - (q_lb[..., N:].type_as(p) - p[..., N:]))
        g_rt = (1 - (q_rt[..., :N].type_as(p) - p[..., :N])) * (1 + (q_rt[..., N:].type_as(p) - p[..., N:]))

        # (b, c, h, w, N): pixel values at the four corners
        x_q_lt = self._get_x_q(x, q_lt, N)
        x_q_rb = self._get_x_q(x, q_rb, N)
        x_q_lb = self._get_x_q(x, q_lb, N)
        x_q_rt = self._get_x_q(x, q_rt, N)

        # (b, c, h, w, N): bilinear interpolation result
        x_offset = g_lt.unsqueeze(dim=1) * x_q_lt + \
                   g_rb.unsqueeze(dim=1) * x_q_rb + \
                   g_lb.unsqueeze(dim=1) * x_q_lb + \
                   g_rt.unsqueeze(dim=1) * x_q_rt

        # modulation: weight each sampled tap by the learned scalar in [0, 1]
        if self.modulation:
            m = m.contiguous().permute(0, 2, 3, 1)
            m = m.unsqueeze(dim=1)
            m = torch.cat([m for _ in range(x_offset.size(1))], dim=1)
            x_offset *= m

        # Unfold each position's N samples back into a k x k tile ...
        x_offset = self._reshape_x_offset(x_offset, ks)
        # ... and reduce each tile with the stride-k convolution
        # (i.e. the weighted sum over kernel taps).
        out = self.conv(x_offset)

        return out

    def _get_p_n(self, N, dtype):
        # Line-kernel offsets: N taps in one horizontal row, centred on
        # the kernel origin.  Returned as (1, 2N, 1, 1) with the first N
        # channels the row offsets (all 0) and the last N the column
        # offsets (-(N-1)//2 .. (N-1)//2).
        # NOTE(review): torch.meshgrid without indexing= defaults to 'ij'
        # and warns on recent PyTorch — kept for compatibility.
        p_n_u, p_n_v = torch.meshgrid(
            torch.arange(0, 1),
            torch.arange(-(self.kernel_size * self.kernel_size -1)//2, (self.kernel_size * self.kernel_size-1)//2+1))
        # (2N,): row coordinates then column coordinates
        p_n = torch.cat([torch.flatten(p_n_u), torch.flatten(p_n_v)], 0)
        p_n = p_n.view(1, 2*N, 1, 1).type(dtype)

        return p_n

    def _get_p_o(self, N, dtype):
        # Ordinary square-kernel offsets: k x k grid centred on the
        # kernel origin, returned as (1, 2N, 1, 1).
        p_o_u, p_o_v = torch.meshgrid(
            torch.arange(-(self.kernel_size - 1) // 2, (self.kernel_size - 1) // 2 + 1),
            torch.arange(-(self.kernel_size - 1) // 2, (self.kernel_size - 1) // 2 + 1))
        # (2N,)
        p_o = torch.cat([torch.flatten(p_o_u), torch.flatten(p_o_v)], 0)
        p_o = p_o.view(1, 2 * N, 1, 1).type(dtype)

        return p_o

    def _get_p_0(self, N, angle, dtype):
        # `angle` already has the padded spatial size here.
        H, W = angle.size(2), angle.size(3)
        # Grid of kernel-centre coordinates.  The margin (N-1)//2 matches
        # the reach of the horizontal line kernel (hence padding=4 for
        # kernel_size=3, where N=9).
        p_0_u, p_0_v = torch.meshgrid(
            torch.arange((N - 1) // 2, H - ((N - 1) // 2), self.stride),
            torch.arange((N - 1) // 2, W - ((N - 1) // 2), self.stride))
        row = p_0_u.size(0)
        col = p_0_v.size(1)
        # Angle at each kernel centre: (b, 1, row, col)
        angle = angle[:, :, p_0_u, p_0_v]
        theta = torch.deg2rad(angle)
        p_0_x = torch.flatten(p_0_u).view(1, 1, row, col).repeat(1, N, 1, 1)
        p_0_y = torch.flatten(p_0_v).view(1, 1, row, col).repeat(1, N, 1, 1)
        # (1, 2N, h, w)
        p_0 = torch.cat([p_0_x, p_0_y], 1).type(dtype)

        return p_0, theta, angle

    def _get_p(self, angle, dtype):
        b = angle.size(0)
        N = self.kernel_size * self.kernel_size
        # (1, 2N, 1, 1) line-kernel relative offsets
        p_n = self._get_p_n(N, dtype)
        # (1, 2N, 1, 1) square-kernel relative offsets (fallback)
        p_o = self._get_p_o(N, dtype)

        # (1, 2N, h, w) kernel-centre coordinates in the padded image;
        # theta and angle_n are (b, 1, h, w)
        p_0, theta, angle_n = self._get_p_0(N, angle,  dtype)
        h, w = p_0.size(2), p_0.size(3)

        # (1, 2N, 1, 1) -> (b, 2N, h, w)
        p_n = p_n.repeat(b, 1, h, w)
        p_o = p_o.repeat(b, 1, h, w)

        # Split into row and column offset halves
        row_coordinates = p_n[:, :N, :, :]
        col_coordinates = p_n[:, N:, :, :]

        # Broadcast the angle to every tap: (b, N, h, w)
        theta = theta.expand(-1, N, -1, -1)

        # Rotate the line offsets only where an angle exists.
        # NOTE(review): values below -2.0 are treated as a "no angle"
        # sentinel (the commented-out revision used -3.0); confirm the
        # sentinel convention with whatever produces the angle map.
        has_angle_condition = angle_n >= -2.0
        col_rotated = torch.where(has_angle_condition,
                                  col_coordinates * torch.cos(theta) + row_coordinates * torch.sin(theta),
                                  p_o[:, N:, :, :])
        row_rotated = torch.where(has_angle_condition,
                                  -col_coordinates * torch.sin(theta) + row_coordinates * torch.cos(theta),
                                  p_o[:, :N, :, :])

        # Recombine into (b, 2N, h, w)
        rotated_coordinates = torch.cat([row_rotated, col_rotated], dim=1)

        # (b, 2N, h, w) + (1, 2N, h, w): absolute sampling coordinates
        p = rotated_coordinates + p_0

        return p

    def _get_x_q(self, x, q, N):
        # Gather pixel values of the padded input `x` at the integer
        # coordinates `q` (b, h, w, 2N) -> (b, c, h, w, N).
        b, h, w, _ = q.size()
        padded_w = x.size(3)
        c = x.size(1)
        # (b, c, H*W)
        x = x.contiguous().view(b, c, -1)

        # (b, h, w, N): flatten 2-D coordinates to linear indices
        index = q[..., :N]*padded_w + q[..., N:]  # offset_x*w + offset_y
        # (b, c, h*w*N)
        index = index.contiguous().unsqueeze(dim=1).expand(-1, c, -1, -1, -1).contiguous().view(b, c, -1)

        # (b, c, h, w, N)
        x_offset = x.gather(dim=-1, index=index).contiguous().view(b, c, h, w, N)

        return x_offset

    @staticmethod
    def _reshape_x_offset(x_offset, ks):
        # Rearrange the N samples of every output position into a
        # contiguous ks x ks tile: (b, c, h, w, N) -> (b, c, h*ks, w*ks).
        b, c, h, w, N = x_offset.size()
        x_offset = torch.cat([x_offset[..., s:s+ks].contiguous().view(b, c, h, w*ks) for s in range(0, N, ks)], dim=-1)
        x_offset = x_offset.contiguous().view(b, c, h*ks, w*ks)

        return x_offset