# -*- coding: utf-8 -*-
import jittor
import math


def matsubabs_transpose(a, b):
    """Pair every row of ``a`` with every row of ``b`` (as if multiplying by
    ``b`` transposed) and reduce over the shared last axis.

    Args:
        a: jittor Var of shape ``(..., K)``.
        b: jittor Var of shape ``(M, K)``.

    Returns:
        A tuple ``(sub, mul)`` where, over the last axis of length K,
        ``sub[..., j] = sum_k |a[..., k] - b[j, k]|`` and
        ``mul[..., j] = sum_k  a[..., k] * b[j, k]`` — both of shape ``(..., M)``.
    """
    assert a.shape[-1] == b.shape[-1], (a.shape, b.shape)
    if len(a.shape) != 2:
        # Flatten the leading dims, recurse on the 2-D case, then restore them.
        # NOTE: the recursion returns a (sub, mul) pair, so each element must be
        # reshaped individually (reshaping the tuple itself would crash).
        aa = a.reshape((-1, a.shape[-1]))
        sub, mul = matsubabs_transpose(aa, b)
        out_shape = list(a.shape[:-1]) + [-1]
        return sub.reshape(out_shape), mul.reshape(out_shape)
    assert len(a.shape) == 2 and len(b.shape) == 2

    # shape == [N, M, K]: broadcast `a` along the M axis (dim len-2) and `b`
    # along N, then reduce the trailing K axis.
    shape = list(a.shape)[:-1] + list(b.shape)
    a = a.broadcast(shape, [len(shape) - 2])
    b = b.broadcast(shape)
    return (a - b).abs().sum(len(shape) - 1), (a * b).sum(len(shape) - 1)


class SubtractorLinear(jittor.nn.Module):
    """Linear-like layer that scores inputs against weight rows with a sum of
    absolute differences (L1) instead of a dot product.

    After each forward pass, ``self.regterm`` holds the mean absolute gap
    between the L1 response and the ordinary dot-product response, usable as a
    regularization term.
    """

    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Same initialization scheme as a standard jittor Linear layer.
        self.weight = jittor.init.invariant_uniform((out_features, in_features), "float32")
        bound = 1.0 / math.sqrt(in_features)
        self.bias = jittor.init.uniform((out_features,), "float32", -bound, bound) if bias else None
        # Populated on every forward pass; None until then (mirrors SubtractorConv2D).
        self.regterm = None

    def execute(self, x):
        # x_sub: L1 responses, x_mul: ordinary dot-product responses.
        x_sub, x_mul = matsubabs_transpose(x, self.weight)
        self.regterm = (x_sub - x_mul).abs().mean()
        if self.bias is not None:
            return x_sub + self.bias
        return x_sub


class SubtractorConv2D(jittor.nn.Module):
    """Conv2d-like layer that aggregates |input - weight| over each receptive
    field instead of the usual multiply-accumulate.

    After each forward pass, ``self.regterm`` holds the mean absolute gap
    between the L1 response and the ordinary product response (0 on the CUDA
    depthwise fast path).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Normalize scalar hyper-parameters to (h, w) pairs.
        self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
        self.stride = stride if isinstance(stride, tuple) else (stride, stride)
        self.padding = padding if isinstance(padding, tuple) else (padding, padding)
        self.dilation = dilation if isinstance(dilation, tuple) else (dilation, dilation)
        self.groups = groups
        assert in_channels % groups == 0, 'in_channels must be divisible by groups'
        assert out_channels % groups == 0, 'out_channels must be divisible by groups'
        self.is_depthwise_conv = self.groups == self.out_channels and self.groups == self.in_channels
        if self.is_depthwise_conv and jittor.flags.use_cuda:
            # NOTE(review): this fast path runs an ordinary depthwise convolution,
            # not the subtractor operation — confirm the fallback is intentional.
            self.depthwise_conv = jittor.depthwise_conv.DepthwiseConv(stride, padding, dilation)
        Kh, Kw = self.kernel_size
        self.weight = jittor.init.invariant_uniform([out_channels, in_channels // groups, Kh, Kw], dtype="float")
        # Populated on every forward pass; None until then.
        self.regterm = None
        if bias:
            # fan-in of one output unit = (in_channels // groups) * Kh * Kw
            fan = 1
            for i in self.weight.shape[1:]:
                fan *= i
            bound = 1 / math.sqrt(fan)
            self.bias = jittor.init.uniform([out_channels], dtype="float", low=-bound, high=bound)
        else:
            self.bias = None

    def execute(self, x):
        if self.is_depthwise_conv and jittor.flags.use_cuda:
            # CUDA depthwise fast path (ordinary convolution, see note in __init__).
            y = self.depthwise_conv(x, self.weight)
            if self.bias is not None:
                b = self.bias.broadcast(y.shape, [0, 2, 3])
                y = y + b
            self.regterm = 0
            return y
        elif self.groups == 1:
            N, C, H, W = x.shape
            Kh, Kw = self.kernel_size
            assert C == self.in_channels
            # Standard conv output size with padding/dilation/stride.
            oh = (H + self.padding[0] * 2 - Kh * self.dilation[0] + self.dilation[0] - 1) // self.stride[0] + 1
            ow = (W + self.padding[1] * 2 - Kw * self.dilation[1] + self.dilation[1] - 1) // self.stride[1] + 1
            assert oh > 0 and ow > 0
            # Gather each receptive-field patch: xx[n, oc, c, oh, ow, kh, kw].
            xx = x.reindex([N, self.out_channels, C, oh, ow, Kh, Kw], [
                'i0',  # Nid
                'i2',  # Cid
                f'i3*{self.stride[0]}-{self.padding[0]}+i5*{self.dilation[0]}',  # Hid+Khid
                f'i4*{self.stride[1]}-{self.padding[1]}+i6*{self.dilation[1]}',  # Wid+KWid
            ])
            ww = self.weight.broadcast(xx.shape, [0, 3, 4])
            # L1 responses; regterm compares against the multiplicative responses.
            yy = (xx - ww).abs()
            self.regterm = (yy - xx * ww).abs().mean()
            y = yy.sum([2, 5, 6])  # Kc, Kh, Kw
            if self.bias is not None:
                b = self.bias.broadcast(y.shape, [0, 2, 3])
                y = y + b
            return y
        else:
            # Grouped case: split channels into G groups and reduce per group.
            N, C, H, W = x.shape
            Kh, Kw = self.kernel_size
            G = self.groups
            CpG = C // G  # channels per group
            assert C == self.in_channels
            oc = self.out_channels
            oh = (H + self.padding[0] * 2 - Kh * self.dilation[0] + self.dilation[0] - 1) // self.stride[0] + 1
            ow = (W + self.padding[1] * 2 - Kw * self.dilation[1] + self.dilation[1] - 1) // self.stride[1] + 1
            assert oh > 0 and ow > 0
            xx = x.reindex([N, G, oc // G, CpG, oh, ow, Kh, Kw], [
                'i0',  # Nid
                f'i1*{CpG}+i3',  # Gid
                f'i4*{self.stride[0]}-{self.padding[0]}+i6*{self.dilation[0]}',  # Hid+Khid
                f'i5*{self.stride[1]}-{self.padding[1]}+i7*{self.dilation[1]}',  # Wid+KWid
            ])
            # w: [oc, CpG, Kh, Kw]
            ww = self.weight.reindex([N, G, oc // G, CpG, oh, ow, Kh, Kw], [
                f'i1*{oc // G}+i2',
                'i3',
                'i6',
                'i7'
            ])
            ww.compile_options = xx.compile_options = {"G": G, "C": C}
            yy = (xx - ww).abs()
            self.regterm = (yy - xx * ww).abs().mean()
            # Scatter-add back to [N, oc, oh, ow], summing over CpG, Kh, Kw.
            y = yy.reindex_reduce('add', [N, oc, oh, ow], [
                'i0',
                f'i1*{oc // G}+i2',
                'i4',
                'i5'
            ])
            if self.bias is not None:
                b = self.bias.broadcast(y.shape, [0, 2, 3])
                y = y + b
            return y
