import mindspore
import numpy as np

from mindspore import nn
from mindspore import ops
from mindspore import Tensor
from mindspore.ops import grid_sample, constexpr
from mindspore import numpy as mnp
from mindspore.common.parameter import Parameter

EPS = 1e-8


class CrossEntropyLabelSmooth(nn.Cell):
    """
    Cross entropy loss with label smoothing regularizer.

    Reference:
    Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
    Equation: y = (1 - epsilon) * y + epsilon / K.

    Args:
    - epsilon (float): smoothing weight; the number of classes K is inferred
      from the second dimension of the logits at call time.
    """

    def __init__(self, epsilon=0.1):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(axis=1)
        self.sum = mindspore.ops.ReduceSum(keep_dims=False)
        # One-hot with the class axis inserted at position 1, matching the
        # (batch_size, num_classes) layout of the logits.
        self.onehot = ops.OneHot(1)

    def construct(self, inputs, targets):
        """
        Args:
        - inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
        - targets: ground truth class indices with shape (batch_size,)
        """
        num_classes = inputs.shape[1]
        log_probs = self.logsoftmax(inputs)
        depth, on_value, off_value = num_classes, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
        targets = self.onehot(targets, depth, on_value, off_value)
        # Label smoothing: blend the one-hot target with a uniform distribution.
        targets = (1 - self.epsilon) * targets + self.epsilon / num_classes
        # Mean over the batch, then sum over classes -> scalar loss.
        loss = (- targets * log_probs).mean(axis=0, keep_dims=False)
        loss = self.sum(loss, 0)

        return loss


class MultiCropWrapper(nn.Cell):
    """
    Run the backbone once per input resolution and the head once on the
    concatenated features.

    Crops sharing a resolution are batched together, so the number of
    backbone forward passes equals the number of distinct resolutions;
    all per-resolution outputs are concatenated before the head runs.
    """

    def __init__(self, backbone, head):
        super(MultiCropWrapper, self).__init__()
        # Strip the ImageNet classification layers from the backbone.
        backbone.fc, backbone.head = nn.Identity(), nn.Identity()
        self.backbone = backbone
        self.head = head
        self.cumsum = ops.CumSum(exclusive=False)
        self.concat = ops.Concat()

    def construct(self, x):
        # Normalize the input into a list of crops.
        if not isinstance(x, list):
            x = [x]
        # Hard-coded crop boundaries: 2 global crops, optionally 10 local ones.
        crop_ends = [2] if len(x) == 2 else [2, 12]
        output = None
        begin = 0
        for end in crop_ends:
            # NOTE(review): only element [0] of the backbone output is used
            # here (modified for the clustering use case).
            feats = self.backbone(self.concat(x[begin:end]))[0]
            output = feats if begin == 0 else ops.concat((output, feats))
            begin = end
        # Run the head forward on the concatenated features.
        return self.head(output)


class MultiCropWrapper_v2(nn.Cell):
    """
    Perform forward pass separately on each resolution input.
    The inputs corresponding to a single resolution are clubbed and single
    forward is run on the same resolution inputs. Hence we do several
    forward passes = number of different resolutions used. We then
    concatenate all the output features and run the head forward on these
    concatenated features.

    NOTE(review): in its current form this variant only processes the first
    crop and selects element [1] of the backbone output; the multi-crop
    path is disabled.
    """

    def __init__(self, backbone, head):
        super(MultiCropWrapper_v2, self).__init__()
        # Disable layers dedicated to ImageNet labels classification.
        backbone.fc, backbone.head = nn.Identity(), nn.Identity()
        self.backbone = backbone
        self.head = head
        self.cumsum = ops.CumSum(exclusive=False)

    def construct(self, x):
        # Accept a bare tensor as a single-crop list.
        if not isinstance(x, list):
            x = [x]
        # Only the first crop is used; element [1] of the backbone output
        # is taken (an auxiliary output of the backbone).
        output = self.backbone(x[0])[1]
        # Run the head forward on the selected features.
        return self.head(output)


class ClusteringHead(nn.Cell):
    """Per-pixel clustering head: 1x1 conv + softmax, upsampled to input_sz.

    Holds `num_sub_heads` independent sub-heads; each produces a soft
    class-assignment map that is bilinearly resized to `input_sz`.
    """

    def __init__(self, in_dim, out_dim, input_sz, num_sub_heads=1):
        super(ClusteringHead, self).__init__()
        self.num_sub_heads = num_sub_heads
        self.heads = nn.CellList([
            nn.SequentialCell(
                nn.Conv2d(in_dim, out_dim, kernel_size=1,
                          stride=1, dilation=1, has_bias=False),
                nn.Softmax(axis=1),
            )
            for _ in range(self.num_sub_heads)
        ])
        self.input_sz = input_sz

    def construct(self, x):
        # One upsampled soft-assignment map per sub-head.
        outputs = []
        for idx in range(self.num_sub_heads):
            prob_map = self.heads[idx](x)
            prob_map = ops.interpolate(prob_map, sizes=self.input_sz, mode="bilinear")
            outputs.append(prob_map)

        return outputs


class ClusteringTwoHead(nn.Cell):
    """
    Two clustering heads over the same backbone token features.

    Head "A" has the target number of clusters being trained; head "B" is
    an over-clustering head with a larger number of clusters.

    Args:
    - in_dim (int): feature dimension of the backbone tokens (e.g. 768).
    - out_dim_A (int): cluster count for head A.
    - out_dim_B (int): (larger) cluster count for head B.
    - input_sz: spatial size the head outputs are upsampled to.
    - num_sub_heads (int): number of sub-heads per clustering head.
    """

    def __init__(self, in_dim, out_dim_A, out_dim_B, input_sz, num_sub_heads=1):
        super(ClusteringTwoHead, self).__init__()
        # BUGFIX: num_sub_heads was accepted but silently ignored; forward it.
        self.head_A = ClusteringHead(in_dim, out_dim_A, input_sz, num_sub_heads)
        self.head_B = ClusteringHead(in_dim, out_dim_B, input_sz, num_sub_heads)

    def construct(self, x, patches_h=None, patches_w=None, head="A"):
        """
        Args:
        - x: token features with shape (batch, num_patches, in_dim).
        - patches_h / patches_w: patch-grid layout; a square grid is assumed
          when either is missing.
        - head: which head to run, "A" or "B".
        """
        x = x.transpose(0, 2, 1)
        num_patches = x.shape[2]
        # BUGFIX: previously only patches_w was checked, so passing patches_h
        # alone left patches_w as None and broke the reshape below.
        if patches_h is None or patches_w is None:
            side = np.sqrt(num_patches).astype(int)
            patches_h = patches_w = side
        # (batch, in_dim, patches_h, patches_w), e.g. (16, 768, 14, 14).
        x = x.reshape(x.shape[0], x.shape[1], patches_h, patches_w)
        if head == "A":
            return self.head_A(x)
        if head == "B":
            return self.head_B(x)
        # `assert False` is stripped under -O; raise explicitly instead.
        raise ValueError("head must be 'A' or 'B', got %r" % (head,))


def get_param_groups(network):
    """Split trainable params into weight-decay and no-weight-decay groups.

    Biases and BatchNorm affine parameters (gamma/beta) are excluded from
    weight decay; everything else decays.

    Args:
    - network: a cell exposing ``trainable_params()`` whose items carry a
      ``name`` attribute.

    Returns:
    A list of two optimizer param-group dicts: the no-decay group (with
    ``weight_decay`` forced to 0.0) followed by the decay group.
    """
    # Name-suffix heuristic: '.bias' covers all biases; '.gamma'/'.beta'
    # cover BN affine params — be careful, this relies on naming only and
    # does not inspect the owning layer type.
    no_decay_suffixes = ('.bias', '.gamma', '.beta')
    decay_params = []
    no_decay_params = []
    for param in network.trainable_params():
        if param.name.endswith(no_decay_suffixes):
            no_decay_params.append(param)
        else:
            decay_params.append(param)

    return [{'params': no_decay_params, 'weight_decay': 0.0},
            {'params': decay_params}]


class RandomAffine(nn.Cell):
    """
    Apply an independent random affine transform (rotation, shear, scale)
    to each image of a batch.

    Returns the transformed batch together with the per-image forward
    (1->2) and inverse (2->1) 2x3 affine matrices.
    """

    def __init__(self, min_rot, max_rot, min_shear,
                 max_shear, min_scale, max_scale):
        super(RandomAffine, self).__init__()
        self.min_rot = min_rot
        self.max_rot = max_rot
        self.min_shear = min_shear
        self.max_shear = max_shear
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.unsqueeze = ops.ExpandDims()

    def construct(self, img):
        """
        Args:
        - img: image batch with shape (N, C, H, W).

        Returns:
        (transformed batch, affine1_to_2 (N, 2, 3), affine2_to_1 (N, 2, 3)).
        """
        out_image = []
        all_affine1_to_2 = []
        all_affine2_to_1 = []
        assert (len(img.shape) == 4)
        for im in img:
            # Sample rotation/shear (in radians) and scale uniformly.
            a = np.radians(np.random.rand() * (self.max_rot - self.min_rot) + self.min_rot)
            shear = np.radians(np.random.rand() * (self.max_shear - self.min_shear) + self.min_shear)
            scale = np.random.rand() * (self.max_scale - self.min_scale) + self.min_scale
            affine1_to_2 = np.array([[np.cos(a) * scale, - np.sin(a + shear) * scale, 0.],
                                     [np.sin(a) * scale, np.cos(a + shear) * scale, 0.],
                                     [0., 0., 1.]], dtype=np.float32)  # 3x3
            affine2_to_1 = np.linalg.inv(affine1_to_2).astype(np.float32)
            affine1_to_2, affine2_to_1 = affine1_to_2[:2, :], affine2_to_1[:2, :]  # 2x3
            affine1_to_2, affine2_to_1 = Tensor(affine1_to_2), Tensor(affine2_to_1)
            # BUGFIX: the result used to be assigned back to `img`, shadowing
            # the batch being iterated; use a dedicated local name instead.
            warped = perform_affine_tf(self.unsqueeze(im, 0), self.unsqueeze(affine1_to_2, 0))
            affine1_to_2, affine2_to_1 = self.unsqueeze(affine1_to_2, 0), self.unsqueeze(affine2_to_1, 0)
            out_image.append(warped)
            all_affine1_to_2.append(affine1_to_2)
            all_affine2_to_1.append(affine2_to_1)
        out_image = ops.concat(out_image, 0)
        all_affine1_to_2 = ops.concat(all_affine1_to_2, 0)
        all_affine2_to_1 = ops.concat(all_affine2_to_1, 0)
        return out_image, all_affine1_to_2, all_affine2_to_1


def perform_affine_tf(data, tf_matrices):
    """Resample a 4D image batch via grid sampling.

    Args:
    - data: 4D tensor (N, C, H, W); gradients, if any, are preserved.
    - tf_matrices: (N, 2, 3) affine matrices, one per image.

    Returns:
    The resampled batch from ``grid_sample`` (bilinear only, zero padding).
    """

    n_i, k, h, w = data.shape
    n_i2, r, c = tf_matrices.shape
    assert (n_i == n_i2)
    assert (r == 2 and c == 3)

    # NOTE(review): the sampling grid is hard-coded to a constant
    # (1, 224, 224, 2) tensor of ones rather than being derived from
    # `tf_matrices` (the affine_grid call below is disabled), so the
    # matrices only participate via the shape asserts above — confirm this
    # placeholder is intended.
    # grid = affine_grid(tf_matrices, data.shape)  # output should be same size
    grid = mnp.ones((1, 224, 224, 2), np.float16)
    data_tf = grid_sample(data, grid, padding_mode="zeros")  # this can ONLY do bilinear

    return data_tf


class DINOHead(nn.Cell):
    """DINO projection head: MLP -> L2 normalize -> final linear layer.

    The MLP stacks `nlayers` Dense layers (GELU activations, optional
    BatchNorm) mapping in_dim -> hidden_dim -> ... -> bottleneck_dim; the
    bottleneck is L2-normalized before the last Dense projects to out_dim.
    `norm_last_layer` is accepted for API compatibility only — the
    weight-normalized last layer of the reference implementation is not
    used here (a plain Dense is).
    """

    def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048,
                 bottleneck_dim=256):
        super().__init__()
        nlayers = max(nlayers, 1)
        if nlayers == 1:
            # Degenerate case: a single projection to the bottleneck.
            self.mlp = nn.Dense(in_dim, bottleneck_dim)
        else:
            blocks = [nn.Dense(in_dim, hidden_dim)]
            if use_bn:
                blocks.append(nn.BatchNorm1d(hidden_dim))
            blocks.append(nn.GELU())
            for _ in range(nlayers - 2):
                blocks.append(nn.Dense(hidden_dim, hidden_dim))
                if use_bn:
                    blocks.append(nn.BatchNorm1d(hidden_dim))
                blocks.append(nn.GELU())
            blocks.append(nn.Dense(hidden_dim, bottleneck_dim))
            self.mlp = nn.SequentialCell(*blocks)
        # TODO: the reference DINO head weight-norms this layer (weight_g
        # filled with 1 and optionally frozen); plain Dense is used instead.
        self.last_layer = nn.Dense(bottleneck_dim, out_dim)
        self.normalize = ops.L2Normalize(axis=-1, epsilon=1e-12)

    def construct(self, x):
        projected = self.mlp(x)
        projected = self.normalize(projected)
        return self.last_layer(projected)


class DINOLoss(nn.Cell):
    """DINO cross-entropy loss between teacher and student softmax outputs.

    Student logits are sharpened with `student_temp` and split into
    `ncrops` views; the teacher output is softmaxed, detached, and split
    into 2 (global) views.  Cross-entropy is averaged over all
    teacher/student view pairs whose view indices differ.

    NOTE(review): teacher centering (`self.center`) and temperature
    sharpening are currently disabled — those lines in `construct` are
    commented out, so `temp` and `center_momentum` are effectively unused.
    """
    def __init__(self, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
                 warmup_teacher_temp_epochs, nepochs, epoch_split, student_temp=0.1,
                 center_momentum=0.9):
        super().__init__()
        self.student_temp = student_temp
        self.center_momentum = center_momentum
        self.ncrops = ncrops
        # Running center for the two teacher outputs (currently unused).
        self.center = mindspore.numpy.zeros((2, out_dim))
        self.epoch_split = epoch_split
        # Linear warmup of the teacher temperature, then constant at
        # teacher_temp; schedule length is nepochs / epoch_split entries.
        self.teacher_temp_schedule = np.concatenate((
            np.linspace(warmup_teacher_temp,
                        teacher_temp, int(warmup_teacher_temp_epochs / self.epoch_split)),
            np.ones(int(nepochs / self.epoch_split) - int(warmup_teacher_temp_epochs / self.epoch_split)) * teacher_temp
        ))
        self.log_softmax = nn.LogSoftmax()
        self.chunk_ncrops = ops.Split(axis=0, output_num=self.ncrops)
        self.softmax = ops.Softmax(axis=-1)
        # Global step counter, advanced externally via update_iter().
        self.iter = 1000

    def update_epoch(self, epoch):
        # Record the current epoch (set by the training loop).
        self.epoch = epoch

    def update_iter(self, iter):
        # Record the current global step; `iter` shadows the builtin but is
        # kept for caller compatibility.
        self.iter = iter

    def construct(self, student_output, teacher_output):
        """
        Cross-entropy between softmax outputs of the teacher and student networks.
        """
        student_out = student_output / self.student_temp
        student_out = self.chunk_ncrops(student_out)

        # Teacher temperature from the warmup schedule; 3337 is presumably
        # the number of iterations per schedule step — TODO confirm.
        temp = self.teacher_temp_schedule[int(self.iter / 3337)]
        # Teacher centering/sharpening is disabled:
        # teacher_out = (teacher_output - self.center) / temp
        teacher_out = self.softmax(teacher_output)
        # No gradient flows back through the teacher branch.
        teacher_out = ops.stop_gradient(teacher_out)
        teacher_out = ops.split(teacher_out, 0, 2)

        total_loss = 0
        n_loss_terms = 0
        for iq, q in enumerate(teacher_out):
            for v in range(len(student_out)):
                if v == iq:
                    # we skip cases where student and teacher operate on the same view
                    continue
                loss = (-q * self.log_softmax(student_out[v])).sum(axis=-1)
                total_loss += loss.mean()
                n_loss_terms += 1
        total_loss /= n_loss_terms
        # self.update_center(teacher_output)
        return total_loss


def IID_segmentation_loss_uncollapsed(x1_outs, x2_outs, all_affine2_to_1=None,
                                      lamb=1.0,
                                      half_T_side_dense=10,
                                      half_T_side_sparse_min=None,
                                      half_T_side_sparse_max=None):
    """IIC segmentation loss (uncollapsed): mutual information between the
    per-pixel soft segmentations of two views of the same image.

    Args:
    - x1_outs, x2_outs: (N, K, H, W) per-pixel class distributions for the
      two views; shapes must match.
    - all_affine2_to_1: per-image inverse affine matrices mapping view 2
      back into the frame of view 1.
    - lamb: weight on the marginal log terms.
    - half_T_side_dense: half window of allowed dense displacement.
    - half_T_side_sparse_min/max: optional random-translation bounds; no
      sparse translation is applied when unset (None or 0).

    Returns:
    (loss, loss_no_lamb) — the weighted objective and, for analysis, the
    same objective with lamb implicitly 1.
    """
    assert (x1_outs.shape == x2_outs.shape)
    # Bring view 2 back into view 1's coordinate frame.
    x2_outs_inv = perform_affine_tf(x2_outs, all_affine2_to_1)
    # BUGFIX: the previous check (`!= 0`) was also true for the default None
    # values, and random_translation_multiple (an nn.Cell) was invoked as a
    # plain function with the image as its first ctor argument, which raised
    # TypeError.  Instantiate the cell and apply it only when real bounds
    # are supplied.
    if half_T_side_sparse_min not in (None, 0) or half_T_side_sparse_max not in (None, 0):
        x2_outs_inv = random_translation_multiple(
            half_side_min=half_T_side_sparse_min,
            half_side_max=half_T_side_sparse_max)(x2_outs_inv)
    bn, k, h, w = x1_outs.shape
    # Sum over everything except classes by "convolving" x1_outs with
    # x2_outs_inv; the expression is symmetric, so either can be the filter.
    x1_outs = x1_outs.transpose(1, 0, 2, 3)  # k, ni, h, w
    x2_outs_inv = x2_outs_inv.transpose(1, 0, 2, 3)  # k, ni, h, w

    p_i_j_conv = full_conv(x1_outs, x2_outs_inv)
    T_side_dense = half_T_side_dense * 2 + 1

    # T x T x k x k
    p_i_j_raw = p_i_j_conv.transpose(2, 3, 0, 1)
    p_i_j = p_i_j_raw / p_i_j_raw.sum(3, None, True, None)  # norm
    p_i_j = p_i_j.sum(2, None, True, None)
    # Symmetrise by transposing the k x k part.
    p_i_j = (p_i_j + p_i_j.transpose(0, 1, 3, 2)) / 2.0

    # Marginals, broadcast back to T x T x k x k.
    p_i_mat = mnp.tile(p_i_j.sum(2, None, True, None), (1, 1, k, 1))
    p_j_mat = mnp.tile(p_i_j.sum(3, None, True, None), (1, 1, 1, k))

    # For log stability; tiny values cancelled out by mult with p_i_j anyway.
    p_i_j[(p_i_j < EPS)] = EPS
    p_i_mat[(p_i_mat < EPS)] = EPS
    p_j_mat[(p_j_mat < EPS)] = EPS

    # Maximise information.
    loss = (-p_i_j * (ops.log(p_i_j) - lamb * ops.log(p_i_mat) -
                      lamb * ops.log(p_j_mat))).sum() / (
                   T_side_dense * T_side_dense)

    # For analysis only.
    loss_no_lamb = (-p_i_j * (ops.log(p_i_j) - ops.log(p_i_mat) -
                              ops.log(p_j_mat))).sum() / (
                           T_side_dense * T_side_dense)
    return loss, loss_no_lamb


class random_translation_multiple(nn.Cell):
    """Randomly translate a whole batch by one shared (x, y) offset.

    The batch is padded by `half_side_max` on every spatial side, then a
    window of the original size is cropped at a random displacement whose
    per-axis magnitude lies in [half_side_min, half_side_max] with a
    random sign.
    """

    def __init__(self, half_side_min, half_side_max):
        super(random_translation_multiple, self).__init__()
        self.half_side_min = half_side_min
        self.half_side_max = half_side_max
        self.Pad = nn.Pad((half_side_max, half_side_max, half_side_max, half_side_max))

    def construct(self, data):
        n, c, h, w = data.shape

        # Pad the two spatial dimensions equally in all directions.
        padded = self.Pad(data)
        assert (padded.shape[2:] == (2 * self.half_side_max + h, 2 * self.half_side_max + w))

        # Random (x, y) displacement magnitudes with random polarity per axis.
        offsets = np.random.randint(self.half_side_min, self.half_side_max + 1, size=(2,))
        offsets *= np.random.choice([-1, 1], size=(2,), replace=True)

        # -x, -y in the original image frame becomes -x + half_side_max,
        # -y + half_side_max in the padded frame.
        offsets += self.half_side_max

        cropped = padded[:, :, offsets[1]:(offsets[1] + h), offsets[0]:(offsets[0] + w)]
        assert (cropped.shape[2:] == (h, w))

        return cropped

    
def norm_except_dim(v, pow, dim):
    """Compute the `pow`-norm of `v` over all dimensions except `dim`.

    Mirrors torch's ``norm_except_dim`` helper used by weight
    normalization: the result broadcasts against `v`, with size
    ``v.shape[dim]`` along `dim` and 1 elsewhere.  ``dim == -1`` means the
    norm over the whole tensor (a scalar).
    """
    if dim == -1:
        return mnp.norm(v, pow)
    if dim == 0:
        output_size = (v.shape[0],) + (1,) * (v.ndim - 1)
        return mnp.norm(v.view((v.shape[0], -1)), pow, 1).view(output_size)
    if dim == (v.ndim - 1):
        # BUGFIX: the last element must be a 1-tuple; the original added a
        # bare int to a tuple, which raises TypeError.
        output_size = (1,) * (v.ndim - 1) + (v.shape[v.ndim - 1],)
        return mnp.norm(v.view((-1, v.shape[v.ndim - 1])), pow, 0).view(output_size)
    # General case: move `dim` to the front, reduce with the dim=0 branch,
    # then move it back.  BUGFIX: the original recursed with the unchanged
    # `dim` after swapping axes, recursing forever; recurse with 0.
    return norm_except_dim(v.swapaxes(0, dim), pow, 0).swapaxes(0, dim)

def _weight_norm(v, g, dim):
    """Recombine direction `v` and magnitude `g` into a weight: g * v / ||v||."""
    denom = norm_except_dim(v, 2, dim)
    return v * (g / denom)

class WeightNorm(nn.Cell):
    r"""Weight-normalization wrapper for a module's ``weight`` parameter.

    .. math::
         \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|}

    Reparameterizes the wrapped module's weight into a direction
    ``param_v`` and a magnitude ``param_g``, decoupling the magnitude of
    the weight tensor from its direction during optimization.
    With ``dim=0`` (the default) the norm is computed independently per
    output channel/plane; ``dim=None`` computes one norm over the entire
    weight tensor.
    See https://arxiv.org/abs/1602.07868

    Args:
        module (Cell): the wrapped module; must expose a ``weight`` parameter.
        dim (int, optional): dimension over which to compute the norm.

    Example::
        >>> m = WeightNorm(nn.Dense(20, 40))
        >>> m.param_g.shape
        (40, 1)
        >>> m.param_v.shape
        (40, 20)
    """
    def __init__(self, module, dim=0):
        super().__init__()
        # dim=None means "norm over the whole tensor"; encoded as -1 for
        # norm_except_dim.
        self.dim = -1 if dim is None else dim
        self.module = module
        self.assign = ops.Assign()
        # Introduce g and v and express w as g/||v|| * v.
        self.param_g = Parameter(Tensor(norm_except_dim(self.module.weight, 2, self.dim)))
        self.param_g.set_data(mnp.ones(self.param_g.shape))
        self.param_v = Parameter(Tensor(self.module.weight.data))
        self.module.weight.set_data(_weight_norm(self.param_v, self.param_g, self.dim))
        self.use_weight_norm = True

    def construct(self, *inputs, **kwargs):
        # Refresh the wrapped weight from (g, v) before the forward pass,
        # unless weight norm has been removed.
        if self.use_weight_norm:
            self.assign(self.module.weight, _weight_norm(self.param_v, self.param_g, self.dim))
        return self.module(*inputs, **kwargs)

    def remove_weight_norm(self):
        # Bake the current (g, v) product into the weight and stop updating.
        self.assign(self.module.weight, _weight_norm(self.param_v, self.param_g, self.dim))
        self.use_weight_norm = False
        
def full_conv(inputs, weights):
    """Full cross-correlation reduced to scalars.

    For every (input, weight) pair, multiplies the two sub-tensors
    elementwise and sums over all remaining axes, producing an output of
    shape (inputs.shape[0], weights.shape[0], 1, 1).
    """
    reduce_all = ops.ReduceSum(keep_dims=False)
    rows = []
    for input_index in range(inputs.shape[0]):
        cells = []
        for weight_index in range(weights.shape[0]):
            # Elementwise product summed to a scalar.
            scalar = reduce_all(ops.mul(inputs[input_index], weights[weight_index]))
            # Give the scalar a (1, 1, 1) shape so the rows concatenate.
            scalar = ops.expand_dims(scalar, -1)
            scalar = ops.expand_dims(scalar, -1)
            scalar = ops.expand_dims(scalar, -1)
            cells.append(scalar)
        rows.append(ops.expand_dims(ops.concat(cells, 0), 0))
    return ops.concat(rows, 0)