import mindspore as ms
from mindspore import nn, ops
from mindspore import Parameter, Tensor
from mindspore.common.initializer import Zero
import numpy as np
import math
from .KL import KLLoss

"""Contrastive Representation Distillation"""
eps = 1e-7

class Embed(nn.Cell):
    """Linear projection to a low-dimensional, L2-normalized embedding.

    Flattens the input to (batch, -1), maps it through a Dense layer,
    and normalizes each row to unit L2 norm.
    """

    def __init__(self, dim_in=1024, dim_out=128):
        super(Embed, self).__init__()
        # Attribute names are kept stable: they name registered parameters.
        self.linear = nn.Dense(dim_in, dim_out)
        self.l2norm = ops.L2Normalize(axis=1)

    def construct(self, x):
        flat = x.view(x.shape[0], -1)
        projected = self.linear(flat)
        return self.l2norm(projected)


class NCEAverage(nn.Cell):
    """NCE contrast head with two momentum-updated memory banks.

    Keeps one memory bank per modality (teacher / student), each holding one
    feature vector per training sample (``all_data_num`` rows of
    ``opt.feat_dim``). For a batch, each embedding is scored against K+1
    entries gathered from the *other* modality's bank, and (optionally) the
    banks' rows for the batch samples are updated with a momentum average.

    Args:
        all_data_num: total number of samples in the dataset (bank rows).
        opt: options object; only ``opt.feat_dim`` is read here.
        K: number of negatives per sample (each gather takes K + 1 entries;
           presumably index 0 of ``neg`` is the positive — TODO confirm
           against the sampler that builds ``neg``).
        T: softmax temperature used to scale the dot-product scores.
        momentum: memory-bank momentum for the running feature average.
    """

    def __init__(self, all_data_num, opt, K, T=2, momentum=0.9):
        super(NCEAverage, self).__init__()
        self.K = K
        self.T = T
        self.momentum = momentum
        self.all_data_num = all_data_num
        # Banks start as uniform noise in [-stdv, stdv]; requires_grad=False
        # because they are updated manually below, not by the optimizer.
        stdv = 1. / math.sqrt(all_data_num / 3)
        self.teacher_mem = Parameter(Tensor(np.random.rand(all_data_num, opt.feat_dim) * 2 * stdv - stdv, dtype=ms.float32), requires_grad=False)
        self.student_mem = Parameter(Tensor(np.random.rand(all_data_num, opt.feat_dim) * 2 * stdv - stdv, dtype=ms.float32), requires_grad=False)
    
    def construct(self, v1, v2, y, neg, need_update):
        """Score both modalities against the opposite bank and update banks.

        Args:
            v1: teacher-side embeddings, shape (batch, feat_dim).
            v2: student-side embeddings, shape (batch, feat_dim).
            y: dataset indices of the batch samples (rows to update).
            neg: per-sample gather indices into the banks; viewed as
                (batch, K + 1) below, so it must contain K + 1 indices per
                sample.
            need_update: when truthy, momentum-update both memory banks.

        Returns:
            (out_v1, out_v2): temperature-scaled score tensors of shape
            (batch, K + 1) after squeeze.
        """
        batchSize = v1.shape[0]
        inputSize = v1.shape[1]
        batchmul = ops.BatchMatMul()
        div = ops.Div()
        mul = ops.Mul()
        add = ops.Add()
        sub = ops.Sub()
        pow = ops.Pow()  # NOTE(review): shadows the builtin `pow` locally
        index_add = ops.IndexAdd(axis=0)
        no_grad = ops.stop_gradient

        # Score v2 against rows gathered from the teacher bank:
        # (batch, K+1, feat) x (batch, feat, 1) -> (batch, K+1, 1).
        weight_v1 = no_grad(self.teacher_mem.take(neg.astype("int32"), axis=0))
        weight_v1 = weight_v1.view(batchSize, self.K + 1, inputSize)
        out_v2 = batchmul(weight_v1, v2.view(batchSize, inputSize, 1))
        out_v2 = div(out_v2, self.T).squeeze()

        # Symmetrically, score v1 against rows from the student bank.
        weight_v2 = no_grad(self.student_mem.take(neg.astype("int32"), axis=0))
        weight_v2 = weight_v2.view(batchSize, self.K + 1, inputSize)
        out_v1 = batchmul(weight_v2, v1.view(batchSize, inputSize, 1))
        out_v1 = div(out_v1, self.T).squeeze()

        if need_update:
            # Momentum update of the teacher bank rows for this batch:
            # new = normalize(momentum * old + (1 - momentum) * v1).
            # IndexAdd adds a delta in place, so the delta (new - old) is
            # computed and scattered onto rows y.
            l_pos = self.teacher_mem.take(y.astype("int32"), axis=0)
            l_update = mul(l_pos, self.momentum)
            l_update = add(l_update, mul(v1, 1 - self.momentum))
            l_norm = pow(pow(l_update, 2).sum(1, keepdims=True), 0.5)
            l_update = div(l_update, l_norm)
            l_update = sub(l_update, l_pos)
            self.teacher_mem = no_grad(index_add(self.teacher_mem, y.astype("int32"), l_update))

            # Same momentum-average-then-renormalize step for the student bank.
            ab_pos = self.student_mem.take(y.astype("int32"), axis=0)
            ab_update = mul(ab_pos, self.momentum)
            ab_update = add(ab_update, mul(v2, 1 - self.momentum))
            ab_norm = pow(pow(ab_update, 2).sum(1, keepdims=True), 0.5)
            ab_update = div(ab_update, ab_norm)
            ab_update = sub(ab_update, ab_pos)
            self.student_mem = no_grad(index_add(self.student_mem, y.astype("int32"), ab_update))

        return out_v1, out_v2


class NCESoftmaxLoss(nn.Cell):
    """Softmax cross-entropy over NCE scores with the positive at index 0.

    The contrast head places the positive sample's score first among the
    K + 1 scores, so the target label for every row is 0.

    Args:
        batch_size: nominal batch size (kept for interface compatibility;
            the label used at run time is sized from the actual input).
    """

    def __init__(self, batch_size):
        super(NCESoftmaxLoss, self).__init__()
        self.criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        self.zeros = ops.Zeros()
        # Pre-built label retained for backward compatibility with code that
        # reads self.label; construct() no longer depends on it.
        self.label = self.zeros((batch_size,), ms.int32)
    
    def construct(self, x):
        # Size the all-zero label from the actual batch dimension so a
        # smaller final batch (e.g. drop_remainder=False) does not crash on
        # a shape mismatch against a fixed-size label.
        label = self.zeros((x.shape[0],), ms.int32)
        return self.criterion(x, label)


class CRDLoss(nn.Cell):
    """Contrastive Representation Distillation loss.

    Embeds student and teacher feature maps into a shared space, scores them
    against the NCEAverage memory banks, and sums the softmax-NCE losses of
    both directions.
    """

    def __init__(self, opt, all_data_num):
        super(CRDLoss, self).__init__()
        # Attribute names and construction order are kept stable: they name
        # registered sub-cells/parameters.
        self.all_data_num = all_data_num
        self.embed_s = Embed(opt.s_dim, opt.feat_dim)
        self.embed_t = Embed(opt.t_dim, opt.feat_dim)
        self.contrast = NCEAverage(all_data_num, opt, opt.k)
        self.criterion = NCESoftmaxLoss(opt.batch_size)
        self.embed_s.set_train()
        self.embed_t.set_train()

    def construct(self, idx, neg, f_s, f_t, need_update):
        # Flatten both feature maps, then project into the contrast space.
        emb_s = self.embed_s(f_s.view(f_s.shape[0], -1))
        emb_t = self.embed_t(f_t.view(f_t.shape[0], -1))

        out_s, out_t = self.contrast(emb_s, emb_t, idx, neg, need_update)

        # Symmetric loss: student-vs-teacher-bank plus teacher-vs-student-bank.
        return self.criterion(out_s) + self.criterion(out_t)


class CRDLossCell(nn.Cell):
    """Training cell combining cross-entropy, KL distillation, and CRD loss.

    Total loss = CE(student, label) + KL(student, teacher)
                 + beta * CRD(student_fm, teacher_fm).
    """

    def __init__(self, student_net, opt, all_data_num, beta):
        super(CRDLossCell, self).__init__(auto_prefix=False)
        self.student_net = student_net
        self.kl_loss = KLLoss(4)
        self.ce_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        self.crd_loss = CRDLoss(opt, all_data_num)
        self.beta = beta

    def construct(self, idx, neg, teacher_fm, teacher_logits, data, label, need_update=True):
        student_fm, student_logits = self.student_net(data, need_feature_map=True)
        ce = self.ce_loss(student_logits, label)
        kd = self.kl_loss(student_logits, teacher_logits)
        crd = self.crd_loss(idx, neg, student_fm, teacher_fm, need_update)
        return ce + kd + self.beta * crd
    
    @property
    def backbone_network(self):
        # Exposes the student net so TrainOneStepCell-style wrappers can
        # reach the backbone.
        return self.student_net
