import torch
from packaging import version
from torch import nn
from torch.nn.functional import cross_entropy


class NCELoss(nn.Module):
    """
    InfoNCE loss with a fixed-size memory queue of negatives (MoCo-style).

    Each query is pulled toward its paired key (the positive) and pushed
    away from the K feature vectors currently held in the queue
    (negatives).  After the loss is computed, the new keys are enqueued
    so they serve as negatives for subsequent batches.
    """

    def __init__(self, C, K, nce_t=0.07):
        """
        Arguments:
            C (int): feature dimension of each sample (queue rows)
            K (int): number of negative features kept in the queue
            nce_t (float): softmax temperature
        """
        super().__init__()
        self.nce_t = nce_t
        # Queue of K negative features stored column-wise, shape (C, K).
        # Registered as a buffer so it follows .to(device) and is saved
        # in state_dict.
        self.register_buffer("quene", torch.zeros(C, K))
        # Next column to overwrite; wraps around modulo K.
        self.quene_index = 0

    def forward(self, query: torch.Tensor, key: torch.Tensor):
        r"""
        Formula 5 of "Unsupervised Deraining: Where Contrastive Learning
        Meets Self-similarity":

        .. math::
            \sum_k -\log \frac{\exp(q_k \cdot k_k / \tau)}
                              {\exp(q_k \cdot k_k / \tau)
                               + \sum_{i=1}^{K} \exp(q_k \cdot n_i / \tau)}

        Arguments:
            query (Tensor): (N, C) query features
            key (Tensor): (N, C) positive key features; detached so
                gradients flow through the query branch only
        Returns:
            Tensor: scalar loss
        """
        key = key.detach()
        # Positive logit q_i . k_i for each row -> (N, 1).
        l_pos = torch.bmm(query.unsqueeze(1), key.unsqueeze(2)).squeeze(2)

        # Negative logits against the whole queue: (N, C) @ (C, K) -> (N, K).
        l_neg = torch.mm(query, self.quene)

        # Positive occupies column 0, so the target class is 0 everywhere.
        logits = torch.cat([l_pos, l_neg], dim=1) / self.nce_t
        labels = torch.zeros(logits.shape[0], dtype=torch.long, device=query.device)

        loss = cross_entropy(logits, labels)

        self._enqueue(key)
        return loss

    @torch.no_grad()
    def _enqueue(self, key):
        """Insert the newest keys into the queue columns, wrapping around."""
        K = self.quene.size(1)
        n = key.size(0)
        cols = (torch.arange(n, device=key.device) + self.quene_index) % K
        self.quene[:, cols] = key.t()
        self.quene_index = (self.quene_index + n) % K


class PatchNCELoss(nn.Module):
    """
    Location Contrastive loss (PatchNCE, as used in CUT).

    For each spatial location, the query patch feature must match the key
    patch feature at the same location (positive) and mismatch every other
    patch of the same sample (negatives).  Negatives are taken only from
    the same image, not from other samples of the minibatch — in CUT and
    FastCUT this was found to work best.
    """

    def __init__(self, num_patches_b=256, num_patches_o=256, nce_t=0.07, **kwargs):
        """
        Arguments:
            num_patches_b (int): accepted for interface compatibility;
                the patch count is inferred from the input tensors
            num_patches_o (int): accepted for interface compatibility
            nce_t (float): softmax temperature
        """
        super().__init__()
        self.nce_t = nce_t
        self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction="none")

    def forward(self, feat_q, feat_k):
        """
        Arguments:
            feat_q (Tensor): (N, num_patches, C) query patch features
            feat_k (Tensor): (N, num_patches, C) key patch features;
                detached so gradients flow through the query branch only
        Returns:
            Tensor: scalar loss, mean over all N * num_patches locations
        """
        batch_size = feat_q.shape[0]
        num_patches = feat_q.shape[1]
        feat_k = feat_k.detach()

        # Positive logit: q . k at the same location -> (N, P, 1).
        l_pos = torch.bmm(
            feat_q.flatten(0, 1).unsqueeze(1), feat_k.flatten(0, 1).unsqueeze(2)
        ).view(batch_size, num_patches, 1)

        # Negative logits: every query patch against every key patch of
        # the same sample -> (N, P, P).
        l_neg = torch.bmm(feat_q, feat_k.permute(0, 2, 1))

        # Diagonal entries compare a location with itself, which is already
        # counted as the positive; fill with -10 so that after the
        # temperature division its softmax weight is effectively zero.
        diagonal = torch.eye(num_patches, device=feat_q.device, dtype=torch.bool)
        l_neg.masked_fill_(diagonal.unsqueeze(0), -10.0)

        # Positive occupies column 0, so the target class is 0 everywhere.
        logits = torch.cat((l_pos, l_neg), dim=2) / self.nce_t
        labels = torch.zeros(
            batch_size * num_patches, dtype=torch.long, device=feat_q.device
        )

        # One vectorized CE over all N*P rows replaces the per-sample
        # Python loop of the reference implementation; every sample
        # contributes the same number of rows, so the mean is identical.
        return self.cross_entropy_loss(logits.flatten(0, 1), labels).mean()


class DisNCELoss(nn.Module):
    """
    Layer Contrastive loss.

    Computes a supervised-contrastive-style (SupCon-like) loss over the
    concatenation of background and rain patch features, per sample, and
    returns the SUM over the batch (not the mean).
    """

    def __init__(self, *, num_patches_b=8, num_patches_n=128, nce_t=0.07, **kwargs):
        """
        Arguments (keyword-only):
            num_patches_b (int): patches per sample in the background layer
            num_patches_n (int): patches per sample in the rain layer
            nce_t (float): softmax temperature
            **kwargs: ignored; accepted for interface compatibility
        """
        super().__init__()
        self.num_patches_pos = num_patches_b
        self.num_patches_neg = num_patches_n
        self.nce_t = nce_t

    # feat_B for the background, feat_R for the rain
    # shape: (batch_size, num_patches, feature length)
    def forward(self, featB, featR):
        """
        Arguments:
            featB (Tensor): (batch, num_patches_b, C) background features
            featR (Tensor): (batch, num_patches_n, C) rain features
        Returns:
            Tensor: scalar loss, summed over the batch
        """
        batch_size = featB.shape[0]
        # if featR.shape[0] != num_patches*batch_size:
        #     raise ValueError('num_patches of rain and background are not equal')

        # One all-zero label per patch (background + rain), kept on CPU;
        # the derived mask is moved to the feature device later.
        # NOTE(review): because every entry carries the same label, the
        # eq-mask in cal_each_disloss marks ALL cross-patch pairs as
        # positives (only self-pairs are excluded).  If background vs rain
        # were meant to form two classes, this should instead be
        # cat([zeros(num_patches_b), ones(num_patches_n)]) — confirm
        # against the paper before changing.
        labels_neg = torch.zeros(self.num_patches_neg + self.num_patches_pos, 1)

        loss_dis = 0
        # Accumulate the per-sample disentanglement loss over the batch.
        for batch in range(batch_size):
            cur_featB = featB[batch]
            cur_featR = featR[batch]
            cur_disloss = self.cal_each_disloss(cur_featB, cur_featR, labels_neg)
            loss_dis = loss_dis + cur_disloss
        return loss_dis

    # cur_featB: [num_patches, feature length]
    # labels: [num_patches*2, 1]
    # @torch.no_grad()
    def cal_each_disloss(self, cur_featB, cur_featR, labels):
        """
        Compute the SupCon-style loss for one sample.

        Arguments:
            cur_featB (Tensor): (num_patches_b, C) background features
            cur_featR (Tensor): (num_patches_n, C) rain features
            labels (Tensor): (num_patches_b + num_patches_n, 1) per-patch
                labels; pairs with equal labels are treated as positives
        Returns:
            Tensor: scalar loss for this sample
        """
        # Fuse both layers into one set of patches, shape (P_total, C).
        featFusion = torch.cat([cur_featB, cur_featR], dim=0)
        # (P_total, P_total) positive-pair mask: 1 where labels match.
        mask = torch.eq(labels, labels.t()).float().to(cur_featB.device)

        num_patches = featFusion.shape[0]
        # contrast_count: number of all the rain and background patches
        contrast_feature = featFusion
        #         contrast_count = featFusion.shape[1]
        # Single view per patch, so anchor/contrast counts are both 1.
        contrast_count = 1
        anchor_feature = contrast_feature
        anchor_count = contrast_count

        # compute logits of all the elements
        # Denoting: zi: one sample, zl: all the other samples, zp: positives to zi, za: negatives to zi
        # anchor_dot_contrast = zi * zl / tau
        anchor_dot_contrast = torch.div(
            torch.matmul(anchor_feature, contrast_feature.t()), self.nce_t
        )
        # For numerical stability, subtract each row's max before exp;
        # detached so the shift adds no gradient term.
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()

        # tile mask, repeat the masks to match the n_views of positives
        # (no-op here since anchor_count == contrast_count == 1)
        mask = mask.repeat(anchor_count, contrast_count)
        # mask-out self-contrast cases: zero on the diagonal, one elsewhere
        logits_mask = torch.ones_like(mask).scatter_(
            1,
            torch.arange(num_patches * anchor_count).view(-1, 1).to(cur_featB.device),
            0,
        )
        mask = mask * logits_mask

        # compute log_prob
        # exp_logits: exp(zi * zl), self-pairs excluded
        exp_logits = torch.exp(logits) * logits_mask
        # the meaning of sum(1): sum the similarity of one sample and all the other samples
        # log_prob: (zi*zl) - log(sum(exp(zi*zl))) = log[ exp(zi*zl) / sum(exp(zi*zl)) ]
        # (+1e-6 guards against log(0))
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-6)

        # compute mean of log-likelihood over positives
        # (mask * log_prob).sum(1): log [ prod over positives of softmax terms ]
        # mask.sum(1): |P(i)|, number of positives per anchor (+1e-6 guard)
        loss = -(mask * log_prob).sum(1) / (mask.sum(1) + 1e-6)

        # average over all anchors
        loss = loss.mean()

        return loss


if __name__ == "__main__":
    # Smoke test with (batch, num_patches, feat_dim) patch tensors that
    # match the DisNCELoss defaults (8 background / 128 rain patches).
    q = torch.ones([16, 8, 256])
    k = torch.ones([16, 128, 256])

    # NCELoss requires (C, K) at construction and 2-D (N, C) inputs, so it
    # cannot consume these 3-D patch tensors; exercise DisNCELoss instead.
    dis_nce = DisNCELoss()
    loss = dis_nce(q, k)
    print(loss)
