import torch
import torch.nn as nn
from torch.autograd import Variable


def cosine_sim(x1, x2, dim=-1, eps=1e-8):
    """Cosine similarity of x1 and x2 along dim, with size-1 dims squeezed out.

    eps floors the norm product to avoid division by zero.
    """
    dot = (x1 * x2).sum(dim)
    norm_prod = x1.norm(2, dim) * x2.norm(2, dim)
    return (dot / norm_prod.clamp(min=eps)).squeeze()


class ContrastiveLoss(nn.Module):
    """
    Max-margin contrastive (hinge) ranking loss over an image-sentence
    similarity matrix, as used in VSE0 / VSE++.

    With ``max_violation`` off, margin violations are summed over all
    negatives (VSE0); with it on, only the hardest negative per query
    contributes (VSE++).
    """

    def __init__(self, opts, logger=None, writer=None):
        """
        Args:
            opts: options object exposing ``margin`` (float) and
                ``max_violation`` (bool) attributes.
            logger: optional logger for informational messages.
            writer: optional summary writer (stored for interface parity).
        """
        super().__init__()
        self.l = logger
        self.w = writer
        self.margin = opts.margin
        self.max_violation = opts.max_violation

    def max_violation_on(self):
        """Switch to the VSE++ objective (hardest negative only)."""
        self.max_violation = True
        # logger is optional (defaults to None); don't crash when absent.
        if self.l is not None:
            self.l.info('Use VSE++ objective.')

    def max_violation_off(self):
        """Switch to the VSE0 objective (sum over all negatives)."""
        self.max_violation = False
        if self.l is not None:
            self.l.info('Use VSE0 objective.')

    def forward(self, scores):
        """
        Args:
            scores: (N, N) similarity matrix; ``scores[i, j]`` is the
                similarity of image i and sentence j, so the diagonal
                holds the positive (matching) pairs.

        Returns:
            Scalar loss tensor.
        """
        # Positive-pair scores as a column vector for broadcasting.
        diagonal = scores.diag().view(scores.size(0), 1)
        d1 = diagonal.expand_as(scores)      # row i filled with scores[i, i]
        d2 = diagonal.t().expand_as(scores)  # column j filled with scores[j, j]

        # Caption retrieval: hinge on every negative in the same row.
        cost_s = (self.margin + scores - d1).clamp(min=0)
        # Image retrieval: hinge on every negative in the same column.
        cost_im = (self.margin + scores - d2).clamp(min=0)

        # Clear the diagonal: positives must not count as violations.
        mask = torch.diag_embed(torch.ones(scores.size(0), device=scores.device)).bool()
        cost_s = cost_s.masked_fill_(mask, 0)
        cost_im = cost_im.masked_fill_(mask, 0)

        # Keep only the maximum violating negative for each query (VSE++).
        if self.max_violation:
            cost_s = cost_s.max(1)[0]
            cost_im = cost_im.max(0)[0]
        return cost_s.sum() + cost_im.sum()


class Contrastiveloss2(nn.Module):
    """Placeholder contrastive-loss variant; currently only stores its logger and writer."""

    def __init__(self, opts, logger=None, writer=None, ):
        super().__init__()
        # Same short attribute names used elsewhere in this file.
        self.l = logger
        self.w = writer


if __name__ == '__main__':
    # Smoke check: pairwise cosine similarity on random embedding batches.
    left = torch.randn((64, 2, 512))
    right = torch.randn((64, 2, 512))
    sims = cosine_sim(left, right)
    pass
