import torch
import torch.nn as nn
import torch.nn.functional as F


class ContrastiveLossELI5(nn.Module):
    def __init__(self, batch_size, temperature=0.5, verbose=False):
        super().__init__()
        self.batch_size = batch_size
        self.register_buffer("temperature", torch.tensor(temperature))
        self.verbose = verbose

    def forward(self, emb_i, emb_j):
        """
        emb_i and emb_j are batches of embeddings, where corresponding indices are pairs
        z_i, z_j as per SimCLR paper
        """
        z_i = F.normalize(emb_i, dim=1)  # [batch_size, emb_size]
        z_j = F.normalize(emb_j, dim=1)  # [batch_size, emb_size]

        representations = torch.cat([z_i, z_j], dim=0)  # [2 * batch_size, emb_size]
        # [2*batch_size, 1, emb_size] * [1, 2*batch_size, emb_size] --> [2*batch_size, 2*batch_size]
        similarity_matrix = F.cosine_similarity(representations.unsqueeze(1), representations.unsqueeze(0), dim=2)
        if self.verbose:
            print("Similarity matrix\n", similarity_matrix, "\n")

        def l_ij(i, j):
            #z_i_, z_j_ = representations[i], representations[j]
            sim_i_j = similarity_matrix[i, j]
            if self.verbose:
                print(f"sim({i}, {j})={sim_i_j}")

            numerator = torch.exp(sim_i_j / self.temperature)  # 正样例
            one_for_not_i = torch.ones((2 * self.batch_size,)).to(emb_i.device).scatter_(0, torch.tensor([i]).to(emb_i.device), 0.0)
            if self.verbose: print(f"1{{k!={i}}}", one_for_not_i)

            if self.verbose:
                print("one_for_not_i:", one_for_not_i.size())
                print("similarity_matrix[i, :]", similarity_matrix.size())
            denominator = torch.sum(  # 正负样例
                one_for_not_i * torch.exp(similarity_matrix[i, :] / self.temperature)
            )
            if self.verbose:
                print("Denominator", denominator)

            loss_ij = -torch.log(numerator / denominator)
            if self.verbose:
                print(f"loss({i},{j})={loss_ij}\n")

            return loss_ij.squeeze(0)

        N = self.batch_size
        loss = 0.0
        for k in range(0, N):
            loss += l_ij(k, k + N) + l_ij(k + N, k)
        return 1.0 / (2 * N) * loss


class ContrastiveLossS1(nn.Module):
    def __init__(self, batch_size, temperature=0.5, verbose=False):
        super().__init__()
        self.batch_size = batch_size
        # self.temperature = 0.5
        self.register_buffer("temperature", torch.tensor(temperature))
        self.verbose = verbose

    def forward(self, emb):
        """
        emb_i and emb_j are batches of embeddings, where corresponding indices are pairs
        z_i, z_j as per SimCLR paper
        """
        b_s, p_s, e_s = emb.size()
        assert b_s == self.batch_size
        z = F.normalize(emb, dim=1)  # [batch_size, pool_size, emb_size]

        # [b_s, 2, 1, emb_size] * [b_s, 1, p_s, emb_size] --> [b_s, 2, p_s]
        similarity_matrix = F.cosine_similarity(z[:, :2].unsqueeze(2), z.unsqueeze(1), dim=3)
        if self.verbose:
            print("Similarity matrix\n", similarity_matrix, "\n")

        def l_ij(k, i, j):
            #z_i_, z_j_ = representations[i], representations[j]
            sim_i_j = similarity_matrix[k][i, j]
            if self.verbose:
                print(f"sim({i}, {j})={sim_i_j}")

            numerator = torch.exp(sim_i_j / self.temperature)  # 正样例
            one_for_not_i = torch.ones((p_s,)).to(emb.device).scatter_(0, torch.tensor([i]).to(emb.device), 0.0)
            if self.verbose:
                print(f"1{{k!={i}}}", one_for_not_i)
                print("one_for_not_i:", one_for_not_i.size())
                print("similarity_matrix[i, :]", similarity_matrix[k].size())
            denominator = torch.sum(  # 正负样例
                one_for_not_i * torch.exp(similarity_matrix[k][i, :] / self.temperature)
            )
            if self.verbose:
                print("Denominator", denominator)

            loss_ij = -torch.log(numerator / denominator)
            if self.verbose:
                print(f"loss({i},{j})={loss_ij}\n")

            return loss_ij.squeeze(0)

        N = self.batch_size
        loss = 0.0
        for k in range(0, N):
            loss += l_ij(k, 0, 1) + l_ij(k, 1, 0)
        return 1.0 / (2 * N) * loss

if __name__ == "__main__":
    # Smoke-test both losses on random embeddings.
    pair_loss = ContrastiveLossELI5(3)
    view_a = torch.randn(3, 10)
    view_b = torch.randn(3, 10)
    print(pair_loss(view_a, view_b))

    pool_loss = ContrastiveLossS1(3)
    pooled = torch.randn(3, 5, 10)
    print(pool_loss(pooled))
