import torch
import torch.nn as nn
import torch.nn.functional as F

# Public API: self-supervised / multimodal pre-training loss modules.
__all__ = [
    "BarlowTwinsLoss",
    "GmimLoss",
    "CrossCorrelationLoss",
    "ImputationLoss",
    "IntraModalityContrastLoss",
    "ModalityConsisLoss",
    "InfoNCELoss",
    "ContrastLoss",
    "PreGliLoss",
    "LanguageImageContrastiveLoss",
    "GliomaMRIContrastiveLoss",
    "PreTextConsisLoss"
]


################ BarlowTwinsLoss ################
class BarlowTwinsLoss(nn.Module):
    """
    The criterion corresponding to the Barlow Twins loss as defined in the paper
    https://arxiv.org/abs/2103.03230v1.

    Hacked from https://github.com/facebookresearch/vissl/blob/main/vissl/losses/barlow_twins_loss.py

    Args:
        lambda_ (float):        weight on the off-diagonal terms. It controls the trade-off
                                between the importance given to the invariance term versus the
                                redundancy reduction term.
        scale_loss (float):     constant factor multiplying the loss, kept to match the
                                original Barlow Twins implementation.
        modality (int):         number of modalities (channel dimension of the BatchNorm).
    """

    def __init__(self, lambda_: float, scale_loss: float, modality: int):
        super(BarlowTwinsLoss, self).__init__()

        self.lambda_ = lambda_
        self.scale_loss = scale_loss

        # Normalizes each modality channel before computing cross-correlations.
        self.batch_norm = nn.BatchNorm1d(modality)

    @staticmethod
    def _off_diagonal(x: torch.Tensor) -> torch.Tensor:
        """Return a flattened view of the off-diagonal elements of a square matrix."""
        n, m = x.shape
        assert n == m
        return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()

    def _get_criterion(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        """Barlow Twins criterion for one pair of [m, c] feature matrices."""
        # empirical cross-correlation matrix, normalized by the number of rows
        m = a.size(0)
        correlation_matrix = torch.mm(a.T, b) / m

        # invariance term: push diagonal entries towards 1
        on_diag = (
            torch.diagonal(correlation_matrix).add(-1).pow(2).sum().mul(self.scale_loss)
        )
        # redundancy-reduction term: push off-diagonal entries towards 0
        off_diag = (
            self._off_diagonal(correlation_matrix).pow(2).sum().mul(self.scale_loss)
        )
        loss = on_diag + self.lambda_ * off_diag

        return loss

    def forward(self, f_seq: torch.Tensor, f_spa: torch.Tensor) -> torch.Tensor:
        """
        Compute the Barlow Twins loss summed over the batch.

        Args:
            f_seq (torch.Tensor):   features from the first branch;
                                    assumed shape [batch, modality, dim] — TODO confirm with callers.
            f_spa (torch.Tensor):   features from the second branch, same shape as ``f_seq``.

        Returns:
            A 1-element tensor: per-sample criteria summed over the batch.
        """
        # keep the BatchNorm on the same device as the inputs
        self.batch_norm = self.batch_norm.to(f_seq.device)
        f_seq = self.batch_norm(f_seq)
        f_spa = self.batch_norm(f_spa)

        # split into per-sample [modality, dim] matrices
        f_seq = torch.unbind(f_seq, dim=0)
        f_spa = torch.unbind(f_spa, dim=0)

        loss = torch.zeros(1, requires_grad=True).to(f_seq[0].device)
        for i, j in zip(f_seq, f_spa):
            loss = loss + self._get_criterion(i, j)

        return loss


################ Gmim ################
class GmimLoss(torch.nn.Module):
    """Combined contrastive + reconstruction loss for GMIM-style pre-training.

    Args:
        args: namespace providing ``alpha1`` (contrastive weight), ``alpha2``
              (reconstruction weight), and the fields required by
              :class:`CrossCorrelationLoss` (``lambd``).
    """

    def __init__(self, args):
        super().__init__()
        # Device-agnostic: submodules follow the parent via .to()/.cuda(),
        # so the former hard-coded .cuda() calls (which crashed on CPU-only
        # machines) are removed.
        self.recon_loss = torch.nn.MSELoss()
        self.contrast_loss = CrossCorrelationLoss(args)
        self.alpha1 = args.alpha1
        self.alpha2 = args.alpha2

    # defined as forward (not __call__) so nn.Module hooks keep working;
    # instances are still invoked the same way: loss(a, b, c, d)
    def forward(self, output_contrastive, target_contrastive, output_recons, target_recons):
        """Return (total, weighted contrastive, weighted reconstruction) losses."""
        contrast_loss = self.alpha1 * self.contrast_loss(output_contrastive, target_contrastive)
        recon_loss = self.alpha2 * self.recon_loss(output_recons, target_recons)
        total_loss = contrast_loss + recon_loss
        return total_loss, contrast_loss, recon_loss
    

class CrossCorrelationLoss(torch.nn.Module):
    """Barlow-Twins-style cross-correlation loss over token embeddings.

    For inputs of shape [b, n, c], computes per batch element the c x c
    cross-correlation matrix between the two views (normalized by the token
    count n), pushing its diagonal towards 1 and off-diagonal towards 0.

    Args:
        args: namespace providing ``lambd``, the off-diagonal weight.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args

    def forward(self, z_i, z_j):
        # z_i, z_j: [b, n, c]
        b, n, _ = z_i.size()
        # accumulate on the inputs' device instead of hard-coding CUDA
        # (the old torch.zeros(1).cuda() crashed on CPU-only machines)
        loss = torch.zeros(1, device=z_i.device)
        for batch_index in range(b):
            # empirical cross-correlation matrix, normalized by token count
            # (renamed from `c`, which shadowed the channel-count variable)
            corr = z_i[batch_index].T @ z_j[batch_index]
            corr.div_(n)

            on_diag = torch.diagonal(corr).add_(-1).pow_(2).sum()
            off_diag = self.off_diagonal(corr).pow_(2).sum()
            loss += (on_diag + self.args.lambd * off_diag)
        return loss / b

    @staticmethod
    def off_diagonal(x):
        """Return a flattened view of the off-diagonal elements of a square matrix."""
        n, m = x.shape
        assert n == m
        return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()


################ ImputationLoss ################
class ImputationLoss(nn.Module):
    """Weighted combination of reconstruction, consistency and contrastive terms."""

    def __init__(self,
                 alpha_lmse,
                 alpha_lcon,
                 alpha_lconsis,
                 temperature=0.5,
                 batch_size=32,
                 embed_dim=512,
                 device='cpu'):
        super().__init__()

        # individual criteria
        self.l_mse = nn.MSELoss()
        self.l_con = IntraModalityContrastLoss(batch_size, temperature, device)
        self.l_consis = ModalityConsisLoss(batch_size, temperature, embed_dim, device)

        # per-term weights
        self.alpha_lmse = alpha_lmse
        self.alpha_lcon = alpha_lcon
        self.alpha_lconsis = alpha_lconsis

    def forward(self, x_rec, y_rec, x_con, y_con):
        """Return the three weighted loss terms: (mse, consistency, contrast)."""
        weighted_mse = self.alpha_lmse * self.l_mse(x_rec, y_rec)
        weighted_consis = self.alpha_lconsis * self.l_consis(x_con, y_con)
        weighted_con = self.alpha_lcon * self.l_con(x_con, y_con)
        return weighted_mse, weighted_consis, weighted_con
            
        
class IntraModalityContrastLoss(torch.nn.Module):
    """NT-Xent-style contrastive loss applied across batch*modality pairs.

    NOTE(review): the negative mask is sized for exactly 4 modalities
    (batch_size * 2 * 4); inputs with a different modality count will not
    match the mask shape — confirm with callers.
    """

    def __init__(self, batch_size=1, temperature=0.5, device='cpu'):
        super().__init__()
        self.batch_size = batch_size
        self.register_buffer("temp", torch.tensor(temperature).to(device))
        self.register_buffer("neg_mask", (~torch.eye(batch_size * 2 * 4, batch_size * 2 * 4, dtype=bool).to(device)).float())

    def forward(self, x_i, x_j):
        batch, modalities, dim = x_i.size()

        # flatten (batch, modality) into one axis and l2-normalize features
        flat_i = F.normalize(x_i.view(batch * modalities, dim), dim=1)
        flat_j = F.normalize(x_j.view(batch * modalities, dim), dim=1)
        stacked = torch.cat([flat_i, flat_j], dim=0)

        # full pairwise cosine-similarity matrix of both views
        sim = F.cosine_similarity(stacked.unsqueeze(1), stacked.unsqueeze(0), dim=2)

        # positives sit on the +/- (batch_size * modalities) diagonals
        offset = self.batch_size * modalities
        positives = torch.cat(
            [torch.diag(sim, offset), torch.diag(sim, -offset)], dim=0
        )
        numerator = torch.exp(positives / self.temp)

        # mask out self-similarity from the denominator
        denominator = self.neg_mask * torch.exp(sim / self.temp)

        return torch.sum(-torch.log(numerator / torch.sum(denominator, dim=1))) / (2 * offset)


class ModalityConsisLoss(nn.Module):
    """Cross-view consistency loss on projected modality-pair embeddings.

    For each sample, three modality pairs (t1c+t2, t1+t2, t2f+t2) are
    projected to a shared space for both the sequence and spatial views;
    an NT-Xent-style loss then pulls matching projections together.

    Args:
        batch_size:  number of samples per batch.
        temperature: softmax temperature.
        embed_dim:   per-modality feature dimension.
        device:      device for the projector and mask buffers.
    """

    # number of modality pairs built in forward() (was a magic `3` repeated
    # in four places)
    NUM_PAIRS = 3

    def __init__(self, batch_size=1, temperature=0.5, embed_dim=512, device='cpu'):
        super().__init__()
        self.projector = nn.Linear(embed_dim * 2, embed_dim).to(device)

        self.batch_size = batch_size
        self.register_buffer("temp", torch.tensor(temperature).to(device))
        # mask zeroing the self-similarity diagonal; sized for 2 views x NUM_PAIRS
        size = batch_size * 2 * self.NUM_PAIRS
        self.register_buffer("neg_mask", (~torch.eye(size, size, dtype=bool).to(device)).float())

    def _get_criterion(self, x_i, x_j):
        """NT-Xent loss between two [batch_size * NUM_PAIRS, embed_dim] views."""
        z_i = F.normalize(x_i, dim=1)
        z_j = F.normalize(x_j, dim=1)
        z = torch.cat([z_i, z_j], dim=0)
        sim = F.cosine_similarity(z.unsqueeze(1), z.unsqueeze(0), dim=2)
        # positives sit on the +/- (batch_size * NUM_PAIRS) diagonals
        sim_ij = torch.diag(sim, self.batch_size * self.NUM_PAIRS)
        sim_ji = torch.diag(sim, -self.batch_size * self.NUM_PAIRS)
        pos = torch.cat([sim_ij, sim_ji], dim=0)
        nom = torch.exp(pos / self.temp)
        denom = self.neg_mask * torch.exp(sim / self.temp)
        return torch.sum(-torch.log(nom / torch.sum(denom, dim=1))) / (2 * self.batch_size * self.NUM_PAIRS)

    def forward(self, f_seq, f_spa):
        """f_seq, f_spa: [batch, 4, embed_dim] with modality order [t1c, t1, t2, t2f]."""
        v_spa_t1c_t2 = self.projector(torch.cat([f_spa[:, 0], f_spa[:, 2]], dim=-1))
        v_seq_t1c_t2 = self.projector(torch.cat([f_seq[:, 0], f_seq[:, 2]], dim=-1))

        v_spa_t1_t2 = self.projector(torch.cat([f_spa[:, 1], f_spa[:, 2]], dim=-1))
        v_seq_t1_t2 = self.projector(torch.cat([f_seq[:, 1], f_seq[:, 2]], dim=-1))

        v_spa_t2_t2f = self.projector(torch.cat([f_spa[:, 3], f_spa[:, 2]], dim=-1))
        v_seq_t2_t2f = self.projector(torch.cat([f_seq[:, 3], f_seq[:, 2]], dim=-1))

        v_spa = torch.cat([v_spa_t1c_t2, v_spa_t1_t2, v_spa_t2_t2f], dim=0)
        v_seq = torch.cat([v_seq_t1c_t2, v_seq_t1_t2, v_seq_t2_t2f], dim=0)

        return self._get_criterion(v_spa, v_seq)


################ ContrastiveLoss ################
class InfoNCELoss(nn.Module):
    """SimCLR-style InfoNCE loss over two views of a batch."""

    def __init__(self, batch_size, temperature=0.1, device='cpu') -> None:
        super().__init__()
        self.batch_size = batch_size
        self.temperature = temperature
        self.device = device
        self.criterion = torch.nn.CrossEntropyLoss().to(device)

    def forward(self, x_i, x_j):
        # pair mask: entry (a, b) is 1 when a and b are views of the same sample
        ids = torch.cat([torch.arange(self.batch_size) for _ in range(2)], dim=0)
        pair_mask = (ids.unsqueeze(0) == ids.unsqueeze(1)).float()
        pair_mask = pair_mask.to(self.device)

        # l2-normalize each view, then stack to [2*batch, dim]
        feats = torch.cat((F.normalize(x_i, dim=1), F.normalize(x_j, dim=1)), dim=0)

        # full pairwise similarity matrix
        sim = torch.matmul(feats, feats.T)

        # drop the self-similarity diagonal from mask and similarities alike
        diag = torch.eye(pair_mask.shape[0], dtype=torch.bool).to(self.device)
        pair_mask = pair_mask[~diag].view(pair_mask.shape[0], -1)
        sim = sim[~diag].view(sim.shape[0], -1)

        # positives in column 0, all negatives after; target class is always 0
        positives = sim[pair_mask.bool()].view(pair_mask.shape[0], -1)
        negatives = sim[~pair_mask.bool()].view(sim.shape[0], -1)

        logits = torch.cat([positives, negatives], dim=1)
        targets = torch.zeros(logits.shape[0], dtype=torch.long).to(self.device)

        logits = logits / self.temperature
        return self.criterion(logits, targets)
    

class ContrastLoss(torch.nn.Module):
    """NT-Xent contrastive loss between two views of a [batch_size, c] batch."""

    def __init__(self, batch_size=4, temperature=0.5, device='cpu'):
        super().__init__()
        self.batch_size = batch_size
        self.register_buffer("temp", torch.tensor(temperature).to(device))
        self.register_buffer("neg_mask", (~torch.eye(batch_size * 2, batch_size * 2, dtype=bool).to(device)).float())

    def _get_criterion(self, x_i, x_j):
        # x_i, x_j: [batch_size, c]
        z_i = F.normalize(x_i, dim=1)
        z_j = F.normalize(x_j, dim=1)
        z = torch.cat([z_i, z_j], dim=0)
        sim = F.cosine_similarity(z.unsqueeze(1), z.unsqueeze(0), dim=2)
        # positives sit on the +/- batch_size diagonals
        sim_ij = torch.diag(sim, self.batch_size)
        sim_ji = torch.diag(sim, -self.batch_size)
        pos = torch.cat([sim_ij, sim_ji], dim=0)
        nom = torch.exp(pos / self.temp)
        denom = self.neg_mask * torch.exp(sim / self.temp)
        return torch.sum(-torch.log(nom / torch.sum(denom, dim=1))) / (2 * self.batch_size)

    def forward(self, x, y):
        """Compute the loss for x, y of shape [batch_size, c].

        Bug fix: the old loop unbound x/y into per-sample 1-D tensors but
        then called ``_get_criterion(x, y)`` on the full *tuples*, which
        raises inside F.normalize (and 1-D per-sample slices would not fit
        ``_get_criterion``'s 2-D contract either). ``_get_criterion``
        already averages over the batch, so a single batch-level call is
        the intended behavior.
        """
        return self._get_criterion(x, y)
    
    
################ MultiModalityLoss ################
class PreGliLoss(torch.nn.Module):
    """Total pre-training loss: language-image term plus glioma-MRI term."""

    def __init__(self, batch_size, temperature=0.05, alpha_1=1., alpha_2=1.):
        super().__init__()

        # sub-losses (attribute names kept for state_dict compatibility)
        self.L2ILoss = LanguageImageContrastiveLoss()
        self.G2ILoss = GliomaMRIContrastiveLoss(batch_size, temperature=temperature)

        # weights for the two terms
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2

    def forward(self, image_features, gli_features, text_features, logit_scale):
        """Return (total, (language-image loss, glioma-image loss))."""
        l2i = self.L2ILoss(image_features, text_features, logit_scale)
        g2i = self.G2ILoss(gli_features, image_features)

        weighted_total = self.alpha_1 * l2i + self.alpha_2 * g2i

        return weighted_total, (l2i, g2i)
    
    
class LanguageImageContrastiveLoss(nn.Module):
    """CLIP-style symmetric cross-entropy over image/text logit matrices.

    Adapted from open_clip's ClipLoss; caches the arange target labels per
    device so they are not rebuilt every step.
    """

    def __init__(self):
        super().__init__()

        # cache state: last batch size and per-device label tensors
        self.prev_num_logits = 0
        self.labels = {}

    def get_ground_truth(self, device, num_logits) -> torch.Tensor:
        # drop stale cache entries when the batch size changes
        if self.prev_num_logits != num_logits:
            self.labels = {}
            self.prev_num_logits = num_logits
        if device not in self.labels:
            # bug fix: the cache was never populated, so labels were rebuilt
            # on every call and the cached branch was dead code
            self.labels[device] = torch.arange(num_logits, device=device, dtype=torch.long)
        return self.labels[device]

    def get_logits(self, image_features, text_features, logit_scale):
        """Return scaled (image->text, text->image) logit matrices."""
        logits_per_image = logit_scale * image_features @ text_features.T
        logits_per_text = logit_scale * text_features @ image_features.T

        return logits_per_image, logits_per_text

    def forward(self, image_features, text_features, logit_scale, output_dict=False):
        device = image_features.device
        logits_per_image, logits_per_text = self.get_logits(image_features, text_features, logit_scale)

        labels = self.get_ground_truth(device, logits_per_image.shape[0])

        # symmetric cross-entropy over both directions
        total_loss = (
            F.cross_entropy(logits_per_image, labels) +
            F.cross_entropy(logits_per_text, labels)
        ) / 2

        return {"contrastive_loss": total_loss} if output_dict else total_loss
    
    
class GliomaMRIContrastiveLoss(torch.nn.Module):
    """NT-Xent contrastive loss between glioma and MRI feature batches.

    Args:
        batch_size:  number of samples per view.
        temperature: softmax temperature.
    """

    def __init__(self, batch_size, temperature=0.5):
        super().__init__()

        self.batch_size = batch_size
        # Buffers are created on the default device and follow the module
        # through .to()/.cuda(). The old hard-coded .cuda()/.to('cuda')
        # crashed at construction time on CPU-only machines.
        self.register_buffer("temp", torch.tensor(temperature))
        self.register_buffer("neg_mask", (~torch.eye(batch_size * 2, batch_size * 2, dtype=bool)).float())

    def forward(self, x_i, x_j):
        """x_i, x_j: [batch_size, c]; returns a scalar loss tensor."""
        z = torch.cat([x_i, x_j], dim=0)
        sim = F.cosine_similarity(z.unsqueeze(1), z.unsqueeze(0), dim=2)
        # positives sit on the +/- batch_size diagonals
        sim_ij = torch.diag(sim, self.batch_size)
        sim_ji = torch.diag(sim, -self.batch_size)
        pos = torch.cat([sim_ij, sim_ji], dim=0)
        nom = torch.exp(pos / self.temp)
        denom = self.neg_mask * torch.exp(sim / self.temp)
        return torch.sum(-torch.log(nom / torch.sum(denom, dim=1))) / (2 * self.batch_size)


class PreTextConsisLoss(torch.nn.Module):
    """Distillation + distance-regularization loss over three feature/target pairs.

    Args:
        alpha_1: weight on the SmoothL1 distillation term.
        alpha_2: weight on the cosine-distance regularization term.
    """

    def __init__(self, alpha_1=1., alpha_2=1.):
        super().__init__()
        self.l1_loss = torch.nn.SmoothL1Loss()
        self.cossim = torch.nn.CosineSimilarity(dim=1, eps=1e-6)

        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2

    # defined as forward (not __call__) so nn.Module hooks keep working;
    # instances are still invoked the same way: loss(f_0, ..., t_2)
    def forward(self, f_0, f_1, f_2, t_0, t_1, t_2):
        """Return (total, (distill_loss, dist_regularization))."""
        f_0 = F.normalize(f_0, dim=1)
        f_1 = F.normalize(f_1, dim=1)
        f_2 = F.normalize(f_2, dim=1)

        t_0 = F.normalize(t_0, dim=1)
        t_1 = F.normalize(t_1, dim=1)
        t_2 = F.normalize(t_2, dim=1)

        # average SmoothL1 between each feature and its (normalized) target
        distill_loss = (self.l1_loss(f_0, t_0) + self.l1_loss(f_1, t_1) + self.l1_loss(f_2, t_2)) / 3

        # penalize inconsistent pairwise similarity structure among f_0, f_1, f_2
        dist_regularization = torch.abs(self.cossim(f_0, f_2) - self.cossim(f_1, f_0) - self.cossim(f_2, f_1)).mean()

        total_loss = self.alpha_1 * distill_loss + self.alpha_2 * dist_regularization

        return total_loss, (distill_loss, dist_regularization)



if __name__ == "__main__":
    # Smoke test: batch of 6 samples, 4 modalities, 512-dim features.
    inps1 = torch.randn((6, 4, 512))
    inps2 = torch.randn((6, 4, 512))
    
    # batch_size=6 must match the leading input dimension for the masks to fit
    infoloss = IntraModalityContrastLoss(6, 0.799)
    contrastloss = ModalityConsisLoss(6, 0.999)
    
    a = infoloss(inps1, inps2)
    b = contrastloss(inps1, inps2)
    print(a, b)
    
    
