import torch
import torch.nn as nn
from torch.nn import functional as F

class NoFussCrossEntropyLoss(nn.CrossEntropyLoss):
    """
    pytorch's CrossEntropyLoss is fussy: 1) needs Long (int64) targets only, and 2) only 1D.
    This class satisfies these requirements: it casts targets to int64 and
    drops a trailing singleton dimension (e.g. (N, 1) -> (N,)).
    """

    def forward(self, inp, target):
        """Compute cross-entropy of logits `inp` (N, C) against `target` (N,) or (N, 1).

        Honors the `weight`, `ignore_index`, and `reduction` options passed to
        the constructor (inherited from nn.CrossEntropyLoss).
        """
        target = target.long()
        # .squeeze() with no dim would collapse a batch of size 1 (shape (1, 1))
        # to a 0-d tensor and break F.cross_entropy; only remove the trailing
        # singleton dimension instead.
        if target.dim() > 1:
            target = target.squeeze(dim=-1)
        return F.cross_entropy(inp, target, weight=self.weight,
                               ignore_index=self.ignore_index, reduction=self.reduction)

def get_loss_module(task):
    """Return the per-sample loss module appropriate for `task`.

    Args:
        task: one of "classification" or "regression".

    Returns:
        An nn.Module loss with reduction='none', i.e. one loss value per
        batch sample.

    Raises:
        ValueError: if `task` has no associated loss module.
    """
    # if (task == "imputation") or (task == "transduction"):
    #     return MaskedMSELoss(reduction='none')  # outputs loss for each batch element

    if task == "classification":
        return NoFussCrossEntropyLoss(reduction='none')  # one loss per batch sample
    if task == "regression":
        return nn.MSELoss(reduction='none')  # one loss per batch sample
    raise ValueError("Loss module for task '{}' does not exist".format(task))


def l2_reg_loss(model, param_name='output_layer.weight'):
    """Returns the squared L2 norm of the output layer of the given model.

    Args:
        model: any nn.Module exposing named_parameters().
        param_name: fully-qualified parameter name to regularize; the default
            'output_layer.weight' preserves the original behaviour.

    Returns:
        0-d tensor with the sum of squares of the matching parameter, or
        None if no parameter with that name exists (the original implicit
        behaviour — callers should guard against it).
    """
    return next(
        (torch.sum(torch.square(param))
         for name, param in model.named_parameters()
         if name == param_name),
        None,
    )


class Unsupervised(nn.Module):
    """Unsupervised MSE loss computed only over positions selected by a mask."""

    def __init__(self, reduction: str = 'mean'):
        """
        Args:
            reduction: forwarded to nn.MSELoss ('mean', 'sum', or 'none').
        """
        super().__init__()
        self.reduction = reduction
        self.mse_loss = nn.MSELoss(reduction=self.reduction)

    def forward(self,
                y_pred: torch.Tensor,
                y_true: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
        """Compute the masked MSE between a prediction and a target.

        Args:
            y_pred: Estimated values
            y_true: Target values
            mask: boolean tensor with 0s at places where values should be ignored and 1s where they should be considered

        Returns
        -------
        if reduction == 'none':
            (num_active,) Loss for each active batch element as a tensor with gradient attached.
        if reduction == 'mean':
            scalar mean loss over batch as a tensor with gradient attached.
        """
        # Boolean indexing flattens to just the active positions — equivalent
        # to torch.masked_select for a same-shaped boolean mask. One may also
        # elementwise multiply y_pred and y_true with the inverted mask.
        active_pred = y_pred[mask]
        active_true = y_true[mask]
        return self.mse_loss(active_pred, active_true)

def info_nce_loss(features, batch_size, device, temperature=0.1, n_views=2):
    """SimCLR-style InfoNCE logits/labels for contrastive training.

    Adapted from https://github.com/sthalles/SimCLR/blob/master/simclr.py

    Args:
        features: (batch_size * n_views, D) embeddings, views stacked along dim 0.
        batch_size: number of distinct samples per batch.
        device: device on which to place masks/labels.
        temperature: softmax temperature dividing the logits.
        n_views: number of augmented views per sample.

    Returns:
        logits: (batch_size * n_views, n_views*batch_size - 1) tensor with the
            positive similarity in column 0 followed by the negatives.
        labels: zeros of shape (batch_size * n_views,), dtype long — the
            positive is always class 0 for cross-entropy.
    """
    total = batch_size * n_views
    # Sample identity per row: row i and row i + batch_size are views of the
    # same sample, hence mutual positives.
    sample_ids = torch.arange(batch_size).repeat(n_views)
    pos_mask = (sample_ids.unsqueeze(0) == sample_ids.unsqueeze(1)).float()
    pos_mask = pos_mask.to(device)

    normed = F.normalize(features, dim=1)
    sims = torch.matmul(normed, normed.T)

    # Drop self-similarities (the diagonal) from both the mask and the matrix.
    diagonal = torch.eye(total, dtype=torch.bool).to(device)
    pos_mask = pos_mask[~diagonal].view(total, -1)
    sims = sims[~diagonal].view(total, -1)

    positives = sims[pos_mask.bool()].view(total, -1)
    negatives = sims[~pos_mask.bool()].view(total, -1)

    logits = torch.cat([positives, negatives], dim=1) / temperature
    labels = torch.zeros(total, dtype=torch.long).to(device)
    return logits, labels

def entropy_loss(x, y):
    """Mean cross-entropy between logits `x` (N, C) and class-index targets `y` (N,)."""
    # Functional form of nn.CrossEntropyLoss() with its default 'mean' reduction.
    return F.cross_entropy(x, y)

def triplet_loss_pu(ref, pos, neg, loss_fn, args):
    """Positive-Unlabeled triplet-style loss on cosine similarities.

    Args:
        ref: (B, D) anchor embeddings.
        pos: (B, D) embeddings of known positives, paired row-wise with `ref`.
        neg: (B, D) embeddings of unlabeled samples (treated as a PU mixture).
        loss_fn: callable(prediction, target) -> scalar loss (e.g. nn.MSELoss()).
        args: namespace providing `device` and `w_pu` (weight of the
            "unlabeled samples are positive" term; presumably in [0, 1] —
            TODO confirm with caller).

    Returns:
        Scalar tensor: p_loss + w_pu * u_loss + (1 - w_pu) * n_loss.
    """
    # Cosine similarities via dot products of L2-normalized rows.
    ref = F.normalize(ref, dim=1)
    pos = F.normalize(pos, dim=1)
    neg = F.normalize(neg, dim=1)
    sim_pos = torch.matmul(ref, pos.T)
    sim_unknown = torch.matmul(ref, neg.T)

    batch = ref.shape[0]
    n_off_diag = batch * batch - batch
    targets_pos = torch.ones(batch, device=args.device)
    targets_neg = torch.zeros(n_off_diag, device=args.device)
    targets_u = torch.ones(n_off_diag, device=args.device)

    mask = torch.eye(batch, dtype=torch.bool, device=args.device)

    # Diagonal of sim_pos pairs each anchor with its own positive.
    prob_pos = sim_pos[mask].reshape(batch, -1).squeeze()
    p_loss = loss_fn(prob_pos, targets_pos)

    # Off-diagonal similarities to unlabeled samples are scored both as
    # negatives (target 0) and as positives (target 1); w_pu blends the two.
    prob_neg = sim_unknown[~mask].squeeze()
    n_loss = loss_fn(prob_neg, targets_neg)
    u_loss = loss_fn(1 - prob_neg, targets_u)

    return p_loss + args.w_pu * u_loss + (1 - args.w_pu) * n_loss