import torch
from torch.nn import CrossEntropyLoss


def self_entropy(output):
    """Mean Shannon entropy of the softmax distribution over ``output``.

    Args:
        output: logits tensor; the class dimension is the last one.

    Returns:
        Scalar tensor: -(p * log p) averaged over the class dimension
        (note: averaged, not summed — kept for backward compatibility with
        the original scaling) and then over all remaining positions.

    Uses ``log_softmax`` instead of ``log(softmax(...))``: with extreme
    logits softmax underflows to exactly 0, and 0 * log(0) = 0 * (-inf)
    produced NaN in the original formulation.
    """
    log_probs = torch.log_softmax(output, dim=-1)
    probs = log_probs.exp()
    loss = -(probs * log_probs).mean(dim=-1)
    return loss.mean()

def contrastive_ent(args, labels, feature):
    """Supervised contrastive loss (SupCon-style) over flattened positions.

    Args:
        args: config object; reads ``args.cl_temp`` (similarity temperature)
            and ``args.ignore_pad_con`` (if true, pad positions are removed
            from the contrast/denominator set as well).
        labels: integer label tensor, flattened to shape (N,). Positions
            equal to ``CrossEntropyLoss().ignore_index`` (-100) are padding.
        feature: feature tensor, flattened to shape (N, D).

    Returns:
        Scalar loss tensor; 0.0 when no valid anchor position exists.
    """
    # Hoisted: the original constructed a CrossEntropyLoss module (three
    # times per call) just to read this constant attribute.
    ignore_index = CrossEntropyLoss().ignore_index

    feature = feature.view(-1, feature.shape[-1])
    labels = labels.view(-1)

    # Pairwise similarities; subtract the per-row max (detached) for
    # numerical stability — the shift cancels in the normalization below.
    dot_div_temp = torch.mm(feature, feature.T) / args.cl_temp
    dot_div_temp_max, _ = torch.max(dot_div_temp, dim=-1, keepdim=True)
    dot_div_temp_norm = dot_div_temp - dot_div_temp_max.detach()
    exp_dot_temp = torch.exp(dot_div_temp_norm)

    labels = labels.contiguous().view(-1, 1)
    # mask[i, j] = 1 iff positions i and j carry the same label (positives).
    mask = torch.eq(labels, labels.T).float()

    # logits_mask zeroes the diagonal: a position is never its own positive
    # and never appears in its own denominator.
    logits_mask = torch.scatter(
        torch.ones_like(mask),
        1,
        torch.arange(len(labels)).view(-1, 1).to(device=feature.device),
        0)

    mask = mask * logits_mask
    # Padding rows contribute neither positives nor denominator terms.
    pad_rows = labels.view(-1) == ignore_index
    mask[pad_rows] = torch.zeros(mask.shape[1]).to(device=feature.device)
    logits_mask[pad_rows] = torch.zeros(logits_mask.shape[1]).to(device=feature.device)

    # Valid anchors: at least one positive (mask.sum != 0) and a label that
    # is neither 0 nor -1 (both factors would zero the product).
    idx = torch.where(labels.view(-1) * mask.sum(dim=-1)
                      * (labels.view(-1) + 1) != 0)[0]
    if len(idx) == 0:
        return torch.tensor(0.0).to(device=feature.device)
    logits_mask = logits_mask[idx]
    exp_dot_temp = exp_dot_temp[idx]
    dot_div_temp_norm = dot_div_temp_norm[idx]
    mask = mask[idx]

    if args.ignore_pad_con:
        # Also drop pad positions from the contrast (column) set.
        idx_not_pad = torch.where(labels.view(-1) != ignore_index)[0]
        logits_mask = logits_mask[:, idx_not_pad]
        exp_dot_temp = exp_dot_temp[:, idx_not_pad]
        dot_div_temp_norm = dot_div_temp_norm[:, idx_not_pad]
        mask = mask[:, idx_not_pad]

    exp_logits = exp_dot_temp * logits_mask

    # log p(j | i) over the allowed contrast set; average over positives per
    # anchor, then over anchors, and negate.
    log_prob = dot_div_temp_norm - torch.log(exp_logits.sum(1, keepdim=True))
    mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
    scloss = - mean_log_prob_pos.mean()

    return scloss
