import torch
import torch.nn as nn

def self_adaptive_weighting_loss(
    out, gt_map, *, gamma=32, margin=0.25
):
    """Circle-loss-style self-adaptive weighting loss for a binary mask.

    Positions where ``gt_map`` is nonzero are treated as positives (their
    scores in ``out`` are pushed toward ``1 - margin``); zero positions are
    negatives (pushed toward ``margin``). Each score's weight (``ap``/``an``)
    adapts to how far it currently is from its optimum.

    Args:
        out: score tensor; first dimension is the batch.
        gt_map: tensor of the same shape as ``out`` whose truthy entries mark
            positive positions (cast to bool, so any nonzero value counts).
        gamma: scale factor applied to the logits (keyword-only, default 32).
        margin: relaxation margin ``m`` (keyword-only, default 0.25).

    Returns:
        Scalar tensor: the loss averaged over the batch.

    NOTE(review): the masked-select + ``view`` below assumes every batch
    element contains the same number of positives (and negatives) — confirm
    with callers, otherwise the reshape raises.
    """
    batch = out.size(0)

    # Boolean masks; `~pos_mask` avoids materializing a `1 - gt_map` tensor.
    pos_mask = gt_map.type(dtype=torch.bool)
    neg_mask = ~pos_mask

    sp = out[pos_mask].view(batch, -1)
    sn = out[neg_mask].view(batch, -1)

    # Self-adaptive weights, detached so they act as constants in backward.
    ap = torch.clamp_min(-sp.detach() + 1 + margin, min=0.)
    an = torch.clamp_min(sn.detach() + margin, min=0.)

    # Optimum targets: positives should reach 1 - margin, negatives margin.
    delta_p = 1 - margin
    delta_n = margin

    logit_p = -ap * (sp - delta_p) * gamma
    logit_n = an * (sn - delta_n) * gamma

    # softplus(logsumexp(n) + logsumexp(p)) is the numerically stable form of
    # log(1 + sum_n sum_p exp(logit_n + logit_p)); functional call avoids
    # constructing a throwaway nn.Softplus module on every invocation.
    loss_circle = torch.nn.functional.softplus(
        torch.logsumexp(logit_n, dim=1) + torch.logsumexp(logit_p, dim=1)
    )

    return loss_circle.mean()


def self_adaptive_weighting_loss_float(
    out, gt_map, *, gamma=32, margin=0.25
):
    """Float-mask variant of :func:`self_adaptive_weighting_loss`.

    Positives are the positions where ``gt_map > 0``; negatives are exactly
    the positions where ``gt_map == 0``. Entries with ``gt_map < 0`` fall
    into neither set and are silently ignored — presumably intentional, but
    worth confirming with callers.

    Args:
        out: score tensor; first dimension is the batch.
        gt_map: float tensor of the same shape as ``out``.
        gamma: scale factor applied to the logits (keyword-only, default 32).
        margin: relaxation margin ``m`` (keyword-only, default 0.25).

    Returns:
        Scalar tensor: the loss averaged over the batch.

    NOTE(review): the masked-select + ``view`` below assumes every batch
    element contains the same number of positives (and negatives).
    """
    batch = out.size(0)

    pos_mask = gt_map > 0.0
    neg_mask = gt_map == 0.0

    sp = out[pos_mask].view(batch, -1)
    sn = out[neg_mask].view(batch, -1)

    # Self-adaptive weights, detached so they act as constants in backward.
    ap = torch.clamp_min(-sp.detach() + 1 + margin, min=0.)
    an = torch.clamp_min(sn.detach() + margin, min=0.)

    # Optimum targets: positives should reach 1 - margin, negatives margin.
    delta_p = 1 - margin
    delta_n = margin

    logit_p = -ap * (sp - delta_p) * gamma
    logit_n = an * (sn - delta_n) * gamma

    # Functional softplus avoids building a throwaway nn.Softplus module per
    # call; logsumexp keeps the exponentials numerically stable.
    loss_circle = torch.nn.functional.softplus(
        torch.logsumexp(logit_n, dim=1) + torch.logsumexp(logit_p, dim=1)
    )

    return loss_circle.mean()
