"""
Mostly borrowed from: https://github.com/Alibaba-MIIL/ASL
"""

import torch
import torch.nn as nn


class AsymmetricLoss(nn.Module):
    def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False):
        super(AsymmetricLoss, self).__init__()

        self.gamma_neg = gamma_neg
        self.gamma_pos = gamma_pos
        self.clip = clip
        self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
        self.eps = eps

    def forward(self, x, y):
        """
        Parameters
        ----------
        x: input logits
        y: targets (multi-label binarized vector)
        """

        # Calculating probabilities
        x_sigmoid = torch.sigmoid(x)
        xs_pos = x_sigmoid
        xs_neg = 1 - x_sigmoid

        # Asymmetric clipping: shift the negative probabilities up by the margin
        if self.clip is not None and self.clip > 0:
            xs_neg = (xs_neg + self.clip).clamp(max=1)

        # Basic binary cross-entropy
        los_pos = y * torch.log(xs_pos.clamp(min=self.eps, max=1 - self.eps))
        los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps, max=1 - self.eps))
        loss = los_pos + los_neg

        # Asymmetric focusing
        if self.gamma_neg > 0 or self.gamma_pos > 0:
            if self.disable_torch_grad_focal_loss:
                torch.set_grad_enabled(False)
            pt0 = xs_pos * y
            pt1 = xs_neg * (1 - y)  # pt = p if t > 0 else 1 - p
            pt = pt0 + pt1
            one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
            one_sided_w = torch.pow(1 - pt, one_sided_gamma)
            if self.disable_torch_grad_focal_loss:
                torch.set_grad_enabled(True)
            loss *= one_sided_w

        return -loss.sum()
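
# Illustrative usage sketch (an addition, not from the original repo; the
# shapes and hyperparameters are assumptions). The loss expects raw logits
# and a {0, 1} multi-label target matrix of the same shape, and returns a
# scalar summed over batch and classes:
#
#     criterion = AsymmetricLoss(gamma_neg=4, gamma_pos=1, clip=0.05)
#     logits = torch.randn(8, 20, requires_grad=True)  # (batch, num_classes)
#     targets = torch.randint(0, 2, (8, 20)).float()   # binarized labels
#     loss = criterion(logits, targets)
#     loss.backward()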


class AsymmetricLossOptimized(nn.Module):
    '''Notice - optimized version, minimizes memory allocation and gpu uploading,
    favors inplace operations.'''

    def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-5, disable_torch_grad_focal_loss=False):
        super(AsymmetricLossOptimized, self).__init__()

        self.gamma_neg = gamma_neg
        self.gamma_pos = gamma_pos
        self.clip = clip
        self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
        self.eps = eps

        # prevent memory allocation and gpu uploading every iteration, and
        # encourage inplace operations
        self.targets = self.anti_targets = self.xs_pos = self.xs_neg = self.asymmetric_w = self.loss = None

    def forward(self, x, y):
        """
        Parameters
        ----------
        x: input logits
        y: targets (multi-label binarized vector)
        """

        self.targets = y
        self.anti_targets = 1 - y

        # Calculating probabilities
        self.xs_pos = torch.sigmoid(x)
        self.xs_neg = 1.0 - self.xs_pos

        # Asymmetric clipping, done in place
        if self.clip is not None and self.clip > 0:
            self.xs_neg.add_(self.clip).clamp_(max=1)

        # Basic binary cross-entropy
        self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
        self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=self.eps)))

        # Asymmetric focusing
        if self.gamma_neg > 0 or self.gamma_pos > 0:
            if self.disable_torch_grad_focal_loss:
                # keep the focal weights out of the autograd graph
                with torch.no_grad():
                    self.xs_pos = self.xs_pos * self.targets
                    self.xs_neg = self.xs_neg * self.anti_targets
                    self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
                                                  self.gamma_pos * self.targets + self.gamma_neg * self.anti_targets)
                self.loss *= self.asymmetric_w
            else:
                self.xs_pos = self.xs_pos * self.targets
                self.xs_neg = self.xs_neg * self.anti_targets
                self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
                                              self.gamma_pos * self.targets + self.gamma_neg * self.anti_targets)
                self.loss *= self.asymmetric_w

        # Normalize by batch size and number of classes, then scale by 1000
        _loss = -self.loss.sum() / x.size(0)
        _loss = _loss / y.size(1) * 1000

        return _loss
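

if __name__ == "__main__":
    # Minimal smoke test (an illustrative addition, not part of the original
    # file); random logits and targets stand in for real model outputs. The
    # two losses are not numerically identical: AsymmetricLoss returns a plain
    # sum, while AsymmetricLossOptimized normalizes by batch size and number
    # of classes, then scales by 1000.
    torch.manual_seed(0)
    logits = torch.randn(4, 10, requires_grad=True)
    targets = torch.randint(0, 2, (4, 10)).float()

    # disable_torch_grad_focal_loss=True keeps the in-place focal weighting of
    # the optimized variant out of the autograd graph
    for criterion in (AsymmetricLoss(),
                      AsymmetricLossOptimized(disable_torch_grad_focal_loss=True)):
        loss = criterion(logits, targets)
        loss.backward()
        logits.grad = None  # reset accumulated gradients between criteria
        print(f"{criterion.__class__.__name__}: {loss.item():.4f}")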