| | from typing import Callable |
| |
|
| | import torch |
| | from .ddp_allgather import AllGatherGrad |
| | from .tensor_utilities import sum_tensor |
| | from torch import nn |
| |
|
| |
|
class SoftDiceLoss(nn.Module):
    def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1.,
                 ddp: bool = True, clip_tp: float = None):
        """
        Soft (differentiable) Dice loss, returned negated so it can be minimized.

        :param apply_nonlin: optional nonlinearity (e.g. softmax) applied to the network output first
        :param batch_dice: if True, tp/fp/fn are additionally summed over the batch axis
        :param do_bg: if False, channel 0 (background) is excluded from the mean dice
        :param smooth: additive smoothing term for numerator and denominator
        :param ddp: if True and batch_dice is set, statistics are all-gathered across DDP ranks
        :param clip_tp: if not None, true positives are clamped from below to this value
        """
        super().__init__()

        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth
        self.clip_tp = clip_tp
        self.ddp = ddp

    def forward(self, x, y, loss_mask=None):
        # Sum over spatial axes only, or additionally over the batch axis for batch dice.
        spatial_axes = list(range(2, x.ndim))
        axes = [0] + spatial_axes if self.batch_dice else spatial_axes

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False)

        # With batch dice under DDP, pool the statistics over all ranks so the
        # dice is computed over the global batch rather than per-rank.
        if self.ddp and self.batch_dice:
            tp = AllGatherGrad.apply(tp).sum(0)
            fp = AllGatherGrad.apply(fp).sum(0)
            fn = AllGatherGrad.apply(fn).sum(0)

        if self.clip_tp is not None:
            tp = torch.clip(tp, min=self.clip_tp, max=None)

        numerator = 2 * tp + self.smooth
        # clamp the denominator away from zero so smooth == 0 cannot divide by zero
        denominator = torch.clip(2 * tp + fp + fn + self.smooth, 1e-8)
        dc = numerator / denominator

        if not self.do_bg:
            # drop background: after batch pooling the class axis is axis 0,
            # otherwise it is axis 1
            dc = dc[1:] if self.batch_dice else dc[:, 1:]

        return -dc.mean()
| |
|
class MemoryEfficientSoftDiceLoss(nn.Module):
    def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1.,
                 ddp: bool = True):
        """
        Memory-frugal soft Dice loss: instead of materializing full tp/fp/fn maps it only
        keeps per-class sums (intersection, predicted volume, reference volume).
        saves 1.6 GB on Dataset017 3d_lowres

        :param apply_nonlin: optional nonlinearity (e.g. softmax) applied to the network output first
        :param batch_dice: if True, the sums are pooled over the batch axis before forming the dice
        :param do_bg: if False, channel 0 (background) is excluded
        :param smooth: additive smoothing term for numerator and denominator
        :param ddp: if True and batch_dice is set, the sums are all-gathered across DDP ranks
        """
        super().__init__()

        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth
        self.ddp = ddp

    def forward(self, x, y, loss_mask=None):
        # Record shapes before any slicing so the one-hot buffer below matches the
        # full (background included) prediction shape.
        shp_x, shp_y = x.shape, y.shape

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        if not self.do_bg:
            x = x[:, 1:]

        reduce_axes = list(range(2, len(shp_x)))

        # Building the one-hot reference needs no gradient tracking.
        with torch.no_grad():
            if len(shp_x) != len(shp_y):
                # label map without a channel axis -> insert one
                y = y.view((shp_y[0], 1, *shp_y[1:]))

            if all(i == j for i, j in zip(shp_x, shp_y)):
                # y already matches the prediction shape, presumably one-hot encoded
                y_onehot = y
            else:
                y_onehot = torch.zeros(shp_x, device=x.device, dtype=torch.bool)
                y_onehot.scatter_(1, y.long(), 1)

            if not self.do_bg:
                y_onehot = y_onehot[:, 1:]

            if loss_mask is None:
                sum_gt = y_onehot.sum(reduce_axes)
            else:
                sum_gt = (y_onehot * loss_mask).sum(reduce_axes)

        if loss_mask is None:
            intersect = (x * y_onehot).sum(reduce_axes)
            sum_pred = x.sum(reduce_axes)
        else:
            intersect = (x * y_onehot * loss_mask).sum(reduce_axes)
            sum_pred = (x * loss_mask).sum(reduce_axes)

        # With batch dice under DDP, pool the sums across all ranks first.
        if self.ddp and self.batch_dice:
            intersect = AllGatherGrad.apply(intersect).sum(0)
            sum_pred = AllGatherGrad.apply(sum_pred).sum(0)
            sum_gt = AllGatherGrad.apply(sum_gt).sum(0)

        if self.batch_dice:
            intersect = intersect.sum(0)
            sum_pred = sum_pred.sum(0)
            sum_gt = sum_gt.sum(0)

        # clamp the denominator away from zero so smooth == 0 cannot divide by zero
        dc = (2 * intersect + self.smooth) / torch.clip(sum_gt + sum_pred + self.smooth, 1e-8)
        return -dc.mean()
| |
|
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
    """
    net_output must be (b, c, x, y(, z)))
    gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
    if mask is provided it must have shape (b, 1, x, y(, z)))
    :param net_output:
    :param gt:
    :param axes: can be (, ) = no summation
    :param mask: mask must be 1 for valid pixels and 0 for invalid pixels
    :param square: if True then fp, tp and fn will be squared before summation
    :return: tp, fp, fn, tn
    """
    if axes is None:
        # default: sum over all spatial axes, keep batch and channel
        axes = tuple(range(2, net_output.ndim))

    pred_shape = net_output.shape
    gt_shape = gt.shape

    # Converting gt to one-hot form needs no gradient tracking.
    with torch.no_grad():
        if len(pred_shape) != len(gt_shape):
            # label map without a channel axis -> insert one
            gt = gt.view((gt_shape[0], 1, *gt_shape[1:]))

        if all(i == j for i, j in zip(net_output.shape, gt.shape)):
            # shapes match, presumably gt is already one-hot encoded
            y_onehot = gt
        else:
            y_onehot = torch.zeros(pred_shape, device=net_output.device)
            y_onehot.scatter_(1, gt.long(), 1)

    tp = net_output * y_onehot
    fp = net_output * (1 - y_onehot)
    fn = (1 - net_output) * y_onehot
    tn = (1 - net_output) * (1 - y_onehot)

    if mask is not None:
        with torch.no_grad():
            # replicate the (b, 1, ...) mask across the channel axis
            reps = (1, tp.shape[1]) + (1,) * (tp.ndim - 2)
            mask_rep = torch.tile(mask, reps)
        tp = tp * mask_rep
        fp = fp * mask_rep
        fn = fn * mask_rep
        tn = tn * mask_rep

    if square:
        tp = tp ** 2
        fp = fp ** 2
        fn = fn ** 2
        tn = tn ** 2

    if len(axes) > 0:
        tp = sum_tensor(tp, axes, keepdim=False)
        fp = sum_tensor(fp, axes, keepdim=False)
        fn = sum_tensor(fn, axes, keepdim=False)
        tn = sum_tensor(tn, axes, keepdim=False)

    return tp, fp, fn, tn
| |
|
| |
|
if __name__ == '__main__':
    from nnunetv2.utilities.helpers import softmax_helper_dim1

    # quick parity check: the memory-efficient implementation should match the
    # reference SoftDiceLoss on random data (smooth=0, ddp disabled)
    pred = torch.rand((2, 3, 32, 32, 32))
    ref = torch.randint(0, 3, (2, 32, 32, 32))

    common = dict(apply_nonlin=softmax_helper_dim1, batch_dice=True, do_bg=False, smooth=0, ddp=False)
    res_old = SoftDiceLoss(**common)(pred, ref)
    res_new = MemoryEfficientSoftDiceLoss(**common)(pred, ref)
    print(res_old, res_new)
| |
|