'''
 * @Author: Benjay·Shaw
 * @Date: 2024-10-30 17:48:20
 * @LastEditors: Benjay·Shaw
 * @LastEditTime: 2024-10-31 15:19:05
 * @Description: Loss functions (BCE+Dice, IoU) and L1/L2 regularization helpers
'''
import paddle
import numpy as np


class BceDiceLoss(paddle.nn.Layer):
    """Combined BCE + soft-Dice loss for binary segmentation.

    Total loss = BCE(y_pred, y_true) + (1 - soft_dice(y_true, y_pred)),
    with both terms weighted equally.
    """

    def __init__(self, model, batch=True):
        """
        Args:
            model: stored as ``self.model`` but not used by the loss itself;
                kept for interface compatibility with existing callers.
            batch (bool): if True, compute one global Dice score over the
                whole batch; otherwise compute a per-sample score and average.
        """
        super(BceDiceLoss, self).__init__()
        self.batch = batch
        # BUGFIX: the original called ``.cuda(blocking=True)`` on the layer —
        # a PyTorch idiom. ``paddle.nn.Layer`` has no ``cuda`` method, so
        # construction raised AttributeError. Device placement in paddle is
        # governed by the global device, so the call is simply dropped.
        self.bce_loss = paddle.nn.BCELoss()
        self.model = model

    def soft_dice_coeff(self, y_true, y_pred):
        """Soft Dice coefficient with additive smoothing (smooth = 1.0)."""
        smooth = 1.0
        if self.batch:
            # One global score over every element in the batch.
            i = paddle.sum(x=y_true)
            j = paddle.sum(x=y_pred)
            intersection = paddle.sum(x=y_true * y_pred)
        else:
            # Per-sample score: reduce the three non-batch axes.
            # NOTE(review): assumes 4-D input (N, C, H, W) — confirm with callers.
            i = y_true.sum(axis=1).sum(axis=1).sum(axis=1)
            j = y_pred.sum(axis=1).sum(axis=1).sum(axis=1)
            intersection = (y_true * y_pred).sum(axis=1).sum(axis=1).sum(axis=1)
        score = (2.0 * intersection + smooth) / (i + j + smooth)
        return score.mean()

    def soft_dice_loss(self, y_true, y_pred):
        """Dice loss = 1 - Dice coefficient."""
        return 1 - self.soft_dice_coeff(y_true, y_pred)

    def __call__(self, y_true, y_pred):
        """Return BCE(y_pred, y_true) + Dice loss, equally weighted."""
        a = self.bce_loss(y_pred, y_true)
        b = self.soft_dice_loss(y_true, y_pred)
        return a + b


class IoULoss(paddle.nn.Layer):
    """Soft IoU (Jaccard) loss.

    paper: https://link.springer.com/chapter/10.1007/978-3-319-50835-1_22

    Returns the *negative* mean IoU so that minimising the loss
    maximises the IoU.
    """

    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
        smooth=1.0, square=False):
        """
        Args:
            apply_nonlin: optional callable applied to the prediction first
                (e.g. a softmax).
            batch_dice: if True, also reduce over the batch axis.
            do_bg: if False, the background channel (index 0) is excluded.
            smooth: additive smoothing term.
            square: forwarded to ``get_tp_fp_fn``; squares tp/fp/fn before
                summation.
        """
        super(IoULoss, self).__init__()
        self.square = square
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth

    def forward(self, x, y, loss_mask=None):
        """Compute -mean(IoU) between prediction ``x`` and target ``y``."""
        ndim = len(tuple(x.shape))
        # Reduce over spatial axes; include the batch axis for batch_dice.
        reduce_axes = ([0] if self.batch_dice else []) + list(range(2, ndim))
        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)
        tp, fp, fn = get_tp_fp_fn(x, y, reduce_axes, loss_mask, self.square)
        iou = (tp + self.smooth) / (tp + fp + fn + self.smooth)
        if not self.do_bg:
            # Drop the background channel (index 0).
            iou = iou[1:] if self.batch_dice else iou[:, 1:]
        return -iou.mean()


def l2_regularization(model, alpha):
    """Return 0.5 * alpha * sum of squared non-bias parameters of ``model``."""
    total = paddle.to_tensor(data=0.0, stop_gradient=False)
    for param_name, param in model.named_parameters():
        # Biases are conventionally left unregularised.
        if 'bias' in param_name:
            continue
        total = total + 0.5 * alpha * paddle.sum(x=paddle.pow(x=param, y=2))
    return total


def l1_regularization(model, beta):
    """Return beta * sum of absolute values of non-bias parameters of ``model``."""
    total = paddle.to_tensor(data=0.0, stop_gradient=False)
    for param_name, param in model.named_parameters():
        # Biases are conventionally left unregularised.
        if 'bias' in param_name:
            continue
        total = total + beta * paddle.sum(x=paddle.abs(x=param))
    return total


def flatten(tensor):
    """Flattens a given tensor such that the channel axis is first.

    The shapes are transformed as follows:
       (N, C, D, H, W) -> (C, N * D * H * W)
    """
    C = tensor.shape[1]
    # Bring the channel axis to the front, preserving the order of the rest.
    axis_order = (1, 0) + tuple(range(2, tensor.dim()))
    transposed = tensor.transpose(perm=axis_order)
    # BUGFIX: the original used the PyTorch idiom ``.contiguous().view(C, -1)``;
    # paddle's ``view`` does not accept positional shape ints, so use
    # ``reshape`` (which also handles non-contiguous layouts).
    return transposed.reshape((C, -1))


def softmax_helper(x):
    """Numerically stable softmax over axis 1 (the channel axis)."""
    # Tile pattern that repeats a size-1 channel axis back to C channels.
    rpt = [1 for _ in range(len(tuple(x.shape)))]
    rpt[1] = x.shape[1]
    # BUGFIX: paddle's ``max`` returns the values tensor directly; the
    # original's trailing ``[0]`` was a PyTorch leftover (torch.max returns a
    # (values, indices) tuple) and would instead wrongly slice the batch axis.
    x_max = x.max(axis=1, keepdim=True).tile(repeat_times=rpt)
    # Subtract the per-position max before exp for numerical stability.
    e_x = paddle.exp(x=x - x_max)
    return e_x / e_x.sum(axis=1, keepdim=True).tile(repeat_times=rpt)


def sum_tensor(inp, axes, keepdim=False):
    """Sum ``inp`` over every axis listed in ``axes``.

    With ``keepdim`` the reduced axes are retained as size-1 dims, so the
    reduction order does not matter; without it, axes are reduced from
    highest to lowest so earlier reductions do not shift later indices.
    """
    unique_axes = np.unique(axes).astype(int)
    if keepdim:
        for axis in unique_axes:
            inp = inp.sum(axis=int(axis), keepdim=True)
        return inp
    for axis in sorted(unique_axes, reverse=True):
        inp = inp.sum(axis=int(axis))
    return inp


def get_tp_fp_fn(net_output, gt, axes=None, mask=None, square=False):
    """
    net_output must be (b, c, x, y(, z)))
    gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
    if mask is provided it must have shape (b, 1, x, y(, z)))
    :param net_output: network prediction (e.g. after softmax/sigmoid)
    :param gt: ground truth, either a label map or a one-hot encoding
    :param axes: axes to reduce over; defaults to the spatial axes
    :param mask: mask must be 1 for valid pixels and 0 for invalid pixels
    :param square: if True then fp, tp and fn will be squared before summation
    :return: (tp, fp, fn), each summed over ``axes``
    """
    if axes is None:
        axes = tuple(range(2, len(tuple(net_output.shape))))
    shp_x = tuple(net_output.shape)
    shp_y = tuple(gt.shape)
    with paddle.no_grad():
        if len(shp_x) != len(shp_y):
            # Insert the missing channel axis: (b, x, y(, z)) -> (b, 1, x, y(, z)).
            # BUGFIX: ``reshape`` replaces the PyTorch-style ``view`` call.
            gt = gt.reshape((shp_y[0], 1, *shp_y[1:]))
        if all(i == j for i, j in zip(tuple(net_output.shape), tuple(gt.shape))):
            # gt already matches net_output's shape -> it is one-hot encoded.
            y_onehot = gt
        else:
            # gt is a label map: scatter 1s along the channel axis.
            gt = gt.astype(dtype='int64')
            # BUGFIX: the original checked ``net_output.device.type`` and
            # called ``.cuda(...)`` — paddle tensors expose ``.place``, not
            # ``.device``, so that branch raised AttributeError. paddle.zeros
            # already allocates on the current global device, so no explicit
            # transfer is needed; match net_output's dtype for the multiply.
            y_onehot = paddle.zeros(shape=shp_x, dtype=net_output.dtype)
            y_onehot.put_along_axis_(axis=1, indices=gt, values=1,
                broadcast=False)
    tp = net_output * y_onehot
    fp = net_output * (1 - y_onehot)
    fn = (1 - net_output) * y_onehot
    if mask is not None:
        # Zero out invalid pixels, channel by channel.
        tp = paddle.stack(x=tuple(x_i * mask[:, 0] for x_i in
            paddle.unbind(input=tp, axis=1)), axis=1)
        fp = paddle.stack(x=tuple(x_i * mask[:, 0] for x_i in
            paddle.unbind(input=fp, axis=1)), axis=1)
        fn = paddle.stack(x=tuple(x_i * mask[:, 0] for x_i in
            paddle.unbind(input=fn, axis=1)), axis=1)
    if square:
        tp = tp ** 2
        fp = fp ** 2
        fn = fn ** 2
    tp = sum_tensor(tp, axes, keepdim=False)
    fp = sum_tensor(fp, axes, keepdim=False)
    fn = sum_tensor(fn, axes, keepdim=False)
    return tp, fp, fn
