#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import torch
import torch.nn.functional as F

smooth = 1.
epsilon = 1e-6


def computeLoss(loss_function, predict, label, loss_weights=None):
    """
    Accumulate loss over one or more predictions with one or more loss functions.

    This method defaults to using the same loss function for each predict-label pair.

    :param loss_function: a single loss callable, or a list of loss callables
        that are summed per prediction
    :param predict: the outputs of the model; a list of tensors (e.g. deep
        supervision heads) or a single tensor
    :param label: the ground-truth tensor shared by all predictions
    :param loss_weights: per-prediction weights, used only when ``predict`` is
        a list; defaults to 1.0 for every prediction
    :return: scalar loss tensor on ``label``'s device
    """
    # BUG FIX: the accumulators were initialized with torch.rand, which added
    # uniform random noise to every returned loss value; start from zero instead.
    total_loss = torch.zeros(size=(1,), device=label.device)
    if isinstance(predict, list):
        if loss_weights is None:
            # previously this path crashed with loss_weights=None
            loss_weights = [1.0] * len(predict)
        for i, pre in enumerate(predict):
            if isinstance(loss_function, list):
                tmp_loss = torch.zeros(size=(1,), device=label.device)
                for loss_f in loss_function:
                    tmp_loss = tmp_loss + loss_f(pre, label)
                total_loss = total_loss + tmp_loss * loss_weights[i]
            else:
                total_loss = total_loss + loss_function(pre, label) * loss_weights[i]
    else:
        if isinstance(loss_function, list):
            for loss_f in loss_function:
                total_loss = total_loss + loss_f(predict, label)
        else:
            total_loss = loss_function(predict, label)
    return total_loss


def sad_loss(pred, target, encoder_flag=False):
    """
    AD: attention distillation loss.

    Compares two attention maps after per-sample softmax normalization.

    :param pred: predicted attention map; assumed 5-D (nB, C, D, H, W) since
        trilinear interpolation requires 5-D input — TODO confirm upstream
    :param target: target attention map; detached so no gradient flows to it
    :param encoder_flag: True = encoder-side AD (target is the smaller map),
        False = decoder-side AD (prediction is the smaller map)
    :return: scalar MSE between the softmax-normalized flattened maps
    """
    target = target.detach()
    same_resolution = (target.size(-1) == pred.size(-1)) and (target.size(-2) == pred.size(-2))
    if not same_resolution:
        # maps come from consecutive layers, so they differ by a factor of 2
        if encoder_flag:
            # target is smaller than prediction: upsample target
            target = F.interpolate(target, scale_factor=2, mode='trilinear')
        else:
            # prediction is smaller than target: upsample prediction
            pred = F.interpolate(pred, scale_factor=2, mode='trilinear')

    num_batch = pred.size(0)
    # flatten each sample and normalize to a distribution before comparing
    pred = F.softmax(pred.view(num_batch, -1), dim=1)
    target = F.softmax(target.view(num_batch, -1), dim=1)
    return F.mse_loss(pred, target)


def dice_loss(pred, target, mask=None, num_category=3, smooth=1.):
    """
    Multi-category soft Dice (DSC) loss, averaged over the foreground categories.

    :param pred: raw logits of shape (nB, nC, D, H, W); softmax is applied here
    :param target: integer label volume with values in [0, num_category)
    :param mask: optional mask of valid regions (voxels with mask > 0 are kept)
    :param num_category: number of categories including background (default 3,
        matching the original hard-coded value)
    :param smooth: additive smoothing avoiding division by zero (default 1.)
    :return: 1 - mean foreground Dice, or None if the mask selects no voxels
    """
    pred = F.softmax(pred, dim=1)  # nB, nC, D, H, W
    # bring the category axis first so pred[c] selects one category's map
    pred = pred.permute((1, 0, 2, 3, 4)).contiguous()
    loss = None

    if mask is not None:
        mask = mask.view(-1)
        idcs_effective = (mask > 0)
        if torch.sum(idcs_effective) == 0:
            # nothing valid to score — keep the original None sentinel
            return loss

    # category 0 is background and is deliberately skipped
    for curcategory in range(1, num_category):
        iflat = pred[curcategory].view(-1)
        tflat = (target == curcategory).view(-1).float()
        if mask is not None:
            iflat = iflat[idcs_effective]
            tflat = tflat[idcs_effective]
        intersection = torch.sum(iflat * tflat)
        curdice = (2. * intersection + smooth) / (torch.sum(iflat) + torch.sum(tflat) + smooth)
        loss = curdice if loss is None else loss + curdice
    return 1. - loss / float(num_category - 1)


def binary_dice_loss(pred, target, smooth=1.):
    """
    Binary soft Dice (DSC) loss.

    :param pred: predicted probability map (any shape; flattened internally)
    :param target: binary target map of the same number of elements
    :param smooth: additive smoothing avoiding division by zero (default 1.,
        matching the original module-level constant)
    :return: 1 - Dice coefficient
    """
    iflat = pred.view(-1)
    tflat = target.view(-1)
    intersection = torch.sum(iflat * tflat)
    return 1. - ((2. * intersection + smooth) / (torch.sum(iflat) + torch.sum(tflat) + smooth))


def cross_entropy(y_pred, y_true, mask=None):
    """
    Weighted multi-category cross-entropy loss.

    :param y_pred: raw logits of shape (nB, nC, D, H, W) with nC = 3
    :param y_true: integer label volume with values in {0, 1, 2}
    :param mask: optional mask of valid regions (voxels with mask > 0 are kept)
    :return: scalar loss, or None if the mask selects no voxels
    """
    num_category = 3  # containing background and others
    y_true = y_true.view(-1)
    # BUG FIX: flatten predictions to (N, C) unconditionally — previously this
    # reshape only happened on the mask path, so the mask-less call passed a
    # 5-D prediction with a 1-D target to F.cross_entropy and failed.
    y_pred = y_pred.permute((0, 2, 3, 4, 1)).contiguous().view(-1, num_category)
    if mask is not None:
        mask = mask.view(-1)
        idcs_effective = (mask > 0)
        if torch.sum(idcs_effective) == 0:
            return None
        y_true = y_true[idcs_effective]
        y_pred = y_pred[idcs_effective]

    # more weights on arteries and veins; allocate on the prediction's device
    # (the previous hard-coded .cuda() broke CPU execution)
    weights = torch.tensor([1., 20., 20.], device=y_pred.device)
    return F.cross_entropy(y_pred, y_true, reduction='mean', weight=weights)


def binary_cross_entropy(y_pred, y_true):
    """
    Mean binary cross-entropy between a probability map and a binary target.

    : param y_pred: input prediction, probabilities in [0, 1]
    : param y_true: input target, binary values
    : return: scalar mean BCE loss
    """
    # cast both operands to float32 before handing off to the functional API
    return F.binary_cross_entropy(y_pred.float(), y_true.float(), reduction='mean')


def binary_focal_loss(y_pred, y_true, alpha=0.25, gamma=2.0, eps=1e-6):
    """
    Binary focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    : param y_pred: predicted probabilities in [0, 1]
    : param y_true: binary target; values > 0 are treated as positives
    : param alpha: weight on the positive term (negatives get 1 - alpha), default=0.25
    : param gamma: focusing parameter penalizing wrong predictions, default=2
    : param eps: clipping bound preventing log(0) NaN/Inf, default=1e-6
        (generalized from the former module-level ``epsilon`` constant)
    : return: scalar loss tensor (0 if the input is empty)
    """
    # clip to prevent NaN's and Inf's, then flatten everything to 1-D floats
    y_pred_flat = torch.clamp(y_pred, min=eps, max=1. - eps).view(-1).float()
    y_true_flat = y_true.detach().view(-1).float()
    loss = 0

    pos = (y_true_flat > 0)
    y_pred_pos = y_pred_flat[pos]
    y_pred_neg = y_pred_flat[~pos]

    if y_pred_pos.numel() != 0:
        # positive samples: -(1 - p)^gamma * log(p), weighted by alpha
        logpt = torch.log(y_pred_pos)
        loss += -1. * torch.mean(torch.pow((1. - y_pred_pos), gamma) * logpt) * alpha

    if y_pred_neg.numel() != 0:
        # negative samples: -p^gamma * log(1 - p), weighted by (1 - alpha)
        logpt2 = torch.log(1. - y_pred_neg)
        loss += -1. * torch.mean(torch.pow(y_pred_neg, gamma) * logpt2) * (1. - alpha)

    return loss


def focal_loss(y_pred, y_true, mask=None, alpha=0.25, gamma=2.0):
    """
    Multi-category focal loss over 3 categories (positive-sample term only).

    Unlike ``binary_focal_loss``, only the positive term
    -(1 - p)^gamma * log(p) is accumulated per category; there is no
    negative-sample term here.

    :param y_pred: prediction tensor of shape (nB, nC, D, H, W).
        NOTE(review): the softmax below is deliberately commented out, so this
        appears to expect values already in [0, 1] — confirm with the caller.
    :param y_true: target label volume with integer categories in {0, 1, 2}
    :param mask: mask of valid regions; voxels with mask > 0 are kept
    :param alpha: balancing positive and negative samples, default=0.25
        (background gets 1 - alpha; each foreground category gets
        alpha / (num_category - 1))
    :param gamma: penalizing wrong predictions, default=2
    :return: scalar loss tensor, or the int 0 if the mask selects no voxels
        or no category has positive samples
    """
    # alpha balance weight for unbalanced positive and negative samples
    # clip to prevent NaN's and Inf's
    num_category = 3
    # per-category alpha: [1 - alpha, alpha/2, alpha/2] for num_category = 3
    alpha_category = [1. - alpha] + [alpha / (num_category - 1.)] * (num_category - 1)
    # first perform Softmax on the prediction results; nB, nC, D, H, W
    # y_pred_flatten = F.softmax(y_pred, dim=1)
    y_pred_flatten = y_pred
    y_pred_flatten = torch.clamp(y_pred_flatten, min=epsilon, max=1. - epsilon)
    # category axis first so y_pred_flatten[c] selects one category's map
    y_pred_flatten = y_pred_flatten.permute(1, 0, 2, 3, 4).contiguous()
    loss = 0
    y_true_flatten = y_true.detach()
    if mask is not None:
        mask_flatten = mask.view(-1)
        idcs_effective = (mask_flatten > 0)
        if torch.sum(idcs_effective) == 0:
            # nothing valid to score — return the int 0 sentinel
            return loss

    # note: unlike dice_loss, category 0 (background) IS included here,
    # weighted by alpha_category[0] = 1 - alpha
    for curcategory in range(0, num_category):
        curtarget = (y_true_flatten == curcategory)
        curpred = y_pred_flatten[curcategory]

        iflat = curpred.view(-1).float()
        tflat = curtarget.view(-1).float()
        if mask is not None:
            iflat = iflat[idcs_effective]
            tflat = tflat[idcs_effective]

        # keep only the voxels that belong to the current category
        idcs = (tflat > 0)
        tflat_pos = tflat[idcs]
        iflat_pos = iflat[idcs]

        if tflat_pos.size(0) != 0 and iflat_pos.size(0) != 0:
            # focal term on positives only: -(1 - p)^gamma * log(p)
            logpt = torch.log(iflat_pos)
            loss += -1. * torch.mean(torch.pow((1. - iflat_pos), gamma) * logpt) * alpha_category[curcategory]

    return loss

# def binary_focal_loss(y_pred, y_true, alpha=0.25, gamma=2.0):
# 	"""
# 	Focal loss
# 	: param y_pred: input prediction
# 	: param y_true: input target
# 	: param alpha: balancing positive and negative samples, default=0.75
# 	: param gamma: penalizing wrong predictions, default=2
# 	"""
# 	# alpha balance weight for unbalanced positive and negative samples
# 	# clip to prevent NaN's and Inf's
# 	y_pred = F.relu(y_pred)
# 	y_true = F.relu(y_true)
# 	y_pred = torch.clamp(y_pred, min=epsilon, max=1.-epsilon)
# 	y_true = y_true.view(-1).float()
# 	y_pred = y_pred.view(-1).float()
# 	idcs = (y_true > 0)
# 	y_true_pos = y_true[idcs]
# 	y_pred_pos = y_pred[idcs]
# 	y_true_neg = y_true[~idcs]
# 	y_pred_neg = y_pred[~idcs]
#
# 	if (y_pred_pos.size(0) != 0 and y_true_pos.size(0) != 0) and (y_pred_neg.size(0) != 0 and y_true_neg.size(0) != 0):
# 		# positive samples
# 		logpt = torch.log(y_pred_pos)
# 		loss = -1. * torch.mean(torch.pow((1. - y_pred_pos), gamma) * logpt) * alpha
# 		# negative samples
# 		logpt2 = torch.log(1. - y_pred_neg)
# 		loss += -1. * torch.mean(torch.pow(y_pred_neg, gamma) * logpt2) * (1. - alpha)
# 		return loss
# 	else:
# 		# use binary cross entropy to avoid NaN/Inf caused by missing positive or negative samples
# 		return F.binary_cross_entropy(y_pred, y_true)


# def focal_loss(y_pred, y_true, mask=None, alpha=0.25, gamma=2.0):
# 	"""
# 	Focal loss
# 	:param y_pred: input prediction
# 	:param y_true: input target
# 	:param mask: mask of valid regions
# 	:param alpha: balancing positive and negative samples, default=0.25
# 	:param gamma: penalizing wrong predictions, default=2
# 	"""
# 	# alpha balance weight for unbalanced positive and negative samples
# 	# clip to prevent NaN's and Inf's
# 	num_category = 3
# 	# first perform Softmax on the prediction results; nB, nC, D, H, W
# 	y_pred = F.softmax(y_pred, dim=1)
# 	y_pred = torch.clamp(y_pred, min=epsilon, max=1. - epsilon)
# 	y_pred = y_pred.permute(1, 0, 2, 3, 4).contiguous()
#
# 	loss = None
# 	if mask is not None:
# 		mask = mask.view(-1)
# 		idcs_effective = (mask > 0)
# 		if torch.sum(idcs_effective) == 0:
# 			return loss
#
# 	for curcategory in range(1, num_category):
# 		curtarget = (y_true == curcategory)
# 		curpred = y_pred[curcategory]
# 		iflat = curpred.view(-1).float()
# 		tflat = curtarget.view(-1).float()
# 		if mask is not None:
# 			iflat = iflat[idcs_effective]
# 			tflat = tflat[idcs_effective]
#
# 		idcs = (tflat > 0)
# 		tflat_pos = tflat[idcs]
# 		iflat_pos = iflat[idcs]
# 		tflat_neg = tflat[~idcs]
# 		iflat_neg = iflat[~idcs]
#
# 		curfocal = None
#
# 		if (iflat_pos.size(0) != 0 and tflat_pos.size(0) != 0) and (iflat_neg.size(0) != 0 and tflat_neg.size(0) != 0):
# 			# positive samples
# 			logpt = torch.log(iflat_pos)
# 			curfocal = -1. * torch.mean(torch.pow((1. - iflat_pos), gamma) * logpt) * alpha
# 			# negative samples
# 			logpt2 = torch.log(1. - iflat_neg)
# 			curfocal += -1. * torch.mean(torch.pow(iflat_neg, gamma) * logpt2) * (1. - alpha)
# 		else:
# 			# use binary cross entropy to avoid NaN/Inf caused by missing positive or negative samples
# 			curfocal = F.binary_cross_entropy(iflat, tflat)
#
# 		if loss is None:
# 			loss = curfocal
# 		else:
# 			loss += curfocal
# 	return loss/float(num_category-1)
