#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


# def dice_coefficient(predict, target, num_classes, epsilon=1e-6):
#
#     predict = predict.permute(0, 2, 3, 4, 1).contiguous().
#     dice_coef = torch.zeros(1, ).to(predict.device, dtype=torch.float32)
#     for channel in range(predict.shape[1]):
#         num = torch.sum(torch.dot(predict[:, channel, ...].reshape(-1), target[:, channel, ...].reshape(-1)))
#         den = (torch.sum(predict[:, channel, ...]) + torch.sum(target[:, channel, ...]))
#         dice_coef += (2 * num + epsilon) / (den + epsilon)
#     # debug_loss = loss.detach().cpu().numpy()
#     return dice_coef / predict.shape[1]


# def dice_loss(predict, target, num_classes, epsilon=1e-6):
#     # About the implement of the dice loss, we can watch this passage https://zhuanlan.zhihu.com/p/269592183
#     return 1 - dice_coefficient(predict, target, num_classes, epsilon=epsilon)

class DiceLossOwn(nn.Module):
    """Multi-class soft Dice loss averaged over the foreground channels.

    Channel 0 is treated as background and excluded; each of the remaining
    ``num_classes - 1`` channels contributes one soft Dice score.

    Args:
        num_classes: number of channels in ``predict``/``target``.
        epsilon: smoothing term that avoids division by zero on empty masks.
        need_activation: when True, apply sigmoid (binary case) or softmax
            over the channel dimension before computing the loss.
    """

    def __init__(self, num_classes, epsilon=1e-6, need_activation=False):
        super(DiceLossOwn, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.need_activation = need_activation
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, predict, target):
        """Compute ``(loss, per_class_dice)``.

        Both tensors are indexed as [B, C, ...] (channels at dim 1) -- the
        ``predict[:, c, ...]`` indexing below is what fixes the layout, not
        the channels-last shape the original comment claimed.

        :param predict: logits or probabilities, shape [B, C, ...].
        :param target: one-hot encoded labels, same shape as ``predict``.
        :return: tuple of (scalar loss tensor,
            list of float Dice scores for channels 1..num_classes-1).
        """
        if self.need_activation:
            if self.num_classes == 2:
                predict = self.sigmoid(predict)
            elif self.num_classes > 2:
                predict = self.softmax(predict)
        dice_list = []
        # BUG FIX: the accumulator was created with ``.cuda()``, which crashed
        # on CPU-only runs; allocate it on the input's device instead.
        total_dice = torch.tensor(0., device=predict.device)
        for c in range(1, self.num_classes):  # skip background channel 0
            intersection = torch.sum(torch.mul(predict[:, c, ...], target[:, c, ...]))
            predict_sum = torch.sum(predict[:, c, ...])
            target_sum = torch.sum(target[:, c, ...])
            # Soft Dice: 2|X∩Y| / (|X| + |Y|), smoothed by epsilon.
            dice = (2 * intersection + self.epsilon) / (predict_sum + target_sum + self.epsilon)
            dice_list.append(dice.item())
            total_dice += dice
        loss = 1. - total_dice / float(self.num_classes - 1)
        return loss, dice_list




class BinaryDiceLoss(nn.Module):
    """Dice loss of binary class
    adapted from https://github.com/hubutui/DiceLoss-PyTorch/blob/master/loss.py
    Args:
        smooth: A float number to smooth loss, and avoid NaN error, default: 1e-6
        p: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2
        predict: A tensor of shape [N, *]
        target: A tensor of shape same with predict
        reduction: Reduction method to apply, return mean over batch if 'mean',
            return sum if 'sum', return a tensor of shape [N,] if 'none'
    Returns:
        Loss tensor according to arg reduction
    Raise:
        Exception if unexpected reduction
    """

    def __init__(self, smooth=1e-6, p=2, reduction='mean'):
        super(BinaryDiceLoss, self).__init__()
        self.smooth = smooth
        self.p = p
        self.reduction = reduction

    def forward(self, predict, target):
        assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
        # Flatten everything but the batch dimension.
        predict = predict.contiguous().view(predict.shape[0], -1)
        target = target.contiguous().view(target.shape[0], -1)

        # BUG FIX: the Dice coefficient is 2|X∩Y| / (|X|^p + |Y|^p); the factor
        # of 2 on the intersection was dropped in the copy, so even a perfect
        # prediction produced a loss of ~0.5 instead of 0.
        num = 2 * torch.sum(torch.mul(predict, target), dim=1) + self.smooth
        den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1) + self.smooth

        loss = 1 - num / den

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        elif self.reduction == 'none':
            return loss
        else:
            raise Exception('Unexpected reduction {}'.format(self.reduction))


class DiceLoss(nn.Module):
    """Dice loss, need one hot encode input
    Args:
        weights: A tensor of shape [num_classes,] with per-class loss weights
        ignore_index: class index to ignore
        predict: A tensor of shape [N, C, *]
        target: A tensor of same shape with predict
        other args pass to BinaryDiceLoss
    Return:
        same as BinaryDiceLoss, averaged over all C channels.
        NOTE(review): the divisor stays C even when ignore_index drops a
        channel -- possibly unintended, but preserved for compatibility.
    """

    def __init__(self, weights=None, ignore_index=None, **kwargs):
        super(DiceLoss, self).__init__()
        self.kwargs = kwargs  # forwarded to BinaryDiceLoss (smooth, p, reduction)
        self.weight = weights
        self.ignore_index = ignore_index

    def forward(self, predict, target):
        assert predict.shape == target.shape, 'predict & target shape do not match'
        dice = BinaryDiceLoss(**self.kwargs)
        total_loss = 0
        # predict = F.softmax(predict, dim=1)  # activation is applied at the end of the network

        for i in range(target.shape[1]):
            if i != self.ignore_index:
                dice_loss = dice(predict[:, i], target[:, i])
                if self.weight is not None:
                    assert self.weight.shape[0] == target.shape[1], \
                        'Expect weight shape [{}], get[{}]'.format(target.shape[1], self.weight.shape[0])
                    # BUG FIX: was ``self.weights[i]`` -- the attribute is
                    # ``self.weight``, so every weighted call raised AttributeError.
                    dice_loss *= self.weight[i]
                total_loss += dice_loss

        return total_loss / target.shape[1]


class FocalLoss(nn.Module):
    """Focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    Adapted from https://github.com/clcarwin/focal_loss_pytorch/blob/master/focalloss.py

    NOTE(review): ``forward`` treats ``predict`` as LOG-probabilities (the
    ``F.log_softmax`` call is commented out below), so the network is expected
    to end with a log-softmax -- confirm against the caller.

    Args:
        gamma: focusing exponent; 0 reduces this to (weighted) NLL loss.
        alpha: per-class weight; a float/int ``a`` becomes [a, 1 - a]
            (binary case), a list becomes one weight per class.
        loss_compu: 'mean' averages over elements, anything else sums.
    """

    def __init__(self, gamma=0, alpha=None, loss_compu="mean"):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        if isinstance(alpha, (float, int)): self.alpha = torch.Tensor([alpha, 1 - alpha])
        if isinstance(alpha, list): self.alpha = torch.Tensor(alpha)
        self.loss_compu = loss_compu

    def forward(self, predict, target):
        """Compute the focal loss.

        :param predict: log-probabilities of shape [N, C] or [N, C, d1, ...].
        :param target: integer class indices, broadcastable to [N, ...].
        :return: scalar loss (mean or sum per ``loss_compu``).
        """
        if predict.dim() > 2:
            predict = predict.view(predict.size(0), predict.size(1), -1)  # N,C,H,W => N,C,H*W
            predict = predict.transpose(1, 2)  # N,C,H*W => N,H*W,C
            predict = predict.contiguous().view(-1, predict.size(2))  # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)

        # logpt = F.log_softmax(predict)  # input is already log-probabilities
        logpt = predict.gather(1, target).view(-1)
        # p_t, detached so the (1 - p_t)^gamma modulating factor carries no
        # gradient (replaces the deprecated Variable(logpt.data.exp())).
        pt = logpt.detach().exp()

        if self.alpha is not None:
            if self.alpha.type() != predict.data.type():
                self.alpha = self.alpha.type_as(predict.data)
            at = self.alpha.gather(0, target.view(-1))
            logpt = logpt * at

        loss = -1 * (1 - pt) ** self.gamma * logpt
        # BUG FIX: was ``self.self.loss_compu``, which raised AttributeError
        # on every forward pass.
        if self.loss_compu == "mean":
            return loss.mean()
        else:
            return loss.sum()
