#    Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.


from src.nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss,RobustCrossEntropyLoss2d
from src.nnunet.utilities.nd_softmax import softmax_helper
from src.nnunet.utilities.tensor_utilities import sum_tensor,sum_tensor_axis_023
from mindspore import nn
import numpy as np
import mindspore.ops as ops
from mindspore.nn.loss.loss import LossBase
from mindspore import nn, Tensor



# class SoftDiceLoss(LossBase):
#     def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,loss_type='3d'):
#         """
#         """
#         super(SoftDiceLoss, self).__init__()
#         self.mean = ops.ReduceMean()
#         self.do_bg = do_bg
#         self.batch_dice = batch_dice
#         self.apply_nonlin = apply_nonlin
#         self.smooth = smooth
#         self.reshape = ops.Reshape()
#         self.zeros = ops.Zeros()
#         if loss_type == '3d':
#             self.sum_tensor = sum_tensor
#             self.get_dice = self.get_dice3d
#         else:
#             self.sum_tensor = sum_tensor_axis_023
#             self.get_dice = self.get_dice2d
#
#     def get_dice3d(self,Tensor):
#         return Tensor[:,1:]
#     def get_dice2d(self,Tensor):
#         return Tensor[1:]
#
#     def get_tp_fp_fn_tn(self,net_output, gt, axes=None, mask=None, square=False):
#         """
#         net_output must be (b, c, x, y(, z)))
#         gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
#         if mask is provided it must have shape (b, 1, x, y(, z)))
#         :param net_output:
#         :param gt:
#         :param axes: can be (, ) = no summation
#         :param mask: mask must be 1 for valid pixels and 0 for invalid pixels
#         :param square: if True then fp, tp and fn will be squared before summation
#         :return:
#         """
#
#
#         shp_x = net_output.shape
#         shp_y = gt.shape
#
#
#
#         y_onehot = gt
#
#
#
#
#         tp = net_output * y_onehot
#         fp = net_output * (1 - y_onehot)
#         fn = (1 - net_output) * y_onehot
#         tn = (1 - net_output) * (1 - y_onehot)
#
#
#
#         tp = self.sum_tensor(tp, axes, keepdims=False)
#         fp = self.sum_tensor(fp, axes, keepdims=False)
#         fn = self.sum_tensor(fn, axes, keepdims=False)
#         tn = self.sum_tensor(tn, axes, keepdims=False)
#
#         return tp, fp, fn, tn
#
#
#     def construct(self, x, y, loss_mask=None):
#
#
#         #NPU
#         axes = [2,3,4]
#
#
#         x = self.apply_nonlin(x)
#
#         tp, fp, fn, _ = self.get_tp_fp_fn_tn(x, y, axes, loss_mask, False)
#
#         nominator = 2 * tp + self.smooth
#         denominator = 2 * tp + fp + fn + self.smooth
#
#         dc = nominator / (denominator + 1e-8)
#
#
#         dc = self.get_dice(dc)
#
#         dc = self.mean(dc)
#
#         return 1-dc



class SoftDiceLoss(nn.DiceLoss):
    """Thin wrapper around MindSpore's built-in ``nn.DiceLoss``.

    NOTE(review): the constructor accepts and stores ``apply_nonlin``,
    ``batch_dice``, ``do_bg``, ``smooth`` and ``loss_type`` for interface
    compatibility with the original nnU-Net implementation, but ``construct``
    delegates directly to ``nn.DiceLoss.construct`` — none of these settings
    currently influence the computed loss. Confirm this is intentional before
    relying on them.
    """

    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1., loss_type='3d'):
        super(SoftDiceLoss, self).__init__()
        # Retained for API compatibility; unused by the delegated computation.
        self.apply_nonlin = apply_nonlin
        self.batch_dice = batch_dice
        self.do_bg = do_bg
        self.smooth = smooth
        # Pre-instantiated ops, kept to match the original attribute surface.
        self.mean = ops.ReduceMean()
        self.reshape = ops.Reshape()
        self.zeros = ops.Zeros()

    def construct(self, input: Tensor, target: Tensor) -> Tensor:
        """Compute the Dice loss by delegating to the parent implementation."""
        return super().construct(input, target)






class DC_and_CE_loss(LossBase):
    """Weighted sum of a soft Dice loss and a robust cross-entropy loss.

    result = weight_ce * CE(net_output, target) + weight_dice * Dice(net_output, target)

    CAREFUL. Weights for CE and Dice do not need to sum to one. You can set
    whatever you want.
    """

    def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1,
                 log_dice=False, ignore_label=None):
        """
        :param soft_dice_kwargs: kwargs forwarded to SoftDiceLoss; must contain
            a "loss_type" key ('3d' or otherwise) used to pick the CE variant
        :param ce_kwargs: kwargs forwarded to the cross-entropy loss
        :param aggregate: stored but currently unused; only "sum" behaviour is
            implemented in construct
        :param square_dice: must be False when ignore_label is set
        :param weight_ce: weight of the CE term (0 skips the CE computation)
        :param weight_dice: weight of the Dice term (0 skips the Dice computation)
        :param log_dice: stored but currently unused in construct
        :param ignore_label: if not None, forces CE reduction to 'none'.
            NOTE(review): unlike upstream nnU-Net, construct performs no masking
            with ignore_label, so a non-None value yields an unreduced per-pixel
            CE tensor in the weighted sum — confirm callers never use this path.
        """
        super(DC_and_CE_loss, self).__init__()
        if ignore_label is not None:
            assert not square_dice, 'not implemented'
            ce_kwargs['reduction'] = 'none'
        self.log_dice = log_dice
        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.aggregate = aggregate

        # 3d volumes and 2d slices use different cross-entropy implementations.
        if soft_dice_kwargs["loss_type"] == '3d':
            self.ce = RobustCrossEntropyLoss(**ce_kwargs)
        else:
            self.ce = RobustCrossEntropyLoss2d(**ce_kwargs)

        self.transpose = ops.Transpose()
        self.ignore_label = ignore_label
        self.reshape = ops.Reshape()

        self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)

    def construct(self, net_output, target):
        """
        target must be b, c, x, y(, z) with c=1
        :param net_output: raw network output (logits); SoftDiceLoss applies
            softmax internally via apply_nonlin
        :param target: label map
        :return: weight_ce * ce_loss + weight_dice * dc_loss
        """
        # A zero weight skips the corresponding loss computation entirely.
        dc_loss = self.dc(net_output, target) if self.weight_dice != 0 else 0
        ce_loss = self.ce(net_output, target) if self.weight_ce != 0 else 0

        return self.weight_ce * ce_loss + self.weight_dice * dc_loss


