import mindspore as ms
import mindspore.nn as nn
import mindspore.ops.operations as F
from mindspore.nn.loss.loss import _Loss
from mindspore import ops
import time


class CrossEntropyWithLogits(nn.LossBase):
    """Sparse softmax cross-entropy over dense (per-pixel) logits.

    Expects logits in NCHW layout and integer labels in NHW layout (the
    spatial size of the full-resolution prediction). Supports an optional
    multi-scale mode in which ``logits`` is a list of predictions at
    different spatial resolutions; each prediction is compared against a
    label map resized (nearest-neighbor) to its resolution and the
    per-scale losses are combined with fixed weights.

    Args:
        multi_scale_train (bool): if True, ``construct`` treats ``logits``
            as a list of multi-scale predictions. Default: False.
    """

    def __init__(self, multi_scale_train=False):
        super(CrossEntropyWithLogits, self).__init__()
        self.transpose_fn = F.Transpose()
        self.reshape_fn = F.Reshape()
        self.softmax_cross_entropy_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
        self.cast = F.Cast()
        self.multi_scale_train = multi_scale_train
        # Per-scale loss weights; supports up to five prediction heads,
        # with the last (finest) head weighted highest.
        self.multi_pred_weights = [0.5, 0.5, 0.5, 0.8, 1.0]
        self.expand_dim = ops.ExpandDims()

    def construct(self, logits, label):
        """Compute the (possibly multi-scale) cross-entropy loss.

        Args:
            logits: a single NCHW Tensor, or a list of such Tensors
                (multi-scale mode uses every element; otherwise only the
                last element of a list is used).
            label: integer label map of shape (N, H, W) matching the
                full-resolution prediction.

        Returns:
            Scalar loss Tensor (after ``LossBase.get_loss`` reduction).

        Raises:
            TypeError: if ``logits`` is neither a list nor a Tensor
                (single-scale mode only).
        """
        label_shape = label.shape
        origin_label_h, origin_label_w = label_shape[1], label_shape[2]

        if self.multi_scale_train:
            loss = ms.Tensor(0.0, dtype=ms.float32)
            for i, logit in enumerate(logits):
                logit_shape = logit.shape
                logit_h, logit_w = logit_shape[-2], logit_shape[-1]

                if logit_h != origin_label_h or logit_w != origin_label_w:
                    scaled_label = label
                    if len(scaled_label.shape) == 3:
                        # ResizeNearestNeighbor expects NCHW input, so add
                        # a channel axis to the NHW label map.
                        scaled_label = self.expand_dim(scaled_label, 1)
                    # NOTE(review): instantiating an operator inside
                    # construct may not work in graph mode — consider
                    # pre-building the resize ops in __init__.
                    resize_mode = F.ResizeNearestNeighbor(size=(logit_h, logit_w), align_corners=False)
                    scaled_label = resize_mode(scaled_label)
                    loss = loss + self.multi_pred_weights[i] * self._cal_loss(logit, scaled_label)
                else:
                    loss = loss + self.multi_pred_weights[i] * self._cal_loss(logit, label)

            return self.get_loss(loss)

        # Single-scale: use the last (finest-resolution) prediction when a
        # list of heads is given.
        if isinstance(logits, list):
            logit = logits[-1]
        elif isinstance(logits, ms.Tensor):
            logit = logits
        else:
            raise TypeError("No support data type")

        return self.get_loss(self._cal_loss(logit, label))

    def _cal_loss(self, logit, label):
        """Mean sparse softmax cross-entropy for one prediction head.

        Args:
            logit: NCHW Tensor of raw class scores.
            label: integer label map with the same spatial size as logit.

        Returns:
            Scalar Tensor: mean cross-entropy over all pixels.
        """
        # NCHW -> NHWC so classes occupy the innermost axis before
        # flattening to (num_pixels, num_classes).
        logit = self.transpose_fn(logit, (0, 2, 3, 1))
        logit = self.cast(logit, ms.float32)
        label = self.cast(label, ms.int32)

        # Derive the class count from the tensor instead of hard-coding 2,
        # so the loss works for any number of classes.
        num_classes = logit.shape[-1]
        flat_logit = self.reshape_fn(logit, (-1, num_classes))
        flat_label = self.reshape_fn(label, (-1,))
        # self.reduce_mean is provided by the nn.LossBase base class.
        return self.reduce_mean(self.softmax_cross_entropy_loss(flat_logit, flat_label))