# CE-loss
#import torch.nn as nn
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype
import mindspore
import numpy as np
import mindspore as ms
from mindspore.ops import operations as P
from src.DualGCNNet import DualSeg_res101


class SoftmaxCrossEntropyLoss(nn.Cell):
    """Masked softmax cross-entropy for dense prediction.

    Flattens (N, C, H, W) logits and (N, H, W) labels, one-hot encodes the
    labels, and averages the per-pixel cross entropy over every pixel whose
    label differs from ``ignore_label``.
    """

    def __init__(self, num_cls=21, ignore_label=255):
        super(SoftmaxCrossEntropyLoss, self).__init__()
        self.num_cls = num_cls
        self.ignore_label = ignore_label
        # Scalar on/off values consumed by OneHot.
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.one_hot = P.OneHot(axis=-1)
        self.ce = nn.SoftmaxCrossEntropyWithLogits()
        self.cast = P.Cast()
        self.not_equal = P.NotEqual()
        self.mul = P.Mul()
        self.sum = P.ReduceSum(False)
        self.div = P.RealDiv()
        self.transpose = P.Transpose()
        self.reshape = P.Reshape()

    def construct(self, logits, labels):
        """Return the mean cross entropy over the non-ignored pixels."""
        flat_labels = self.reshape(self.cast(labels, mstype.int32), (-1,))
        # NCHW -> NHWC, then collapse every pixel into a row of num_cls logits.
        flat_logits = self.reshape(self.transpose(logits, (0, 2, 3, 1)),
                                   (-1, self.num_cls))
        # 1.0 where the pixel is supervised, 0.0 where it carries ignore_label.
        pixel_weights = self.cast(self.not_equal(flat_labels, self.ignore_label),
                                  mstype.float32)
        targets = self.one_hot(flat_labels, self.num_cls,
                               self.on_value, self.off_value)
        per_pixel_loss = self.ce(flat_logits, targets)
        masked_loss = self.mul(pixel_weights, per_pixel_loss)
        # Mean over valid pixels only (sum of losses / count of valid pixels).
        return self.div(self.sum(masked_loss), self.sum(pixel_weights))

class OhemCrossEntropy2dTensor(nn.Cell):
    """Online hard example mining (OHEM) cross entropy.

    Selects the "hard" pixels — those whose predicted probability on their
    ground-truth class is low — relabels every other pixel as ``ignore_label``,
    and delegates the actual loss computation to ``SoftmaxCrossEntropyLoss``.

    NOTE(review): ``reduction``, ``down_ratio`` and ``use_weight`` are accepted
    but never used in this implementation.
    """
    def __init__(self, ignore_label, ncls, reduction='mean', thresh=0.6, min_kept=256,
                 down_ratio=1, use_weight=False):
        super(OhemCrossEntropy2dTensor, self).__init__()
        self.ignore_label = ignore_label          # label value excluded from the loss
        self.num_cls = ncls                       # number of classes C
        self.thresh = float(thresh)               # probability threshold for "hard" pixels
        self.min_kept = int(min_kept)             # minimum number of pixels to keep
        self.down_ratio = down_ratio              # unused (kept for interface compatibility)
        self.reshape = mindspore.ops.Reshape()
        self.ne = mindspore.ops.NotEqual()
        self.sum = mindspore.ops.ReduceSum(keep_dims=False)
        self.softmax = mindspore.ops.Softmax(axis=1)
        self.transpose = mindspore.ops.Transpose()
        self.mul = mindspore.ops.Mul()
        self.sort = mindspore.ops.Sort()
        self.le = mindspore.ops.LessEqual()
        self.min = mindspore.ops.Minimum()
        # Underlying masked CE; it re-applies the ignore_label mask itself.
        self.criterion = SoftmaxCrossEntropyLoss(num_cls=self.num_cls, ignore_label=self.ignore_label)
        self.is_nan = mindspore.ops.IsNan()

    def construct(self, pred, target):
        """Compute OHEM cross entropy.

        pred: (B, C, H, W) raw logits; target: (B, H, W) integer labels.
        """
        b, c, h, w = pred.shape
        target = self.reshape(target, (-1,))
        # True for supervised pixels, False where target == ignore_label.
        valid_mask = self.ne(target, self.ignore_label)
        # Zero out ignored labels so they are safe indices for the gather below.
        target = target * valid_mask.astype("int64")
        num_valid = valid_mask.astype("float32").sum()
        #print("num_valid", num_valid)

        # Per-pixel class probabilities laid out as (C, B*H*W).
        prob = self.softmax(pred)
        prob = self.transpose(prob, (1, 0, 2, 3))
        prob = self.reshape(prob, (c, -1))

        if self.min_kept > num_valid:
            # Fewer valid pixels than min_kept: keep them all (no mining).
            pass
            #print('Labels: {}'.format(num_valid))
        elif num_valid > 0:
            # Force invalid pixels' probability to 1 so they never look "hard".
            prob = self.mul(prob, valid_mask) + (~valid_mask)
            ran = nn.Range(start=0, limit=len(target), delta=1)
            t = ran().astype("int64")
            # Probability assigned to each pixel's ground-truth class.
            mask_prob = prob[target.astype("int64"), t]
            threshold = self.thresh
            if self.min_kept > 0:
                #_, index = mask_prob.sort()
                # Indices of pixels ordered by ascending gt-class probability.
                index = self.sort(mask_prob)[1]
                if len(index)<self.min_kept:
                    threshold_index = index[len(index) - 1]
                else:
                    threshold_index = index[self.min_kept - 1]
                #threshold_index = index[self.min(len(index).astype("int64"), self.min_kept) - 1]
                # Raise the threshold if the min_kept-th hardest pixel is
                # already above thresh, so at least min_kept pixels survive.
                if mask_prob[threshold_index] > self.thresh:
                    threshold = mask_prob[threshold_index]
                # Keep pixels whose gt-class probability is <= threshold.
                kept_mask = self.le(mask_prob, threshold)
                target = target * kept_mask.astype("int64")
                valid_mask = valid_mask * kept_mask.astype("int64")
        valid_mask = valid_mask.astype("bool")
        # Relabel every dropped pixel as ignore_label, then restore (B, H, W).
        # Cast to float32: the inner criterion casts labels back to int32 itself.
        target = self.mul(target, valid_mask) + self.mul(self.ignore_label, ~valid_mask)
        target = self.reshape(target,(b, h, w)).astype("float32")

        #print("target", target.shape, self.is_nan(target).astype("float32").sum())
        return self.criterion(pred, target)


class CriterionDSN(SoftmaxCrossEntropyLoss):
    """Deep-supervision CE loss over the two heads of ``net``.

    Runs ``net`` on the image and combines the masked cross entropy of the
    main prediction (preds[0]) with 0.4x that of the auxiliary prediction
    (preds[1]).
    """

    def __init__(self, args, net, ignore_index=255, reduce=True):
        # Forward the class count and ignore index to the parent loss.
        # The previous version called super().__init__() with no arguments,
        # leaving the parent at its defaults (num_cls=21, ignore_label=255),
        # which is wrong whenever args.num_classes != 21 or a different
        # ignore_index is requested.
        super(CriterionDSN, self).__init__(num_cls=args.num_classes,
                                           ignore_label=ignore_index)
        self.ignore_index = ignore_index
        self.reduce = reduce  # kept for interface compatibility; not used here
        self.net = net

    def construct(self, image, target):
        """Return loss(main head) + 0.4 * loss(aux head)."""
        preds = self.net(image)
        loss1 = super(CriterionDSN, self).construct(preds[0], target)
        loss2 = super(CriterionDSN, self).construct(preds[1], target)
        return loss1 + loss2 * 0.4


class CriterionOhemDSN(nn.Cell):
    """Deep-supervision loss pairing OHEM CE (main head) with plain CE
    (auxiliary head), weighted 1.0 and 0.4 respectively."""

    def __init__(self, args, reduce=True):
        super(CriterionOhemDSN, self).__init__()
        self.ignore_index = 255
        self.ncls = args.num_classes
        # Hard-example-mined CE drives the main prediction.
        self.criterion1 = OhemCrossEntropy2dTensor(self.ignore_index, self.ncls,
                                                   thresh=args.ohem_thres,
                                                   min_kept=args.ohem_keep)
        # Plain masked CE supervises the auxiliary prediction.
        self.criterion2 = SoftmaxCrossEntropyLoss(num_cls=self.ncls,
                                                  ignore_label=self.ignore_index)
        self.net = DualSeg_res101(num_classes=args.num_classes, is_train=True)

    def construct(self, image, target):
        """Upsample both heads to the label size and combine their losses."""
        _, h, w = target.shape
        preds = self.net(image)
        # Size comes from the runtime label shape, so the op is built here.
        upsample = mindspore.ops.ResizeBilinear((h, w), align_corners=True)
        main_loss = self.criterion1(upsample(preds[0]), target)
        aux_loss = self.criterion2(upsample(preds[1]), target)
        return main_loss + aux_loss * 0.4
