'''
Function:
    Define the cross entropy loss
Author:
    Zhenchao Jin
'''
import numpy as np

import luojianet
import luojianet.nn as nn
import luojianet.ops as ops
from luojianet import nn, ops, Parameter, Tensor
from luojianet.ops import operations as P


'''
Function:
    CrossEntropyLoss
Arguments:
    --prediction: prediction (logits) of the network
    --target: ground truth labels
    --scale_factor: scale the loss for loss balance
    --weight: per-class rescaling weight forwarded to the cross entropy op
    --ignore_index: target value that is ignored by the loss
    --reduction: reduction mode forwarded to the cross entropy op ('mean', 'sum' or 'none')
    --lowest_loss_value: flooding level, added inspired by ICML2020, "Do We Need Zero Training Loss After Achieving Zero Training Error", https://arxiv.org/pdf/2002.08709.pdf
    --label_smoothing: optional label smoothing factor; only forwarded when not None
'''
def CrossEntropyLoss(prediction, target, scale_factor=1.0, weight=None, ignore_index=255, reduction='mean', lowest_loss_value=None, label_smoothing=None):
    '''Compute the (optionally flooded) cross entropy loss.

    Args:
        prediction: logits predicted by the network.
        target: ground truth labels; cast to long before the loss call.
        scale_factor: multiplier applied to the loss for loss balancing.
        weight: per-class rescaling weight forwarded to ops.cross_entropy.
        ignore_index: target value ignored by the loss.
        reduction: 'mean' | 'sum' | 'none', forwarded to ops.cross_entropy.
        lowest_loss_value: flooding level b; when set, return |loss - b| + b
            (ICML2020 "Do We Need Zero Training Loss After Achieving Zero
            Training Error", https://arxiv.org/pdf/2002.08709.pdf).
        label_smoothing: optional label smoothing factor; only forwarded when
            not None so older ops.cross_entropy signatures keep working.

    Returns:
        The scaled (and possibly flooded) cross entropy loss tensor.
    '''
    # assemble keyword arguments for ops.cross_entropy; label_smoothing is
    # only added when explicitly requested
    ce_args = {
        'weight': weight,
        'ignore_index': ignore_index,
        'reduction': reduction,
    }
    if label_smoothing is not None:
        ce_args['label_smoothing'] = label_smoothing
    loss = ops.cross_entropy(prediction, target.long(), **ce_args)
    # scale the loss for loss balancing
    loss = loss * scale_factor
    # apply the flooding trick; BUGFIX: use an explicit None check so that a
    # flooding level of 0.0 is honored instead of being silently skipped by
    # the previous truthiness test
    if lowest_loss_value is not None:
        return ops.abs(loss - lowest_loss_value) + lowest_loss_value
    return loss

# class CrossEntropyLoss(nn.Module):
#     def __init__(self, prediction, target, scale_factor=1.0, weight=None, ignore_index=255, reduction='mean', lowest_loss_value=None, label_smoothing=None):
#         super(CrossEntropyLoss, self).__init__()
#         self.prediction = prediction
#         self.target = target
#         self.scale_factor = scale_factor
#         self.weight = weight
#         self.ignore_index = ignore_index
#         self.reduction = reduction
#         self.lowest_loss_value = lowest_loss_value
#         self.label_smoothing = label_smoothing
    
#     def forward(self, prediction, target):
#         return _CrossEntropyLoss(prediction=self.prediction, target=self.target, scale_factor=self.scale_factor, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction, lowest_loss_value=self.lowest_loss_value, label_smoothing=self.label_smoothing)



# class CrossEntropyLoss(nn.Module):
#     def __init__(self, num_cls=19, ignore_label=255, device_num=8):
#         super(CrossEntropyLoss, self).__init__()
#         self.one_hot = P.OneHot(axis=-1)
#         self.on_value = Tensor(1.0, luojianet.float32)
#         self.off_value = Tensor(0.0, luojianet.float32)
#         self.cast = P.Cast()
#         self.ce = nn.SoftmaxCrossEntropyWithLogits()
#         self.not_equal = P.NotEqual()
#         self.num_cls = num_cls
#         self.ignore_label = ignore_label
#         self.mul = P.Mul()
#         self.sum = P.ReduceSum(False)
#         self.div = P.RealDiv()
#         self.transpose = P.Transpose()
#         self.reshape = P.Reshape()
#         # self.ce.softmax_cross_entropy.shard(((device_num, 1), (device_num, 1)))
#         # self.transpose.shard(((1, 1, 1, device_num),))
#         # self.interpolate = ops.ResizeNearestNeighbor()

#     def forward(self, logits, labels):
#         # print("*************************")
#         # print("logits.shape", logits.shape)
#         # print("labels.shape", labels.shape)
#         # print("*************************")
        
#         # img_size = logits.shape[2], logits.shape[3]
#         # labels = ops.unsqueeze(labels, 0)
#         # labels = ops.ResizeNearestNeighbor(size=img_size, align_corners=True)(labels)
#         # labels = ops.squeeze(labels, 0)
#         # labels = self.interpolate(labels, size=img_size, align_corners=True)

#         labels_int = self.cast(labels, luojianet.int32)
#         labels_int = self.reshape(labels_int, (-1,))
#         logits_ = self.transpose(logits, (0, 2, 3, 1))
#         logits_ = self.reshape(logits_, (-1, self.num_cls))
#         weights = self.not_equal(labels_int, self.ignore_label)
#         weights = self.cast(weights, luojianet.float32)
#         one_hot_labels = self.one_hot(labels_int, self.num_cls, self.on_value, self.off_value)
#         logits_ = self.cast(logits_, luojianet.float32)
#         loss = self.ce(logits_, one_hot_labels)
#         loss = self.mul(weights, loss)
#         loss = self.div(self.sum(loss), self.sum(weights))
        
#         # print("loss: ", loss)

#         return loss

'''
Function:
    BinaryCrossEntropyLoss
Arguments:
    --prediction: prediction (logits) of the network
    --target: ground truth labels
    --scale_factor: scale the loss for loss balance
    --weight: per-element rescaling weight forwarded to the loss op
    --ignore_index: target value that is ignored when expanding labels to one-hot form
    --reduction: reduction mode forwarded to the loss op ('mean', 'sum' or 'none')
    --pos_weight: weight of positive examples forwarded to the loss op
    --lowest_loss_value: flooding level, added inspired by ICML2020, "Do We Need Zero Training Loss After Achieving Zero Training Error", https://arxiv.org/pdf/2002.08709.pdf
'''
def BinaryCrossEntropyLoss(prediction, target, scale_factor=1.0, weight=None, ignore_index=255, reduction='mean', pos_weight=None, lowest_loss_value=None):
    '''Compute the (optionally flooded) binary cross entropy loss with logits.

    When `target` holds class indices (one dimension fewer than `prediction`),
    it is first expanded to a one-hot binary target of the same shape as
    `prediction`, ignoring entries equal to `ignore_index`.

    Args:
        prediction: logits predicted by the network, shape (N, C) or (N, C, H, W).
        target: binary targets of the same shape as prediction, or class
            indices of shape (N,) / (N, H, W).
        scale_factor: multiplier applied to the loss for loss balancing.
        weight: per-element rescaling weight; defaults to all ones.
        ignore_index: label value ignored when expanding to one-hot form.
        reduction: 'mean' | 'sum' | 'none', forwarded to the loss op.
        pos_weight: weight of positive examples; defaults to all ones.
        lowest_loss_value: flooding level b; when set, return |loss - b| + b
            (ICML2020, https://arxiv.org/pdf/2002.08709.pdf).

    Returns:
        The scaled (and possibly flooded) binary cross entropy loss tensor.
    '''
    # expand class-index labels to a one-hot binary target matching prediction
    if len(prediction.shape) != len(target.shape):
        assert ( (len(prediction.shape) == 2) and (len(target.shape) == 1) or (len(prediction.shape) == 4) and (len(target.shape) == 3) )
        target_binary = ops.Zeros()(prediction.shape, prediction.dtype)
        # mark labels that are valid (non-negative and not the ignore value)
        # NOTE(review): labels are cast to int32, so values must fit that range
        valid_mask = (target >= 0).astype(luojianet.int32) & (target != ignore_index).astype(luojianet.int32)
        valid_mask = valid_mask.astype(luojianet.int32)
        # coordinates of the valid entries, as an index tuple per dimension
        idxs = ops.nonzero(valid_mask)
        idxs = tuple(idxs.transpose())
        if idxs[0].numel() > 0:
            if len(target.shape) == 3:
                target_binary[idxs[0].astype(luojianet.int32), target[valid_mask].astype(luojianet.int32), idxs[1].astype(luojianet.int32), idxs[2]] = 1
            else:
                target_binary[idxs[0].astype(luojianet.int32), target[valid_mask].astype(luojianet.int32)] = 1
        prediction = prediction[valid_mask]
        target_binary = target_binary[valid_mask]
        # BUGFIX: explicit None check — truthiness of a multi-element Tensor
        # is ambiguous and the previous `if weight:` could raise or misbehave
        if weight is not None:
            weight = weight[valid_mask]
    else:
        target_binary = target
    # the loss op requires explicit weight / pos_weight tensors, so default
    # missing ones to all-ones over the class dimension
    if weight is None:
        weight = ops.ones(target_binary.shape[1], dtype=luojianet.float32)
    if pos_weight is None:
        pos_weight = ops.ones(target_binary.shape[1], dtype=luojianet.float32)
    ce_args = {
        'weight': weight,
        'reduction': reduction,
        'pos_weight': pos_weight,
    }
    # BUGFIX: the real loss call was commented out and `loss = Tensor(0.0)`
    # was hardcoded, so this function always returned a constant zero loss
    # and contributed nothing to training; restore the actual computation
    loss = ops.binary_cross_entropy_with_logits(prediction, target_binary.astype(luojianet.float32), **ce_args)
    # scale the loss for loss balancing
    loss = loss * scale_factor
    # apply the flooding trick; explicit None check so a level of 0.0 is honored
    if lowest_loss_value is not None:
        return ops.abs(loss - lowest_loss_value) + lowest_loss_value
    return loss