# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""PointPillarsNet Loss set up"""

import numpy as np
from mindspore import nn
from mindspore import numpy as mnp
from mindspore import ops
from mindspore.common import dtype as mstype
from mindspore.common.tensor import Tensor
from mindspore.ops.primitive import constexpr


@constexpr
def _create_off_value():
    """Return a compile-time constant scalar 0.0 (float32) for graph mode.

    NOTE(review): this helper is defined a second time further down in the
    file with an identical body; the later definition shadows this one at
    import time — consider removing one of them.
    """
    return Tensor(0.0, mstype.float32)


def indices_to_dense_vector(indices,
                            size,
                            indices_value=1.,
                            default_value=0):
    """Creates dense vector with indices set to specific value and rest to zeros.

    Scatters `indices_value` into a vector of length `size` that is otherwise
    filled with `default_value`.

    Args:
      indices: 1d Tensor with integer indices which are to be set to
        indices_values.
      size: scalar with size (integer) of output Tensor.
      indices_value: values of elements specified by indices in the output vector
      default_value: values of other elements in the output vector.

    Returns:
      dense 1D float32 Tensor of shape [size] with indices set to
      indices_values and the rest set to default_value.
    """
    # ops.Zeros()(size).fill(default_value) relied on an implicit dtype for
    # Zeros (the operator expects an explicit dtype argument) and on a
    # chained Tensor.fill; build the filled vector in one call instead.
    dense = mnp.full((size,), default_value, dtype=mstype.float32)
    dense[indices] = indices_value

    return dense


def _sigmoid_cross_entropy_with_logits(logits, labels):
    """Numerically stable elementwise sigmoid cross entropy.

    Computes max(x, 0) - x * z + log(1 + exp(-|x|)), where x are the logits
    and z the labels cast to the logits' dtype.
    """
    targets = labels.astype(logits.dtype)
    # clip(x, 0, x.max()) is an elementwise max(x, 0)
    relu_logits = ops.clip_by_value(
        logits,
        clip_value_min=_create_off_value(),
        clip_value_max=logits.max()
    )
    log_term = ops.Log1p()(ops.Exp()(-ops.Abs()(logits)))
    return relu_logits - logits * targets + log_term


def _softmax_cross_entropy_with_logits(logits, labels):
    """Softmax cross entropy between logits and one-hot labels."""
    # Recover sparse class ids from the one-hot labels, then apply the
    # sparse softmax cross entropy cell.
    class_ids = ops.ArgMaxWithValue(axis=-1)(labels)[0].astype(mstype.int32)
    return nn.SoftmaxCrossEntropyWithLogits(sparse=True)(logits, class_ids)


class SigmoidFocalClassificationLoss(nn.Cell):
    """Sigmoid focal cross entropy loss.

    Focal loss down-weights well classified examples and focuses on the hard
    examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
    """

    def __init__(self, gamma=2.0, alpha=0.25):
        """
        Args:
            gamma: focusing exponent; 0 disables the modulating factor.
            alpha: positive/negative balance factor; None disables it.
        """
        super().__init__()
        self._alpha = alpha
        self._gamma = gamma

    def construct(self,
                  prediction_tensor,
                  target_tensor,
                  weights,
                  class_indices=None):
        """Return the per-entry weighted focal loss."""
        weights = ops.ExpandDims()(weights, 2)
        if class_indices is not None:
            # Mask out classes that are not in `class_indices`.
            class_mask = indices_to_dense_vector(
                class_indices,
                prediction_tensor.shape[2]
            )
            weights *= class_mask.view(1, 1, -1).astype(prediction_tensor.dtype)
        cross_ent = _sigmoid_cross_entropy_with_logits(
            labels=target_tensor, logits=prediction_tensor)
        probs = ops.Sigmoid()(prediction_tensor)
        # p_t: probability assigned to the ground-truth class.
        p_t = (target_tensor * probs
               + (1 - target_tensor) * (1 - probs))
        modulating_factor = 1.0
        if self._gamma:
            modulating_factor = ops.Pow()(1.0 - p_t, self._gamma)
        alpha_factor = 1.0
        if self._alpha is not None:
            alpha_factor = (self._alpha * target_tensor
                            + (1 - self._alpha) * (1 - target_tensor))

        return modulating_factor * alpha_factor * cross_ent * weights


class WeightedSmoothL1LocalizationLoss(nn.Cell):
    """Smooth L1 localization loss function.

    The smooth L1_loss is defined elementwise as .5 x^2 if |x|<1 and |x|-.5
    otherwise, where x is the difference between predictions and target.

    See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015)
    """

    def __init__(self, sigma=3.0, code_weights=None, codewise=True):
        """
        Args:
            sigma: sharpness of the quadratic-to-linear transition point.
            code_weights: optional per-coordinate weights for the residual.
            codewise: if True, keep per-coordinate losses; else sum over
                the code dimension.
        """
        super().__init__()
        self._sigma = sigma
        if code_weights is None:
            self._code_weights = None
        else:
            self._code_weights = Tensor(np.array(code_weights, dtype=np.float32))
        self._codewise = codewise

    def construct(self, prediction_tensor, target_tensor, weights=None):
        """Compute loss function.

        Args:
            prediction_tensor: A float tensor of shape [batch_size, num_anchors,
              code_size] representing the (encoded) predicted locations of objects.
            target_tensor: A float tensor of shape [batch_size, num_anchors,
              code_size] representing the regression targets
            weights: a float tensor of shape [batch_size, num_anchors]

        Returns:
            loss: a float tensor of shape [batch_size, num_anchors] tensor
                representing the value of the loss function.
        """
        residual = prediction_tensor - target_tensor
        if self._code_weights is not None:
            scale = self._code_weights.astype(prediction_tensor.dtype)
            residual = scale.view(1, 1, -1) * residual
        abs_res = ops.Abs()(residual)
        inv_sigma2 = 1 / (self._sigma ** 2)
        # 1.0 where the quadratic branch applies, 0.0 where the linear one does.
        quad_mask = ops.LessEqual()(abs_res, inv_sigma2).astype(abs_res.dtype)
        quadratic = 0.5 * ops.Pow()(abs_res * self._sigma, 2)
        linear = abs_res - 1 / (2 * (self._sigma ** 2))
        loss = quad_mask * quadratic + (1. - quad_mask) * linear
        if self._codewise:
            result = loss
            if weights is not None:
                result = result * ops.ExpandDims()(weights, -1)
        else:
            result = ops.ReduceSum()(loss, 2)
            if weights is not None:
                result = result * weights
        return result


class WeightedSoftmaxClassificationLoss(nn.Cell):
    """Softmax loss function."""

    def __init__(self, logit_scale=1.0):
        """Constructor.

        Args:
          logit_scale: When this value is high, the prediction is "diffused" and
                       when this value is low, the prediction is made peakier.
                       (default 1.0)
        """
        super().__init__()
        self._logit_scale = logit_scale

    def construct(self, prediction_tensor, target_tensor, weights):
        """Compute loss function.

        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of shape [batch_size, num_anchors]

        Returns:
          loss: a float tensor of shape [batch_size, num_anchors]
            representing the value of the loss function.
        """
        num_classes = prediction_tensor.shape[-1]
        # Temperature-scale the logits before the softmax cross entropy.
        scaled_logits = ops.Div()(prediction_tensor, self._logit_scale)
        flat_ce = _softmax_cross_entropy_with_logits(
            labels=target_tensor.view(-1, num_classes),
            logits=scaled_logits.view(-1, num_classes)
        )
        return flat_ce.view(weights.shape) * weights



def prepare_loss_weights(labels,
                         pos_cls_weight=1.0,
                         neg_cls_weight=1.0,
                         dtype=mstype.float32):
    """Derive classification/regression weights and the cared mask from labels.

    Labels > 0 are positives, == 0 are negatives, < 0 are ignored ("not cared").
    Both weight tensors are normalized by the per-sample positive count,
    clamped below at 1.
    """
    cared = labels >= 0
    # cared: [N, num_anchors]
    pos_mask = (labels > 0).astype(dtype)
    neg_mask = (labels == 0).astype(dtype)
    cls_weights = neg_cls_weight * neg_mask + pos_cls_weight * pos_mask
    reg_weights = pos_mask
    pos_normalizer = pos_mask.sum(1, keepdims=True)
    # clip(x, 1, x.max()) clamps the normalizer below at 1 to avoid
    # division by zero when a sample has no positives.
    normalizer = ops.clip_by_value(
        pos_normalizer,
        clip_value_min=_create_on_value(),
        clip_value_max=pos_normalizer.max()
    )
    reg_weights = reg_weights / normalizer
    cls_weights = cls_weights / normalizer
    return cls_weights, reg_weights, cared


@constexpr
def _create_on_value():
    """Return a compile-time constant scalar 1.0 (float32) for graph mode."""
    return Tensor(1.0, mstype.float32)


@constexpr
def _create_off_value():
    """Return a compile-time constant scalar 0.0 (float32) for graph mode.

    NOTE(review): duplicate of the `_create_off_value` defined near the top
    of this file; this later definition is the one in effect after import.
    """
    return Tensor(0.0, mstype.float32)


@constexpr
def _log16():
    """Return log(16) as a compile-time float32 constant.

    NOTE(review): not referenced anywhere in this file — presumably kept for
    importers elsewhere in the project; confirm before removing.
    """
    return ops.Log()(Tensor(16.0, mstype.float32))


def create_loss(loc_loss_ftor,
                cls_loss_ftor,
                box_preds,
                cls_preds,
                cls_targets,
                cls_weights,
                reg_targets,
                reg_weights,
                num_class,
                encode_background_as_zeros=True,
                encode_rad_error_by_sin=True,
                box_code_size=7):
    """Compute localization and classification losses for one batch.

    Returns a (loc_losses, cls_losses) pair produced by the given loss
    functors after reshaping predictions and one-hot encoding targets.
    """
    batch_size = box_preds.shape[0]
    box_preds = box_preds.view(batch_size, -1, box_code_size)
    # With background-as-zeros there is no explicit background column.
    pred_classes = num_class if encode_background_as_zeros else num_class + 1
    cls_preds = cls_preds.view(batch_size, -1, pred_classes)
    one_hot_targets = ops.OneHot()(
        cls_targets.squeeze(-1),
        num_class + 1,
        _create_on_value(),
        _create_off_value()
    )
    if encode_background_as_zeros:
        # Drop the background column from the one-hot targets.
        one_hot_targets = one_hot_targets[..., 1:]
    if encode_rad_error_by_sin:
        # sin(a - b) = sin(a)cos(b) - cos(a)sin(b)
        box_preds, reg_targets = add_sin_difference(box_preds, reg_targets)
    loc_losses = loc_loss_ftor(box_preds, reg_targets, weights=reg_weights)  # [N, M]
    cls_losses = cls_loss_ftor(cls_preds, one_hot_targets, weights=cls_weights)  # [N, M]
    return loc_losses, cls_losses


def add_sin_difference(boxes1, boxes2):
    """Re-encode the last (rotation) coordinate of both boxes.

    Uses sin(a - b) = sin(a)cos(b) - cos(a)sin(b): the prediction keeps the
    sin(a)cos(b) term and the target the cos(a)sin(b) term, so their smooth-L1
    difference measures the sine of the angle error.
    """
    concat = ops.Concat(axis=-1)
    rot1 = boxes1[..., -1:]
    rot2 = boxes2[..., -1:]
    pred_term = ops.Sin()(rot1) * ops.Cos()(rot2)
    target_term = ops.Cos()(rot1) * ops.Sin()(rot2)
    new_boxes1 = concat([boxes1[..., :-1], pred_term])
    new_boxes2 = concat([boxes2[..., :-1], target_term])
    return new_boxes1, new_boxes2


def _get_pos_neg_loss(cls_loss, labels):
    """Split the classification loss into positive- and negative-anchor parts.

    cls_loss: [N, num_anchors, num_class] (or [N, num_anchors] / trailing
    dim of 1), labels: [N, num_anchors]. Both returned values are averaged
    over the batch dimension.
    """
    batch_size = cls_loss.shape[0]
    if len(cls_loss.shape) == 2 or cls_loss.shape[-1] == 1:
        # Binary case: mask the flattened loss by the label sign.
        flat_loss = cls_loss.view(batch_size, -1)
        pos_loss = ((labels > 0).astype(cls_loss.dtype) * flat_loss).sum() / batch_size
        neg_loss = ((labels == 0).astype(cls_loss.dtype) * flat_loss).sum() / batch_size
    else:
        # Multi-class case: column 0 is background, the rest are positives.
        pos_loss = cls_loss[..., 1:].sum() / batch_size
        neg_loss = cls_loss[..., 0].sum() / batch_size
    return pos_loss, neg_loss


def get_direction_target(anchors, reg_targets, one_hot=True):
    """Build binary heading-direction targets from anchors and regression targets.

    The target is 1 where the absolute ground-truth rotation (anchor rotation
    plus encoded residual) is positive, else 0; optionally one-hot encoded.
    """
    batch_size = reg_targets.shape[0]
    anchors = anchors.view(batch_size, -1, 7)
    # Absolute rotation = encoded residual + anchor rotation (last coordinate).
    rot_gt = reg_targets[..., -1] + anchors[..., -1]
    dir_cls_targets = (rot_gt > 0).astype(mstype.int64)
    if not one_hot:
        return dir_cls_targets
    return ops.OneHot()(
        dir_cls_targets,
        2,
        _create_on_value(),
        _create_off_value()
    )


def get_paddings_indicator(actual_num, max_num, axis=0):
    """Create a boolean mask marking valid (non-padding) entries.

    Entry [.., j, ..] along `axis + 1` is True iff j < actual_num for that
    row, i.e. the slot holds real data rather than padding.
    """
    counts = ops.ExpandDims()(actual_num, axis + 1)
    # Broadcast shape: 1 everywhere except -1 on the compared axis.
    broadcast_shape = [1] * len(counts.shape)
    broadcast_shape[axis + 1] = -1
    index_range = mnp.arange(0, max_num, dtype=mstype.int32).view(*broadcast_shape)
    # paddings_indicator shape: [batch_size, max_num]
    return counts > index_range

class PointPillarsWithLossCell(nn.Cell):
    """PointPillars network wrapped with its training loss.

    Combines the sigmoid focal classification loss, the weighted smooth-L1
    localization loss and (optionally) a softmax heading-direction loss
    into a single scalar loss for training.
    """
    def __init__(self, network, cfg):
        """
        Args:
            network: PointPillars network; must expose a `code_size` attribute.
            cfg: configuration mapping with at least the keys 'loss',
                'direction_loss_weight', 'pos_class_weight',
                'neg_class_weight', 'num_class', 'use_direction_classifier',
                'encode_rad_error_by_sin' and 'encode_background_as_zeros'.
        """
        super().__init__()
        self.network = network
        self.cfg = cfg
        loss_cfg = cfg['loss']
        # Focal loss for anchor classification.
        self.loss_cls = SigmoidFocalClassificationLoss(
            gamma=loss_cfg['classification_loss']['gamma'],
            alpha=loss_cfg['classification_loss']['alpha']
        )
        # Smooth-L1 loss for box regression.
        self.loss_loc = WeightedSmoothL1LocalizationLoss(
            sigma=loss_cfg['localization_loss']['sigma'],
            code_weights=loss_cfg['localization_loss']['code_weight']
        )
        # Softmax loss for the binary heading-direction classifier.
        self.loss_dir = WeightedSoftmaxClassificationLoss()
        self.w_cls_loss = loss_cfg['classification_weight']
        self.w_loc_loss = loss_cfg['localization_weight']
        self.w_dir_loss = cfg['direction_loss_weight']
        self._pos_cls_weight = cfg['pos_class_weight']
        self._neg_cls_weight = cfg['neg_class_weight']
        self.code_size = network.code_size

    def construct(self, *args):
        """forward graph

        Expects (voxels, num_points, coors, bev_map, labels, reg_targets,
        anchors) and returns the combined scalar training loss.
        """
        voxels, num_points, coors, bev_map, labels, reg_targets, anchors = args
        batch_size_dev = labels.shape[0]
        preds = self.network(voxels, num_points, coors, bev_map)
        if self.cfg['use_direction_classifier']:
            box_preds, cls_preds, dir_cls_preds = preds
            dir_targets = get_direction_target(anchors, reg_targets)
            dir_logits = dir_cls_preds.view(batch_size_dev, -1, 2)
            # Only positive anchors contribute to the direction loss; the
            # weight is normalized by the per-sample positive count (clamped
            # below at 1 to avoid division by zero).
            weights = (labels > 0).astype(dir_logits.dtype)
            weights /= ops.clip_by_value(
                weights.sum(-1, keepdims=True),
                clip_value_min=_create_on_value(),
                clip_value_max=weights.sum(-1, keepdims=True).astype(mstype.float32).max()
            )
            dir_loss = self.loss_dir(dir_logits, dir_targets, weights=weights)
            dir_loss = dir_loss.sum() / batch_size_dev
            loss = dir_loss * self.w_dir_loss
        else:
            loss = 0
            box_preds, cls_preds = preds

        cls_weights, reg_weights, cared = prepare_loss_weights(
            labels,
            pos_cls_weight=self._pos_cls_weight,
            neg_cls_weight=self._neg_cls_weight,
            dtype=voxels.dtype
        )
        # Zero out labels of anchors that are not cared about (labels < 0).
        cls_targets = labels * cared.astype(labels.dtype)
        cls_targets = ops.ExpandDims()(cls_targets, -1)

        loc_loss, cls_loss = create_loss(
            self.loss_loc,
            self.loss_cls,
            box_preds=box_preds,
            cls_preds=cls_preds,
            cls_targets=cls_targets,
            cls_weights=cls_weights,
            reg_targets=reg_targets,
            reg_weights=reg_weights,
            num_class=self.cfg['num_class'],
            encode_rad_error_by_sin=self.cfg['encode_rad_error_by_sin'],
            encode_background_as_zeros=self.cfg['encode_background_as_zeros'],
            box_code_size=self.code_size,
        )
        loc_loss_reduced = loc_loss.sum() / batch_size_dev
        loc_loss_reduced *= self.w_loc_loss
        # NOTE(review): the pos/neg split below is computed but does not feed
        # into the returned loss — presumably kept for monitoring; confirm.
        cls_pos_loss, cls_neg_loss = _get_pos_neg_loss(cls_loss, labels)
        cls_pos_loss /= self._pos_cls_weight
        cls_neg_loss /= self._neg_cls_weight
        cls_loss_reduced = cls_loss.sum() / batch_size_dev
        cls_loss_reduced *= self.w_cls_loss
        loss += loc_loss_reduced + cls_loss_reduced
        return loss