import sys
import mindspore as ms
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, context
import mindspore.numpy as mnp
import mindspore.ops as ops
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common.initializer import Zero
import numpy
import mindspore.common.dtype as mstype

      
def smoothl1_loss(error, delta=1.0):
    """Element-wise smooth-L1 (Huber-style) loss.

    With x = error = pred - gt (or dist(pred, gt)):
        0.5 * |x|^2 / d     if |x| <= d
        |x| - 0.5 * d       if |x| >  d

    Args:
        error (Tensor): residuals, any shape.
        delta (float): transition point between the quadratic and the
            linear regime. Default: 1.0.

    Returns:
        Tensor: per-element loss with the same shape as ``error``.
    """
    diff = ops.Abs()(error)
    # mindspore.numpy.where, not numpy.where: both branches are MindSpore
    # Tensors, and calling NumPy on them breaks under graph mode.
    return mnp.where(diff < delta,
                     0.5 * diff * diff / delta,
                     diff - 0.5 * delta)


def get_loss(backbone_xyz, points_obj_cls_logits, size_gts, target_bboxes_mask, objectness_scores, point_obj_mask, pred_center, gt_center, heading_scores, angle_classes, heading_residual_normalized, gt_angle_residuals, point_instance_label, size_scores, size_classes, \
    size_residuals_normalized, size_residual_label, sem_cls_scores, target_bboxes_semcls, backbone_sample_idx, fps_sample_inx, mean_size_arr):
    """Detection losses for a VoteNet/GroupFree-style 3D detector.

    Shapes as used by the code below (assumed from indexing — confirm
    against the caller):
        backbone_xyz:                (B, K, 3) seed point coordinates
        points_obj_cls_logits:       (B, 1, K) per-seed objectness logits
        size_gts:                    (B, K2, 3) GT box sizes
        target_bboxes_mask:          (B, K2), 1 for valid GT boxes, 0 for padding
        objectness_scores:           (B, num_proposal, 1)
        point_obj_mask:              (B, N) from the dataset
        pred_center:                 (B, num_proposal, 3)
        gt_center:                   (B, K2, 3)
        heading_scores:              (B, num_proposal, num_heading_bin)
        angle_classes:               (B, K2)
        heading_residual_normalized: (B, num_proposal, num_heading_bin)
        gt_angle_residuals:          (B, K2)
        point_instance_label:        (B, N) GT box index per point, <0 = background
        size_scores:                 (B, num_proposal, num_size_cluster)
        size_classes:                (B, K2)
        size_residuals_normalized:   (B, num_proposal, num_size_cluster, 3)
        size_residual_label:         (B, K2, 3)
        sem_cls_scores:              (B, num_proposal, num_class)
        target_bboxes_semcls:        (B, K2)
        backbone_sample_idx:         (B, K) seed indices into the raw cloud
        fps_sample_inx:              (B, num_proposal) proposal indices into seeds
        mean_size_arr:               (num_size_cluster, 3) numpy array

    Returns:
        tuple: (weighted KPS loss, total loss,
                (objectness_loss_sum, box_loss_sum, sem_cls_loss, center_loss,
                 heading_class_loss, heading_residual_normalized_loss,
                 size_class_loss, size_residual_normalized_loss))
    """
    B = gt_center.shape[0]
    K = backbone_xyz.shape[1]    # number of backbone seed points
    K2 = gt_center.shape[1]      # MAX_NUM_OBJ
    size_delta = 0.111111111111  # smooth-L1 delta for size residuals
    center_delta = 0.04          # smooth-L1 delta for centers
    num_heading_bin = 1
    topk = 4                     # seeds labelled positive per GT box

    # ---- KPS (key-point sampling) loss -------------------------------------
    # GT box index for every seed point; background seeds point at the last
    # (padded) box so that OneHot stays in range.
    object_assignment = ops.GatherD()(point_instance_label, 1, backbone_sample_idx)  # (B, K)
    object_assignment[object_assignment < 0] = K2 - 1
    object_assignment_one_hot = ops.OneHot()(object_assignment, K2, Tensor(1.0, ms.float32), Tensor(0.0, ms.float32))
    # Size-normalized distance of every seed to every GT center.
    delta_xyz = ops.ExpandDims()(backbone_xyz, 2) - ops.ExpandDims()(gt_center, 1)  # (B, K, K2, 3)
    delta_xyz = delta_xyz / (ops.ExpandDims()(size_gts, 1) + 1e-6)  # (B, K, K2, 3)
    new_dist = ops.ReduceSum(keep_dims=False)(delta_xyz ** 2, axis=-1)
    euclidean_dist1 = ops.Sqrt()(new_dist + 1e-6)  # (B, K, K2)
    # A seed can only be "close" to the box it is assigned to; all other
    # pairs are pushed to a large constant so TopK never selects them.
    euclidean_dist1 = euclidean_dist1 * object_assignment_one_hot + \
        100 * (1 - object_assignment_one_hot)  # (B, K, K2)
    euclidean_dist1 = ops.Transpose()(euclidean_dist1, (0, 2, 1))  # (B, K2, K)
    box_label_mask = target_bboxes_mask
    # topk nearest seeds per valid GT box become positives; padded boxes
    # (mask 0) produce index -1, which lands in a sacrificial extra column
    # of objectness_label below.
    topk_inds = ops.TopK()(-euclidean_dist1, topk)[1] * box_label_mask[:, :, None] + \
        (box_label_mask[:, :, None] - 1)  # (B, K2, topk)
    topk_inds = ops.Cast()(topk_inds, ms.int64)
    topk_inds = topk_inds.view((B, -1))  # (B, K2*topk)
    batch_inds = mnp.tile(ops.ExpandDims()(mnp.arange(B), 1), (1, K2 * topk))
    batch_inds = ops.Cast()(batch_inds, ms.int64)
    batch_topk_inds = ops.Stack(-1)([batch_inds, topk_inds]).view(-1, 2)

    # K+1 columns: the -1 indices from padded boxes wrap to the last column,
    # which is immediately sliced away.
    objectness_label = ops.Zeros()((B, K + 1), ms.float32)
    objectness_label[batch_topk_inds[:, 0], batch_topk_inds[:, 1]] = 1
    objectness_label = objectness_label[:, :K]
    objectness_label_mask = ops.GatherD()(point_instance_label, 1, backbone_sample_idx)  # (B, K)
    objectness_label[objectness_label_mask < 0] = 0  # background seeds are never positive

    criterion = SigmoidFocalClassificationLoss()
    KPS_loss = criterion(points_obj_cls_logits.view(B, K, 1), ops.ExpandDims()(objectness_label, -1))
    KPS_loss_sum = KPS_loss.mean()

    # ---- Proposal objectness loss ------------------------------------------
    criterion = SigmoidFocalClassificationLoss()
    backbone_obj_gt = ops.GatherD()(point_obj_mask, 1, backbone_sample_idx)  # (B, N) -> (B, K)
    fps_obj_gt = ops.GatherD()(backbone_obj_gt, 1, fps_sample_inx)  # (B, K) -> (B, num_proposal)
    objectness_loss = criterion(objectness_scores, ops.ExpandDims()(ops.Cast()(fps_obj_gt, ms.float32), 2))
    objectness_loss_sum = objectness_loss.mean()

    # ---- Center loss -------------------------------------------------------
    # Assign each proposal to a GT box via the instance label of its seed.
    backbone_instance_label = ops.GatherD()(point_instance_label, 1, backbone_sample_idx)  # (B, N) -> (B, K)
    fps_instance_label = ops.GatherD()(backbone_instance_label, 1, fps_sample_inx)  # (B, num_proposal)
    object_assignment = fps_instance_label
    object_assignment[object_assignment < 0] = K2 - 1  # background -> last gt bbox
    object_assignment_expand = ops.tile(ops.ExpandDims()(object_assignment, 2), (1, 1, 3))
    assigned_gt_center = ops.GatherD()(gt_center, 1, object_assignment_expand)  # (B, K2, 3) -> (B, num_proposal, 3)
    center_loss = F.smooth_l1_loss(pred_center, assigned_gt_center, beta=center_delta, reduction='none')
    # Every per-proposal loss below is masked by fps_obj_gt and normalized by
    # the (eps-protected) number of object proposals.
    center_loss = ops.ReduceSum(keep_dims=False)(center_loss * ops.ExpandDims()(fps_obj_gt, 2)) / (ops.ReduceSum(keep_dims=False)(ops.Cast()(fps_obj_gt, ms.float32)) + 1e-6)

    # ---- Heading classification loss ---------------------------------------
    heading_class_label = ops.GatherD()(angle_classes, 1, object_assignment)  # (B, K2) -> (B, num_proposal)
    # reduction='none' keeps per-proposal values so the fps_obj_gt mask is
    # effective; the previous default 'mean' reduction collapsed to a scalar
    # first, silently disabling the mask. Matches the size/sem-cls branches.
    criterion_heading_class = nn.CrossEntropyLoss(reduction='none')
    heading_class_label = ops.Cast()(heading_class_label, ms.int32)
    heading_class_loss = criterion_heading_class(ops.Transpose()(heading_scores, (0, 2, 1)), heading_class_label)  # (B, num_proposal)
    heading_class_loss = ops.ReduceSum(keep_dims=False)(heading_class_loss * fps_obj_gt) / (ops.ReduceSum(keep_dims=False)(ops.Cast()(fps_obj_gt, ms.float32)) + 1e-6)

    # ---- Heading residual loss ---------------------------------------------
    heading_residual_label = ops.GatherD()(gt_angle_residuals, 1, object_assignment)  # (B, K2) -> (B, num_proposal)
    heading_residual_normalized_label = heading_residual_label / (np.pi / num_heading_bin)
    heading_label_one_hot = ops.OneHot()(heading_class_label, num_heading_bin, Tensor(1.0, ms.float32), Tensor(0.0, ms.float32))
    # Select the residual predicted for the GT heading bin.
    heading_residual_normalized = ops.ReduceSum(keep_dims=False)(heading_residual_normalized * heading_label_one_hot, -1)
    heading_residual_normalized_loss = F.smooth_l1_loss(heading_residual_normalized, heading_residual_normalized_label, reduction='none')
    heading_residual_normalized_loss = ops.ReduceSum(keep_dims=False)(heading_residual_normalized_loss * fps_obj_gt) / (ops.ReduceSum(keep_dims=False)(ops.Cast()(fps_obj_gt, ms.float32)) + 1e-6)

    # ---- Size classification loss ------------------------------------------
    size_class_gt = ops.GatherD()(size_classes, 1, object_assignment)  # (B, K2) -> (B, num_proposal)
    size_class_gt = ops.Cast()(size_class_gt, ms.int32)
    criterion_size_class = nn.CrossEntropyLoss(reduction='none')
    # Raw logits go straight in: CrossEntropyLoss applies log-softmax
    # internally and softmax is shift-invariant, so the former explicit
    # log_softmax was redundant. Matches the heading/sem-cls branches.
    size_class_loss = criterion_size_class(ops.Transpose()(size_scores, (0, 2, 1)), size_class_gt)  # (B, num_proposal)
    size_class_loss = ops.ReduceSum(keep_dims=False)(size_class_loss * fps_obj_gt) / (ops.ReduceSum(keep_dims=False)(ops.Cast()(fps_obj_gt, ms.float32)) + 1e-6)

    # ---- Size residual loss ------------------------------------------------
    size_residual_label = ops.GatherD()(size_residual_label, 1, ops.tile(ops.ExpandDims()(object_assignment, -1), (1, 1, 3)))  # (B, K2, 3) -> (B, num_proposal, 3)
    size_class_label = size_class_gt
    num_size_cluster = 18
    size_label_one_hot = ops.OneHot()(size_class_label, num_size_cluster, Tensor(1.0, ms.float32), Tensor(0.0, ms.float32))
    size_label_one_hot_tiled = ops.tile(ops.ExpandDims()(size_label_one_hot, -1), (1, 1, 1, 3))  # (B, num_proposal, num_size_cluster, 3)
    predicted_size_residual_normalized = ops.ReduceSum(keep_dims=False)(size_residuals_normalized * size_label_one_hot_tiled, 2)  # (B, num_proposal, 3)
    mean_size_arr_expanded = ops.ExpandDims()(ops.ExpandDims()(Tensor.from_numpy(mean_size_arr.astype(np.float32)), 0), 0)  # (1, 1, num_size_cluster, 3)
    mean_size_label = ops.ReduceSum(keep_dims=False)(size_label_one_hot_tiled * mean_size_arr_expanded, 2)  # (B, num_proposal, 3)
    size_residual_label_normalized = size_residual_label / mean_size_label  # (B, num_proposal, 3)
    size_residual_normalized_loss = size_delta * F.smooth_l1_loss(predicted_size_residual_normalized, size_residual_label_normalized, beta=size_delta, reduction='none')
    # The epsilon belongs on the summed denominator, not inside ReduceSum
    # (the former placement added 1e-6 per element before summing).
    size_residual_normalized_loss = ops.ReduceSum(keep_dims=False)(size_residual_normalized_loss * ops.ExpandDims()(fps_obj_gt, 2)) \
        / (ops.ReduceSum(keep_dims=False)(ops.Cast()(fps_obj_gt, ms.float32)) + 1e-6)

    # ---- Semantic classification loss --------------------------------------
    sem_cls_label = ops.GatherD()(target_bboxes_semcls, 1, object_assignment)  # (B, K2) -> (B, num_proposal)
    sem_cls_label = ops.Cast()(sem_cls_label, ms.int32)
    criterion_sem_cls = nn.CrossEntropyLoss(reduction='none')
    sem_cls_loss = criterion_sem_cls(ops.Transpose()(sem_cls_scores, (0, 2, 1)), sem_cls_label)  # (B, num_proposal)
    sem_cls_loss = ops.ReduceSum(keep_dims=False)(sem_cls_loss * fps_obj_gt) / (ops.ReduceSum(keep_dims=False)(ops.Cast()(fps_obj_gt, ms.float32)) + 1e-6)

    # ---- Weighted total ----------------------------------------------------
    # Coefficients are kept inline to match the values actually trained with.
    box_loss_sum = 10.0 * center_loss + 1.0 * heading_class_loss + 10.0 * heading_residual_normalized_loss + 1.0 * size_class_loss + 10.0 * size_residual_normalized_loss
    loss = 1.0 * objectness_loss_sum + 1.0 * box_loss_sum + 1.0 * sem_cls_loss
    return 8.0 * KPS_loss_sum, loss, (objectness_loss_sum, box_loss_sum, sem_cls_loss, center_loss, heading_class_loss, heading_residual_normalized_loss, size_class_loss, size_residual_normalized_loss)

class SigmoidFocalClassificationLoss(nn.Cell):
    """Sigmoid focal loss for classification.

    Adapted from
    https://gitee.com/mindspore/models/blob/master/official/cv/RetinaNet/src/retinanet.py

    Computes FL = alpha_t * (1 - p_t)^gamma * CE element-wise on top of the
    sigmoid cross-entropy, down-weighting easy examples.

    Args:
        gamma (float): Hyper-parameter to balance the easy and hard examples. Default: 2.0
        alpha (float): Hyper-parameter to balance the positive and negative example. Default: 0.25

    Inputs:
        logits (Tensor): raw (pre-sigmoid) scores.
        label (Tensor): dense binary targets, same shape as `logits`.

    Returns:
        Tensor, the element-wise focal loss (same shape as `logits`).
    """
    def __init__(self, gamma=2.0, alpha=0.25):
        super(SigmoidFocalClassificationLoss, self).__init__()
        # Fixed the 'sigmiod' misspelling; dropped the unused one-hot
        # helpers (construct already receives dense binary labels).
        self.sigmoid_cross_entropy = P.SigmoidCrossEntropyWithLogits()
        self.sigmoid = P.Sigmoid()
        self.pow = P.Pow()
        self.gamma = gamma
        self.alpha = alpha

    def construct(self, logits, label):
        ce = self.sigmoid_cross_entropy(logits, label)
        prob = self.sigmoid(logits)
        label = F.cast(label, mstype.float32)
        # p_t: probability the model assigns to the true class.
        p_t = label * prob + (1 - label) * (1 - prob)
        modulating_factor = self.pow(1 - p_t, self.gamma)
        alpha_weight_factor = label * self.alpha + (1 - label) * (1 - self.alpha)
        return modulating_factor * alpha_weight_factor * ce