# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
from typing import Optional, Tuple, Union
from detrsmpl.core.conventions.keypoints_mapping import get_keypoint_idx
from detrsmpl.utils.geometry import project_points

from .builder import MATCH_COST


@MATCH_COST.register_module()
class BBoxL1Cost:
    """Pairwise L1 match cost between predicted and ground-truth boxes.

    Args:
        weight (int | float, optional): Scale factor for the returned cost.
            Defaults to 1.
        box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN.
            Defaults to 'xyxy'.

    Examples:
        >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost
        >>> import torch
        >>> self = BBoxL1Cost()
        >>> bbox_pred = torch.rand(1, 4)
        >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
        >>> factor = torch.tensor([10, 8, 10, 8])
        >>> self(bbox_pred, gt_bboxes, factor)
        tensor([[1.6172, 1.6422]])
    """
    def __init__(self, weight=1., box_format='xyxy'):
        assert box_format in ['xyxy', 'xywh']
        self.weight = weight
        self.box_format = box_format

    def __call__(self, bbox_pred, gt_bboxes):
        """Compute the L1 cost matrix.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), all in range [0, 1]. Shape (num_query, 4).
            gt_bboxes (Tensor): Ground truth boxes with normalized
                coordinates (x1, y1, x2, y2). Shape (num_gt, 4).

        Returns:
            torch.Tensor: bbox_cost value with weight,
                shape (num_query, num_gt).
        """
        # Bring both sides into the same box format before taking the
        # pairwise L1 distance.
        fmt = self.box_format
        if fmt == 'xywh':
            gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)
        elif fmt == 'xyxy':
            bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)
        return torch.cdist(bbox_pred, gt_bboxes, p=1) * self.weight


@MATCH_COST.register_module()
class FocalLossCost:
    """Focal-loss based classification match cost.

    Args:
        weight (int | float, optional): Scale factor for the returned cost.
            Defaults to 1.
        alpha (int | float, optional): Focal loss alpha. Defaults to 0.25.
        gamma (int | float, optional): Focal loss gamma. Defaults to 2.
        eps (float, optional): Numerical-stability epsilon inside the logs.
            Defaults to 1e-12.
        binary_input (bool, optional): Whether the input is binary masks
            rather than per-class logits. Defaults to False.

    Examples:
        >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost
        >>> import torch
        >>> self = FocalLossCost()
        >>> cls_pred = torch.rand(4, 3)
        >>> gt_labels = torch.tensor([0, 1, 2])
        >>> self(cls_pred, gt_labels)
        tensor([[-0.3236, -0.3364, -0.2699],
               [-0.3439, -0.3209, -0.4807],
               [-0.4099, -0.3795, -0.2929],
               [-0.1950, -0.1207, -0.2626]])
    """
    def __init__(self,
                 weight=1.,
                 alpha=0.25,
                 gamma=2,
                 eps=1e-12,
                 binary_input=False):
        self.weight = weight
        self.alpha = alpha
        self.gamma = gamma
        self.eps = eps
        self.binary_input = binary_input

    def _pos_neg_costs(self, scores):
        """Return per-element (positive, negative) focal cost terms for
        sigmoid scores."""
        neg = -(1 - scores + self.eps).log() * (
            1 - self.alpha) * scores.pow(self.gamma)
        pos = -(scores + self.eps).log() * self.alpha * (
            1 - scores).pow(self.gamma)
        return pos, neg

    def _focal_loss_cost(self, cls_pred, gt_labels):
        """Focal cost for per-class logits.

        Args:
            cls_pred (Tensor): Predicted classification logits, shape
                (num_query, num_class).
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).

        Returns:
            torch.Tensor: cls_cost value with weight,
                shape (num_query, num_gt).
        """
        scores = cls_pred.sigmoid()
        pos, neg = self._pos_neg_costs(scores)
        # Gather the columns of the gt labels; pos - neg is the net cost of
        # assigning each query to each gt class.
        return (pos[:, gt_labels] - neg[:, gt_labels]) * self.weight

    def _mask_focal_loss_cost(self, cls_pred, gt_labels):
        """Focal cost for binary mask inputs.

        Args:
            cls_pred (Tensor): Predicted classification logits
                in shape (num_query, d1, ..., dn), dtype=torch.float32.
            gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn),
                dtype=torch.long. Labels should be binary.

        Returns:
            Tensor: Focal cost matrix with weight in shape
                (num_query, num_gt).
        """
        scores = cls_pred.flatten(1).sigmoid()
        targets = gt_labels.flatten(1).float()
        num_points = scores.shape[1]
        pos, neg = self._pos_neg_costs(scores)
        # Positive term where the mask is 1, negative term where it is 0;
        # averaged over all points.
        cost = torch.einsum('nc,mc->nm', pos, targets)
        cost = cost + torch.einsum('nc,mc->nm', neg, (1 - targets))
        return cost / num_points * self.weight

    def __call__(self, cls_pred, gt_labels):
        """Dispatch to the mask or the per-class variant.

        Args:
            cls_pred (Tensor): Predicted classification logits.
            gt_labels (Tensor): Labels.

        Returns:
            Tensor: Focal cost matrix with weight in shape
                (num_query, num_gt).
        """
        if self.binary_input:
            return self._mask_focal_loss_cost(cls_pred, gt_labels)
        return self._focal_loss_cost(cls_pred, gt_labels)


@MATCH_COST.register_module()
class ClassificationCost:
    """Softmax classification match cost.

    Args:
        weight (int | float, optional): Scale factor for the returned cost.
            Defaults to 1.

    Examples:
        >>> from mmdet.core.bbox.match_costs.match_cost import \
        ... ClassificationCost
        >>> import torch
        >>> self = ClassificationCost()
        >>> cls_pred = torch.rand(4, 3)
        >>> gt_labels = torch.tensor([0, 1, 2])
        >>> self(cls_pred, gt_labels)
        tensor([[-0.3430, -0.3525, -0.3045],
               [-0.3077, -0.2931, -0.3992],
               [-0.3664, -0.3455, -0.2881],
               [-0.3343, -0.2701, -0.3956]])
    """
    def __init__(self, weight=1.):
        self.weight = weight

    def __call__(self, cls_pred, gt_labels):
        """Compute the classification cost matrix.

        Args:
            cls_pred (Tensor): Predicted classification logits, shape
                (num_query, num_class).
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).

        Returns:
            torch.Tensor: cls_cost value with weight,
                shape (num_query, num_gt).
        """
        # Following the official DETR repo, the NLL is approximated by
        # 1 - cls_score[gt_label]; the constant 1 does not change the
        # matching and is dropped.
        probs = cls_pred.softmax(-1)
        return -probs[:, gt_labels] * self.weight


@MATCH_COST.register_module()
class IoUCost:
    """IoU-based match cost.

    Args:
        iou_mode (str, optional): IoU variant, e.g. 'iou' or 'giou'.
            Defaults to 'giou'.
        weight (int | float, optional): Scale factor for the returned cost.
            Defaults to 1.

    Examples:
        >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost
        >>> import torch
        >>> self = IoUCost()
        >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]])
        >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
        >>> self(bboxes, gt_bboxes)
        tensor([[-0.1250,  0.1667],
               [ 0.1667, -0.5000]])
    """
    def __init__(self, iou_mode='giou', weight=1.):
        self.weight = weight
        self.iou_mode = iou_mode

    def __call__(self, bboxes, gt_bboxes):
        """Compute the (negated) pairwise IoU cost.

        Args:
            bboxes (Tensor): Predicted boxes with unnormalized coordinates
                (x1, y1, x2, y2). Shape (num_query, 4).
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape (num_gt, 4).

        Returns:
            torch.Tensor: iou_cost value with weight,
                shape (num_query, num_gt).
        """
        # pairwise_ious: [num_bboxes, num_gt]
        pairwise_ious = bbox_overlaps(bboxes,
                                      gt_bboxes,
                                      mode=self.iou_mode,
                                      is_aligned=False)
        # Higher overlap -> lower cost; the constant 1 in (1 - IoU) does
        # not change the matching and is omitted.
        return -pairwise_ious * self.weight


@MATCH_COST.register_module()
class DiceCost:
    """Cost of mask assignments based on dice losses.

    Args:
        weight (int | float, optional): loss_weight. Defaults to 1.
        pred_act (bool, optional): Whether to apply sigmoid to mask_pred.
            Defaults to False.
        eps (float, optional): Numerical-stability epsilon. Defaults to 1e-3.
        naive_dice (bool, optional): If True, use the naive dice loss
            in which the denominator uses first powers. If False, use
            the second power as adopted by K-Net and SOLO.
            Defaults to True.
    """
    def __init__(self, weight=1., pred_act=False, eps=1e-3, naive_dice=True):
        self.weight = weight
        self.pred_act = pred_act
        self.eps = eps
        self.naive_dice = naive_dice

    def binary_mask_dice_loss(self, mask_preds, gt_masks):
        """Pairwise dice loss between predictions and binary gt masks.

        Args:
            mask_preds (Tensor): Mask prediction in shape (num_query, *).
            gt_masks (Tensor): Ground truth in shape (num_gt, *),
                storing 0 (negative) or 1 (positive).

        Returns:
            Tensor: Dice cost matrix in shape (num_query, num_gt).
        """
        preds = mask_preds.flatten(1)
        targets = gt_masks.flatten(1).float()
        # 2 * |X ∩ Y| for every (pred, gt) pair at once.
        intersection = 2 * torch.einsum('nc,mc->nm', preds, targets)
        if self.naive_dice:
            cardinality = preds.sum(-1)[:, None] + \
                targets.sum(-1)[None, :]
        else:
            cardinality = preds.pow(2).sum(1)[:, None] + \
                targets.pow(2).sum(1)[None, :]
        return 1 - (intersection + self.eps) / (cardinality + self.eps)

    def __call__(self, mask_preds, gt_masks):
        """Compute the dice cost matrix.

        Args:
            mask_preds (Tensor): Mask prediction logits in shape
                (num_query, *).
            gt_masks (Tensor): Ground truth in shape (num_gt, *).

        Returns:
            Tensor: Dice cost matrix with weight in shape
                (num_query, num_gt).
        """
        if self.pred_act:
            mask_preds = mask_preds.sigmoid()
        return self.binary_mask_dice_loss(mask_preds, gt_masks) * self.weight


@MATCH_COST.register_module()
class CrossEntropyLossCost:
    """Binary cross-entropy match cost.

    Args:
        weight (int | float, optional): loss weight. Defaults to 1.
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            of softmax. Only ``True`` is supported. Defaults to True.

    Examples:
         >>> from mmdet.core.bbox.match_costs import CrossEntropyLossCost
         >>> import torch
         >>> bce = CrossEntropyLossCost(use_sigmoid=True)
         >>> cls_pred = torch.tensor([[7.6, 1.2], [-1.3, 10]])
         >>> gt_labels = torch.tensor([[1, 1], [1, 0]])
         >>> print(bce(cls_pred, gt_labels))
    """
    def __init__(self, weight=1., use_sigmoid=True):
        assert use_sigmoid, 'use_sigmoid = False is not supported yet.'
        self.weight = weight
        self.use_sigmoid = use_sigmoid

    def _binary_cross_entropy(self, cls_pred, gt_labels):
        """Pairwise mean BCE between every prediction and every gt.

        Args:
            cls_pred (Tensor): The prediction with shape (num_query, 1, *)
                or (num_query, *).
            gt_labels (Tensor): The learning label of prediction with
                shape (num_gt, *).

        Returns:
            Tensor: Cross entropy cost matrix in shape (num_query, num_gt).
        """
        preds = cls_pred.flatten(1).float()
        targets = gt_labels.flatten(1).float()
        num_points = preds.shape[1]
        # BCE against all-ones / all-zeros targets; mixing these per gt
        # label reproduces the full pairwise BCE without an explicit loop.
        pos_cost = F.binary_cross_entropy_with_logits(
            preds, torch.ones_like(preds), reduction='none')
        neg_cost = F.binary_cross_entropy_with_logits(
            preds, torch.zeros_like(preds), reduction='none')
        cost = torch.einsum('nc,mc->nm', pos_cost, targets) + \
            torch.einsum('nc,mc->nm', neg_cost, 1 - targets)
        return cost / num_points

    def __call__(self, cls_pred, gt_labels):
        """Compute the BCE cost matrix.

        Args:
            cls_pred (Tensor): Predicted classification logits.
            gt_labels (Tensor): Labels.

        Returns:
            Tensor: Cross entropy cost matrix with weight in
                shape (num_query, num_gt).
        """
        if not self.use_sigmoid:
            raise NotImplementedError
        return self._binary_cross_entropy(cls_pred, gt_labels) * self.weight


@MATCH_COST.register_module()
class Keypoints3DCost(object):
    """L1 match cost between pelvis-aligned 3D keypoints.

    Prediction and ground truth are each translated so their pelvis (the
    mid-point of the two hip joints) sits at the origin before the L1
    distance is taken, making the cost invariant to global translation.

    Args:
        convention (str): Keypoint convention name used to resolve the
            hip-joint indices (passed to ``get_keypoint_idx``).
        weight (float, optional): Scale factor for the returned cost.
            Defaults to 1.0.
    """
    def __init__(
        self,
        convention,
        weight=1.0,
    ) -> None:
        self.weight = weight
        self.convention = convention

    def __call__(self,
                 pred_keypoints3d: torch.Tensor,
                 gt_keypoints3d: torch.Tensor,
                 has_keypoints3d: Optional[torch.Tensor] = None):
        """Compute the pairwise 3D keypoint cost.

        Args:
            pred_keypoints3d (torch.Tensor): Predicted keypoints with shape
                (num_query, kp_num, 3).
            gt_keypoints3d (torch.Tensor): Ground-truth keypoints with shape
                (num_gt, kp_num, 4); the last channel is the per-keypoint
                visibility/confidence flag.
            has_keypoints3d (torch.Tensor, optional): Unused; kept for
                interface compatibility. Defaults to None.

        Returns:
            torch.Tensor: Cost matrix with weight, shape
                (num_query, num_gt).
        """
        # B: batch_size N: instance_num K: kp_num D: 2 for 2D; 3 for 3D
        Q = pred_keypoints3d.shape[0]  # Q means query num
        N, K, D = gt_keypoints3d.shape

        # Tile to (N, Q, K, ...) so every gt is compared with every query.
        gt_keypoints3d = gt_keypoints3d.unsqueeze(1).repeat([1, Q, 1, 1])
        keypoints3d_conf = gt_keypoints3d[..., 3].float().unsqueeze(-1)
        keypoints3d_conf = keypoints3d_conf.repeat(1, 1, 1, 3)
        gt_keypoints3d = gt_keypoints3d[..., :3].float()
        pred_keypoints3d = pred_keypoints3d.unsqueeze(0).repeat([N, 1, 1,
                                                                 1]).float()

        right_hip_idx = get_keypoint_idx('right_hip_extra', self.convention)
        left_hip_idx = get_keypoint_idx('left_hip_extra', self.convention)

        # Pelvis = mid-point of the hips; subtracting it removes the
        # global translation from both keypoint sets.
        gt_pelvis = (gt_keypoints3d[:, :, right_hip_idx, :] +
                     gt_keypoints3d[:, :, left_hip_idx, :]) / 2
        pred_pelvis = (pred_keypoints3d[:, :, right_hip_idx, :] +
                       pred_keypoints3d[:, :, left_hip_idx, :]) / 2

        gt_keypoints3d = gt_keypoints3d - gt_pelvis[:, :, None, :]
        pred_keypoints3d = pred_keypoints3d - pred_pelvis[:, :, None, :]

        # Fix: mask out invisible keypoints (conf == 0) so they contribute
        # nothing to the cost; previously the confidence was computed but
        # never applied, while avg_factor counted only visible keypoints.
        # [Q, N]
        loss = (keypoints3d_conf *
                torch.abs(gt_keypoints3d - pred_keypoints3d)).sum(
                    [-2, -1]).permute(1, 0)
        # Number of visible keypoints per gt (shape N); clamp to avoid a
        # division by zero when a gt has no visible keypoint at all.
        avg_factor = (keypoints3d_conf[:, 0, :, 0] > 0).sum(-1).clamp(min=1)

        loss = self.weight * (loss / avg_factor)
        return loss


@MATCH_COST.register_module()
class Keypoints2DCost(object):
    """L1 match cost between projected and ground-truth 2D keypoints.

    Predicted 3D keypoints are projected to the image plane with a
    weak-perspective camera (per-query scale and translation, fixed focal
    length) and compared with the ground truth in normalized coordinates.

    Args:
        convention (str): Keypoint convention name (stored for interface
            consistency with :class:`Keypoints3DCost`; not used in the
            computation).
        weight (float, optional): Scale factor for the returned cost.
            Defaults to 1.0.
        img_res (int, optional): Image resolution used for projection and
            normalization. Defaults to 512.
        focal_length (float, optional): Focal length of the assumed
            camera. Defaults to 5000.
    """
    def __init__(
        self,
        convention,
        weight=1.0,
        img_res=512,
        focal_length=5000.,
    ) -> None:
        self.weight = weight
        self.convention = convention
        self.img_res = img_res
        self.focal_length = focal_length

    def __call__(self,
                 pred_keypoints3d: torch.Tensor,
                 pred_camera: torch.Tensor,
                 gt_keypoints2d: torch.Tensor,
                 has_keypoints2d: Optional[torch.Tensor] = None):
        """Compute the pairwise 2D keypoint cost.

        Args:
            pred_keypoints3d (torch.Tensor): Predicted 3D keypoints with
                shape (num_query, kp_num, 3).
            pred_camera (torch.Tensor): Weak-perspective camera parameters
                (scale, tx, ty) with shape (num_query, 3).
            gt_keypoints2d (torch.Tensor): Ground-truth 2D keypoints with
                shape (num_gt, kp_num, 3); the last channel is the
                per-keypoint visibility/confidence flag.
            has_keypoints2d (torch.Tensor, optional): Unused; kept for
                interface compatibility. Defaults to None.

        Returns:
            torch.Tensor: Cost matrix with weight, shape
                (num_query, num_gt).
        """
        # B: batch_size N: instance_num K: kp_num D: 2 for 2D; 3 for 3D
        Q = pred_keypoints3d.shape[0]  # Q means query num
        N, K, D = gt_keypoints2d.shape

        # Tile to (N, Q, K, ...) so every gt is compared with every query.
        gt_keypoints2d = gt_keypoints2d.unsqueeze(1).repeat([1, Q, 1, 1])
        keypoints2d_conf = gt_keypoints2d[..., 2].float().unsqueeze(-1)
        keypoints2d_conf = keypoints2d_conf.repeat(1, 1, 1, 2)
        gt_keypoints2d = gt_keypoints2d[..., :2].float()
        pred_keypoints3d = pred_keypoints3d.unsqueeze(0).repeat([N, 1, 1,
                                                                 1]).float()
        pred_camera = pred_camera.unsqueeze(0).repeat([N, 1, 1]).float()

        # Convert weak-perspective (s, tx, ty) to a camera translation;
        # 1e-9 guards against division by a zero scale.
        cam_t = torch.stack([
            pred_camera[..., 1], pred_camera[..., 2], 2 * self.focal_length /
            (self.img_res * pred_camera[..., 0] + 1e-9)
        ],
                            dim=-1)

        # Camera intrinsics (renamed from ``K``, which shadowed kp_num).
        cam_mat = torch.zeros([N, Q, 3, 3], device=pred_keypoints3d.device)
        cam_mat[..., 0, 0] = self.focal_length
        cam_mat[..., 1, 1] = self.focal_length
        cam_mat[..., 2, 2] = 1.
        cam_mat[..., :-1, -1] = torch.tensor(
            [self.img_res / 2., self.img_res / 2.],
            device=pred_keypoints3d.device)

        # Translate into camera space and divide by depth.
        pred_keypoints3d_ = pred_keypoints3d + cam_t.unsqueeze(2)
        projected_kp3d = pred_keypoints3d_ / pred_keypoints3d_[
            ..., -1].unsqueeze(-1)

        # Apply camera intrinsics.
        projected_kp3d = torch.einsum('nqij,nqkj->nqki', cam_mat,
                                      projected_kp3d)
        pred_keypoints2d = projected_kp3d[..., :-1]

        # Normalize keypoints to [-1, 1]
        pred_keypoints2d = 2 * pred_keypoints2d / (self.img_res - 1)
        gt_keypoints2d = 2 * gt_keypoints2d / (self.img_res - 1)

        # Fix: mask out invisible keypoints (conf == 0) so they contribute
        # nothing to the cost; previously the confidence was computed but
        # never applied, while avg_factor counted only visible keypoints.
        # [Q, N]
        loss = (keypoints2d_conf *
                torch.abs(gt_keypoints2d - pred_keypoints2d)).sum(
                    [-2, -1]).permute(1, 0)
        # Number of visible keypoints per gt (shape N); clamp to avoid a
        # division by zero when a gt has no visible keypoint at all.
        avg_factor = (keypoints2d_conf[:, 0, :, 0] > 0).sum(-1).clamp(min=1)

        loss = self.weight * (loss / avg_factor)
        return loss


@MATCH_COST.register_module()
class KeypointsMSECost(object):
    """Confidence-weighted pairwise MSE cost between keypoint sets.

    Args:
        weight (float, optional): Scale factor for the returned cost.
            Defaults to 1.0.
        keypoint_weight (torch.Tensor, optional): Fallback per-keypoint
            weights of shape (kp_num,), used when ``__call__`` receives no
            ``keypoint_weight``. Defaults to None. (Previously this
            attribute was read in ``__call__`` without ever being set,
            raising AttributeError.)
    """
    def __init__(self, weight=1.0, keypoint_weight=None) -> None:
        self.weight = weight
        self.keypoint_weight = keypoint_weight

    def __call__(self,
                 pred,
                 target,
                 pred_conf=None,
                 target_conf=None,
                 keypoint_weight=None):
        """Compute the pairwise MSE cost.

        Args:
            pred (torch.Tensor): Predicted keypoints with shape (N, K, D),
                N instances (queries), K keypoints, D = 2 or 3 coords.
            target (torch.Tensor): Ground-truth keypoints, shape (B, K, D).
            pred_conf (torch.Tensor, optional): Per-keypoint confidence of
                the predictions, viewable as (1, N, K, 1). Defaults to None.
            target_conf (torch.Tensor, optional): Per-keypoint confidence of
                the targets, viewable as (B, 1, K, 1). Defaults to None.
            keypoint_weight (torch.Tensor, optional): Per-keypoint weights,
                viewable as (1, 1, K, 1); falls back to
                ``self.keypoint_weight``. Defaults to None.

        Returns:
            torch.Tensor: Cost matrix with weight, shape (N, B).
        """
        N = pred.shape[0]  # N means instance num
        # Fix: the original unpacked the tensor itself (``B, K, D = pred``);
        # the target's shape is what is needed here.
        B, K, D = target.shape

        # Optional weights, broadcastable against the (B, N, K, D) pairwise
        # error below. The conf views were also fixed: pred confidence
        # varies over the instance axis, target confidence over the batch
        # axis.
        pred_conf = pred_conf.view((1, N, K, 1)) \
            if pred_conf is not None else 1.0
        target_conf = target_conf.view((B, 1, K, 1)) \
            if target_conf is not None else 1.0
        if keypoint_weight is not None:
            keypoint_weight = keypoint_weight.view((1, 1, K, 1))
        elif self.keypoint_weight is not None:
            keypoint_weight = self.keypoint_weight.view(
                (1, 1, K, 1)).type_as(pred)
        else:
            keypoint_weight = 1.0

        weight = keypoint_weight * pred_conf * target_conf

        # B: batch_size N: instance_num K: kp_num D: 2 for 2D; 3 for 3D
        pred = pred.unsqueeze(0).repeat([B, 1, 1, 1])  # B, N, K, D
        target = target.unsqueeze(1).repeat([1, N, 1, 1])

        # Weight the per-coordinate squared error, reduce over keypoints
        # and coordinates, then transpose to (N, B) so queries are rows,
        # matching the other match costs. (The original summed only the
        # last dim and then called permute(1, 0) on a 3-D tensor, which
        # raises.)
        loss = (weight *
                F.mse_loss(pred, target, reduction='none')).sum([-2, -1])
        loss = self.weight * loss.permute(1, 0)

        return loss
