# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
this code is base on https://github.com/Sense-X/Co-DETR/blob/main/projects/models/co_dino_head.py
"""
from functools import partial

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
import paddle.distributed as dist

from ..transformers.utils import get_contrastive_denoising_training_group
from .co_deformable_detr_head import CoDeformDETRHead
from ppdet.modeling.transformers.utils import bbox_cxcywh_to_xyxy, inverse_sigmoid, bbox_xyxy_to_cxcywh
from ppdet.data.transform.atss_assigner import bbox_overlaps

__all__ = ["CoDINOHead"]


def reduce_mean(tensor):
    """Obtain the mean of a tensor across all GPUs.

    Args:
        tensor (Tensor): Value to average across ranks.

    Returns:
        Tensor: The input averaged over all ranks, or the input unchanged
            when not running in an (initialized, multi-rank) distributed
            context.
    """
    # BUGFIX: the original guard was
    # `not (dist.get_world_size() and dist.is_initialized())`; since
    # get_world_size() is always >= 1 (truthy) the first operand never
    # mattered. Make both conditions explicit; skipping the all_reduce for
    # world_size == 1 is a no-op change (divide by 1 + sum over one rank).
    if not dist.is_initialized():
        return tensor
    world_size = dist.get_world_size()
    if world_size <= 1:
        return tensor
    tensor = tensor.clone()
    # Divide first, then SUM across ranks: sum_i(x_i / N) == mean.
    dist.all_reduce(
        tensor.divide_(paddle.to_tensor(world_size, dtype="float32")),
        op=dist.ReduceOp.SUM,
    )
    return tensor


def multi_apply(func, *args, **kwargs):
    """Apply ``func`` element-wise over several parallel sequences.

    ``func`` is called once per tuple of corresponding items from ``args``
    (any ``kwargs`` are bound to every call). The per-call results, each a
    tuple, are transposed so that outputs of the same kind end up together.

    Args:
        func (Callable): Function applied to each group of items; must
            return a tuple of results.
        *args: Parallel sequences of positional arguments.
        **kwargs: Fixed keyword arguments bound to every call.

    Returns:
        tuple(list): One list per result slot of ``func``, each list holding
            that slot's value for every input item.
    """
    bound = partial(func, **kwargs) if kwargs else func
    per_item = [bound(*items) for items in zip(*args)]
    return tuple(list(column) for column in zip(*per_item))


@register
class CoDINOHead(CoDeformDETRHead):
    def __init__(self,
                 in_channels,
                 num_classes=80,
                 num_query=900,
                 num_dn_query=100,
                 transformer=None,
                 positional_encoding="SinePositionalEncoding",
                 loss_cls="QualityFocalLoss",
                 loss_bbox="L1Loss",
                 loss_iou="GIoULoss",
                 assigner="HungarianAssigner",
                 sampler="PseudoSampler",
                 label_noise_ratio=0.5,
                 box_noise_scale=1.0,
                 **kwargs):
        """CoDINO head: a DINO-style query head with Co-DETR auxiliary losses.

        Args:
            in_channels (int): Number of input feature channels.
            num_classes (int): Number of object classes. Default 80.
            num_query (int): Number of matching (non-denoising) queries;
                must equal the transformer's ``two_stage_num_proposals``.
            num_dn_query (int): Number of denoising queries. Default 100.
            transformer: Transformer config/object used by the head.
            positional_encoding (str): Positional encoding type name.
            loss_cls (str): Classification loss type name.
            loss_bbox (str): Box regression loss type name.
            loss_iou (str): IoU loss type name.
            assigner (str): Matching assigner type name.
            sampler (str): Sampler type name.
            label_noise_ratio (float): Fraction of denoising labels to
                perturb. Default 0.5.
            box_noise_scale (float): Scale of box noise for denoising
                queries. Default 1.0.
        """
        # Denoising hyper-parameters consumed by forward_train.
        self.num_dn_query = num_dn_query
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale

        # Keep the transformer's proposal count in sync with num_query.
        # NOTE(review): hasattr() is False for a plain dict config, and the
        # else-branch attribute assignment would then fail — verify what type
        # callers actually pass for `transformer`.
        if hasattr(transformer, 'two_stage_num_proposals'):
            assert transformer.two_stage_num_proposals == num_query, \
                'two_stage_num_proposals must be equal to num_query for DINO'
        else:
            transformer.two_stage_num_proposals = num_query
        super(CoDINOHead, self).__init__(in_channels,
                                         num_classes, 
                                         num_query=num_query, 
                                         transformer=transformer, 
                                         positional_encoding=positional_encoding,
                                         loss_cls=loss_cls,
                                         loss_bbox=loss_bbox,
                                         loss_iou=loss_iou,
                                         assigner=assigner,
                                         sampler=sampler, 
                                         **kwargs)

        # DINO only works as the two-stage, box-refining variant.
        assert self.as_two_stage, \
            'as_two_stage must be True for DINO'
        assert self.with_box_refine, \
            'with_box_refine must be True for DINO'

    def _init_layers(self):
        """Build DINO-specific layers on top of the parent head's layers."""
        super()._init_layers()
        # DINO takes its queries from the two-stage proposals and the
        # denoising label embedding, so the free query embedding is unused.
        self.query_embedding = None
        # NOTE The original DINO repo uses num_embeddings 92 for COCO:
        # 91 entries (0~90) for target classes plus one (index 91) for an
        # [Unknown] class that the original DINO never actually uses.
        self.label_embedding = nn.Embedding(self.cls_out_channels,
                                            self.embed_dims)
        # Extra stride-2 level appended to the encoder feature maps that
        # feed the auxiliary heads.
        dim = self.embed_dims
        self.downsample = nn.Sequential(
            nn.Conv2D(dim, dim, kernel_size=3, stride=2, padding=1),
            nn.GroupNorm(32, dim),
        )

    def forward_train(self,
                      x,
                      img_metas,
                      gt_bboxes,
                      gt_labels=None,
                      gt_bboxes_ignore=None,
                      proposal_cfg=None,
                      **kwargs):
        """Run a training forward pass and compute losses.

        Args:
            x (tuple[Tensor]): Multi-level features from the neck.
            img_metas (list[dict]): Per-image meta info; each entry must
                provide 'img_shape' as (h, w, _).
            gt_bboxes (list[Tensor]): Per-image ground-truth boxes in
                absolute (x1, y1, x2, y2) format.
            gt_labels (list[Tensor], optional): Per-image gt class indices.
            gt_bboxes_ignore (list[Tensor], optional): Ignored gt boxes.
            proposal_cfg: Must be None for this head.

        Returns:
            tuple: (losses dict, encoder outputs from the transformer).
        """
        assert proposal_cfg is None, '"proposal_cfg" must be None'

        # Convert per-image gt boxes from absolute xyxy to normalized
        # cxcywh, as expected by the denoising-group generator.
        gt_norm_bboxes_list = []
        for img_meta, bboxes in zip(img_metas, gt_bboxes):
            img_h, img_w, _ = img_meta['img_shape']
            factor = paddle.to_tensor([img_w, img_h, img_w, img_h],
                                      dtype=bboxes.dtype).unsqueeze(0)
            bboxes_normalized = bbox_xyxy_to_cxcywh(bboxes) / factor if \
                len(bboxes) > 0 else bboxes
            gt_norm_bboxes_list.append(bboxes_normalized)
        gt_meta = {'gt_class': gt_labels, 'gt_bbox': gt_norm_bboxes_list}

        dn_label_query, dn_bbox_query, attn_mask, dn_meta = \
            get_contrastive_denoising_training_group(gt_meta,
                                                     self.num_classes,
                                                     self.num_query,
                                                     self.label_embedding.weight,
                                                     self.num_dn_query,
                                                     self.label_noise_ratio,
                                                     self.box_noise_scale)
        if dn_label_query is None:
            # No gt in the whole batch: fall back to empty denoising queries
            # so the transformer still receives consistently shaped inputs.
            dn_label_query = paddle.zeros((1, 0, self.embed_dims), dtype=paddle.float32)
            dn_bbox_query = paddle.zeros((1, 0, 4), dtype=paddle.float32)
            attn_mask = paddle.ones((self.num_query, self.num_query), dtype=paddle.bool)
            dn_meta = {
                "dn_num_group": 1,
                # BUGFIX: zero denoising queries followed by all matching
                # queries. Use self.num_query instead of the hard-coded 900
                # so heads configured with a different query count split
                # their outputs correctly in extract_dn_outputs.
                "dn_num_split": [0, self.num_query]
            }
        outs = self(x, img_metas, dn_label_query, dn_bbox_query, attn_mask)
        if gt_labels is None:
            loss_inputs = outs + (gt_bboxes, img_metas, dn_meta)
        else:
            loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, dn_meta)
        losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
        # Encoder feature maps are reused by the auxiliary (Co-DETR) heads.
        enc_outputs = outs[-1]
        return losses, enc_outputs

    def forward(self, 
                mlvl_feats, 
                img_metas=None,
                dn_label_query=None,
                dn_bbox_query=None,
                attn_mask=None):
        """Forward pass shared by training and inference.

        Args:
            mlvl_feats (list[Tensor]): Multi-level features, each (N, C, H, W).
            img_metas (list[dict], optional): Per-image meta info providing
                'batch_input_shape' and 'img_shape'. When None, all-ones
                masks are used (no padding assumed).
            dn_label_query (Tensor, optional): Denoising label queries.
            dn_bbox_query (Tensor, optional): Denoising box queries.
            attn_mask (Tensor, optional): Self-attention mask separating
                denoising and matching queries.

        Returns:
            tuple: (outputs_classes, outputs_coords, topk_score, topk_anchor,
                outs) where ``outs`` holds the encoder's per-level feature
                maps plus one extra downsampled level for the aux heads.
        """
        batch_size = mlvl_feats[0].shape[0]
        if img_metas is not None:
            input_img_h, input_img_w = img_metas[0]["batch_input_shape"]
            # Mask is 1 inside each image's valid (unpadded) region,
            # 0 over padding.
            img_masks = paddle.zeros((batch_size, input_img_h, input_img_w),mlvl_feats[0].dtype)
            for img_id in range(batch_size):
                img_h, img_w, _ = img_metas[img_id]["img_shape"]
                img_masks[img_id, :img_h, :img_w] = 1
        
        mlvl_masks = []
        mlvl_positional_encodings = []
        for feat in mlvl_feats:
            if img_metas is None:
                src_mask = paddle.ones((batch_size, feat.shape[-2], feat.shape[-1]), dtype=feat.dtype)
            else:
                # Resize the image-level mask down to this feature level.
                src_mask = F.interpolate(img_masks[None], size=feat.shape[-2:]).squeeze(0)
            mlvl_masks.append(src_mask)
            # Positional encoding comes out NHWC; transpose to NCHW.
            mlvl_positional_encodings.append(
                self.positional_encoding(mlvl_masks[-1]).transpose((0,3,1,2)))
            
        query_embeds = None
        hs, inter_references, topk_score, topk_anchor, enc_outputs = \
            self.transformer(
                mlvl_feats,
                mlvl_masks,
                query_embeds,
                mlvl_positional_encodings,
                dn_label_query,
                dn_bbox_query,
                reg_branches=self.reg_branches if self.with_box_refine else None,
                cls_branches=self.cls_branches if self.as_two_stage else None,
                attn_mask=attn_mask
            )

        # Split the flattened encoder output back into per-level (N, C, H, W)
        # feature maps; the aux heads consume these.
        outs = []
        num_level = len(mlvl_feats)
        start = 0
        for lvl in range(num_level):
            bs, c, h, w = mlvl_feats[lvl].shape
            end = start + h * w
            # NOTE(review): Tensor.contiguous() requires a Paddle version
            # with stride support — confirm against the target Paddle.
            feat = enc_outputs[:, start:end].transpose((0, 2, 1)).contiguous()
            start = end
            outs.append(feat.reshape((bs, c, h, w)))
        # One extra stride-2 level appended after the finest encoder levels.
        outs.append(self.downsample(outs[-1]))

        if dn_label_query is not None and dn_label_query.shape[1] == 0:
            # NOTE: If there is no target in the image, the parameters of
            # label_embedding won't be used in producing loss, which raises
            # RuntimeError when using distributed mode.
            hs[0] += self.label_embedding.weight[0, 0] * 0.0

        outputs_classes = []
        outputs_coords = []

        # Per-decoder-layer classification and reference-refined regression.
        for lvl in range(hs.shape[0]):
            reference = inter_references[lvl]
            reference = inverse_sigmoid(reference, eps=1e-3)
            outputs_class = self.cls_branches[lvl](hs[lvl])
            tmp = self.reg_branches[lvl](hs[lvl])
            if reference.shape[-1] == 4:
                tmp += reference
            else:
                assert reference.shape[-1] == 2
                # 2-d references refine only the box center.
                tmp[..., :2] += reference
            outputs_coord = tmp.sigmoid()
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)

        outputs_classes = paddle.stack(outputs_classes)
        outputs_coords = paddle.stack(outputs_coords)

        return outputs_classes, outputs_coords, topk_score, topk_anchor, outs

    def forward_aux(self, mlvl_feats, img_metas, aux_targets, head_idx):
        """Forward pass for the auxiliary (co-training) branch.

        Args:
            mlvl_feats (tuple[Tensor]): Features from the upstream
                network, each is a 4D-tensor with shape (N, C, H, W).
            img_metas (list[dict]): List of image information; each entry
                must provide 'batch_input_shape' and 'img_shape'.
            aux_targets (tuple): Pre-computed auxiliary targets
                (coords, labels, targets, label_weights, bbox_weights,
                feats, attn_masks); only coords, feats and attn_masks are
                consumed here.
            head_idx (int): Index of this auxiliary head, forwarded to the
                transformer.

        Returns:
            tuple: (outputs_classes, outputs_coords, None, None), with the
                stacked per-decoder-layer class logits and sigmoid boxes.
        """
        (
            aux_coords,
            aux_labels,
            aux_targets,
            aux_label_weights,
            aux_bbox_weights,
            aux_feats,
            attn_masks,
        ) = aux_targets
        batch_size = mlvl_feats[0].shape[0]
        input_img_h, input_img_w = img_metas[0]["batch_input_shape"]
        # Mask is 1 inside the valid (unpadded) image region, 0 elsewhere.
        img_masks = paddle.zeros(
            (batch_size, input_img_h, input_img_w), mlvl_feats[0].dtype)
        for img_id in range(batch_size):
            img_h, img_w, _ = img_metas[img_id]["img_shape"]
            img_masks[img_id, :img_h, :img_w] = 1

        mlvl_masks = []
        mlvl_positional_encodings = []
        for feat in mlvl_feats:
            # Resize image-level mask to each level; encoding NHWC -> NCHW.
            mlvl_masks.append(
                F.interpolate(img_masks[None], size=feat.shape[-2:]).squeeze(0))
            mlvl_positional_encodings.append(
                self.positional_encoding(mlvl_masks[-1]).transpose((0,3,1,2)))

        query_embeds = None
        hs, inter_references = self.transformer.forward_aux(
            mlvl_feats,
            mlvl_masks,
            query_embeds,
            mlvl_positional_encodings,
            aux_coords,
            pos_feats=aux_feats,
            reg_branches=self.reg_branches if self.with_box_refine else None,  # noqa:E501
            cls_branches=self.cls_branches if self.as_two_stage else None,  # noqa:E501
            return_encoder_output=True,
            attn_masks=attn_masks,
            head_idx=head_idx,
        )

        outputs_classes = []
        outputs_coords = []

        # Per-decoder-layer classification and reference-refined regression.
        for lvl in range(hs.shape[0]):
            reference = inter_references[lvl]
            reference = inverse_sigmoid(reference, eps=1e-3)
            outputs_class = self.cls_branches[lvl](hs[lvl])
            tmp = self.reg_branches[lvl](hs[lvl])
            if reference.shape[-1] == 4:
                tmp += reference
            else:
                assert reference.shape[-1] == 2
                # 2-d references refine only the box center.
                tmp[..., :2] += reference
            outputs_coord = tmp.sigmoid()
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)

        outputs_classes = paddle.stack(outputs_classes)
        outputs_coords = paddle.stack(outputs_coords)

        return outputs_classes, outputs_coords, None, None

    def loss_single_aux(self,
                        cls_scores,
                        bbox_preds,
                        labels,
                        label_weights,
                        bbox_targets,
                        bbox_weights,
                        img_metas,
                        gt_bboxes_ignore_list=None):
        """Loss for one decoder layer of the auxiliary (co-training) branch.

        Args:
            cls_scores (Tensor): Box score logits from a single decoder layer
                for all images. Shape [bs, num_query, cls_out_channels].
            bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
                for all images, with normalized coordinate (cx, cy, w, h) and
                shape [bs, num_query, 4].
            labels (Tensor): Pre-computed per-query target labels.
            label_weights (Tensor): Per-query classification weights.
            bbox_targets (Tensor): Per-query normalized cxcywh box targets.
            bbox_weights (Tensor): Per-query regression weights.
            img_metas (list[dict]): Per-image meta info with 'img_shape'.
            gt_bboxes_ignore_list (list[Tensor], optional): Unused here.

        Returns:
            tuple(Tensor): (loss_cls, loss_bbox, loss_iou), each already
                scaled by ``self.lambda_1``.
        """
        num_imgs = cls_scores.shape[0]
        num_q = cls_scores.shape[1]
        try:
            labels = labels.reshape((num_imgs * num_q,))
            label_weights = label_weights.reshape((num_imgs * num_q,))
            bbox_targets = bbox_targets.reshape((num_imgs * num_q, 4))
            bbox_weights = bbox_weights.reshape((num_imgs * num_q, 4))
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. On a target/prediction shape
            # mismatch fall back to zero losses that still depend on
            # cls_scores so the autograd graph stays connected.
            return cls_scores.mean() * 0, cls_scores.mean() * 0, \
                cls_scores.mean() * 0

        bg_class_ind = self.num_classes
        num_total_pos = len(
            ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1))
        num_total_neg = num_imgs * num_q - num_total_pos

        # classification loss
        cls_scores = cls_scores.reshape((-1, self.cls_out_channels))
        # construct weighted avg_factor to match with the official DETR repo
        cls_avg_factor = num_total_pos * 1.0 + \
            num_total_neg * self.bg_cls_weight
        if self.sync_cls_avg_factor:
            cls_avg_factor = reduce_mean(
                paddle.to_tensor([cls_avg_factor], dtype=cls_scores.dtype))
        cls_avg_factor = max(cls_avg_factor, 1)

        # (removed a duplicate `bg_class_ind = self.num_classes` here)
        pos_inds = ((labels >= 0)
                    & (labels < bg_class_ind)).nonzero().squeeze(1)
        # Quality targets for the QFL-style loss: IoU between decoded
        # positive predictions and their targets (computed in numpy).
        scores = paddle.zeros_like(labels, dtype=label_weights.dtype)
        pos_bbox_targets = bbox_targets[pos_inds]
        pos_decode_bbox_targets = bbox_cxcywh_to_xyxy(pos_bbox_targets) if \
            len(pos_bbox_targets) > 0 else pos_bbox_targets
        pos_bbox_pred = bbox_preds.reshape((-1, 4))[pos_inds]
        pos_decode_bbox_pred = bbox_cxcywh_to_xyxy(pos_bbox_pred) if \
            len(pos_bbox_pred) > 0 else pos_bbox_pred
        scores[pos_inds] = bbox_overlaps(
            pos_decode_bbox_pred.detach().numpy(),
            pos_decode_bbox_targets.numpy(),
            is_aligned=True)
        loss_cls = self.loss_cls(
            cls_scores, (labels, scores),
            weight=label_weights,
            avg_factor=cls_avg_factor)

        # Compute the average number of gt boxes across all gpus, for
        # normalization purposes
        num_total_pos = paddle.to_tensor([num_total_pos], dtype=loss_cls.dtype)
        num_total_pos = paddle.clip(reduce_mean(num_total_pos), min=1).item()

        # construct factors used for rescale bboxes
        factors = []
        for img_meta, bbox_pred in zip(img_metas, bbox_preds):
            img_h, img_w, _ = img_meta["img_shape"]
            factor = paddle.to_tensor([img_w, img_h, img_w, img_h],
                                      dtype=bbox_pred.dtype).unsqueeze(0).tile(
                                          (bbox_pred.shape[0], 1))
            factors.append(factor)
        factors = paddle.concat(factors, 0)

        # DETR regress the relative position of boxes (cxcywh) in the image,
        # thus the learning target is normalized by the image size. So here
        # we need to re-scale them for calculating IoU loss
        bbox_preds = bbox_preds.reshape((-1, 4))
        bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors
        bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors

        # regression IoU loss, defaultly GIoU loss
        loss_iou = self.loss_iou(bboxes, bboxes_gt, bbox_weights.mean(
            axis=-1, keepdim=True)) / num_total_pos

        # regression L1 loss
        loss_bbox = self.loss_bbox(
            bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos)
        return (
            loss_cls * self.lambda_1,
            loss_bbox * self.lambda_1,
            loss_iou * self.lambda_1,
        )

    def loss(self,
             all_cls_scores,
             all_bbox_preds,
             enc_topk_scores,
             enc_topk_anchors,
             enc_outputs, 
             gt_bboxes_list,
             gt_labels_list,
             img_metas,
             dn_meta=None,
             gt_bboxes_ignore=None):
        """Compute all head losses.

        Covers the encoder's two-stage proposal losses, the per-decoder-layer
        matching losses, and (when denoising is active) the per-decoder-layer
        denoising losses.

        Returns:
            dict[str, Tensor]: Loss components keyed by name; losses from
                intermediate decoder layers are prefixed ``d{i}.``.
        """
        loss_dict = dict()

        # Split decoder outputs into denoising and matching parts.
        (all_cls_scores, all_bbox_preds, dn_cls_scores,
         dn_bbox_preds) = self.extract_dn_outputs(all_cls_scores,
                                                  all_bbox_preds, dn_meta)

        # Losses on the encoder's top-k proposals (two-stage supervision).
        if enc_topk_scores is not None:
            enc_loss_cls, enc_losses_bbox, enc_losses_iou = self.loss_single(
                enc_topk_scores, enc_topk_anchors, gt_bboxes_list,
                gt_labels_list, img_metas, gt_bboxes_ignore)
            loss_dict['enc_loss_cls'] = enc_loss_cls
            loss_dict['enc_loss_bbox'] = enc_losses_bbox
            loss_dict['enc_loss_iou'] = enc_losses_iou

        # Matching losses for every decoder layer; each layer is supervised
        # with the same ground truth.
        num_dec_layers = len(all_cls_scores)
        losses_cls, losses_bbox, losses_iou = multi_apply(
            self.loss_single, all_cls_scores, all_bbox_preds,
            [gt_bboxes_list] * num_dec_layers,
            [gt_labels_list] * num_dec_layers,
            [img_metas] * num_dec_layers,
            [gt_bboxes_ignore] * num_dec_layers)

        # Last decoder layer takes the canonical keys ...
        loss_dict["loss_cls"] = losses_cls[-1]
        loss_dict["loss_bbox"] = losses_bbox[-1]
        loss_dict["loss_iou"] = losses_iou[-1]
        # ... intermediate layers are indexed 'd{i}.'.
        for idx, (cls_i, bbox_i, iou_i) in enumerate(
                zip(losses_cls[:-1], losses_bbox[:-1], losses_iou[:-1])):
            loss_dict[f'd{idx}.loss_cls'] = cls_i
            loss_dict[f'd{idx}.loss_bbox'] = bbox_i
            loss_dict[f'd{idx}.loss_iou'] = iou_i

        # Denoising losses, also computed for every decoder layer.
        if dn_cls_scores is not None:
            dn_meta_list = [dn_meta] * len(img_metas)
            dn_losses_cls, dn_losses_bbox, dn_losses_iou = self.loss_dn(
                dn_cls_scores, dn_bbox_preds, gt_bboxes_list, gt_labels_list,
                img_metas, dn_meta_list)
            loss_dict['dn_loss_cls'] = dn_losses_cls[-1]
            loss_dict['dn_loss_bbox'] = dn_losses_bbox[-1]
            loss_dict['dn_loss_iou'] = dn_losses_iou[-1]
            for idx, (cls_i, bbox_i, iou_i) in enumerate(
                    zip(dn_losses_cls[:-1], dn_losses_bbox[:-1],
                        dn_losses_iou[:-1])):
                loss_dict[f'd{idx}.dn_loss_cls'] = cls_i
                loss_dict[f'd{idx}.dn_loss_bbox'] = bbox_i
                loss_dict[f'd{idx}.dn_loss_iou'] = iou_i

        return loss_dict

    def _get_bboxes_single(self,
                           cls_score,
                           bbox_pred,
                           img_shape,
                           scale_factor,
                           rescale=False,
                           ):
        """Transform outputs from the last decoder layer into bbox predictions
        for each image.

        Args:
            cls_score (Tensor): Box score logits from the last decoder layer
                for each image. Shape [num_query, cls_out_channels].
            bbox_pred (Tensor): Sigmoid outputs from the last decoder layer
                for each image, with coordinate format (cx, cy, w, h) and
                shape [num_query, 4].
            img_shape (tuple[int]): Shape of input image, (height, width, 3).
            scale_factor (ndarray, optional): Scale factor of the image arange
                as (w_scale, h_scale, w_scale, h_scale).
            rescale (bool, optional): If True, return boxes in original image
                space. Default False.

        Returns:
            Tensor: proposals of shape [num_kept, 6], columns laid out as
                (label, score, tl_x, tl_y, br_x, br_y).
        """
        assert len(cls_score) == len(bbox_pred)
        max_per_img = self.test_cfg.get('max_per_img', self.num_query)
        score_thr = self.test_cfg.get('score_thr', 0)

        # exclude background
        if self.loss_cls.use_sigmoid:
            cls_score = F.sigmoid(cls_score)
            # Global top-k over flattened (query, class) scores; a query may
            # contribute several classes.
            scores, indexes = cls_score.reshape([-1]).topk(max_per_img)
            det_labels = indexes % self.num_classes
            bbox_index = indexes // self.num_classes
            bbox_pred = bbox_pred[bbox_index]
        else:
            probs = F.softmax(cls_score, axis=-1)[..., :-1]
            # BUGFIX: paddle's Tensor.max returns only the values (unlike
            # torch.max, which also returns indices), so the original tuple
            # unpacking failed; compute the labels with argmax explicitly.
            scores = probs.max(-1)
            det_labels = probs.argmax(-1)
            scores, bbox_index = scores.topk(max_per_img)
            bbox_pred = bbox_pred[bbox_index]
            det_labels = det_labels[bbox_index]

        valid_mask = scores > score_thr
        scores = scores[valid_mask]
        bbox_pred = bbox_pred[valid_mask]
        det_labels = det_labels[valid_mask]

        # Denormalize cxcywh -> absolute xyxy in the padded input image.
        det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred)
        det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1]
        det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0]
        # BUGFIX: Tensor.clip is out-of-place; the original discarded the
        # clipped result, leaving boxes that extend outside the image.
        det_bboxes[:, 0::2] = det_bboxes[:, 0::2].clip(min=0, max=img_shape[1])
        det_bboxes[:, 1::2] = det_bboxes[:, 1::2].clip(min=0, max=img_shape[0])

        if rescale:
            # NOTE(review): the [::-1] reversal assumes scale_factor arrives
            # as (h_scale, w_scale) — verify against the caller.
            det_bboxes /= paddle.concat([scale_factor[::-1], scale_factor[::-1]])
        det_bboxes = paddle.concat((scores.unsqueeze(1), det_bboxes.astype('float32')), -1)
        proposals = paddle.concat((det_labels.unsqueeze(1).astype('float32'),det_bboxes),-1)
        return proposals

    def loss_single(self,
                    cls_scores,
                    bbox_preds,
                    gt_bboxes_list,
                    gt_labels_list,
                    img_metas,
                    gt_bboxes_ignore_list=None):
        """Loss function for outputs from a single decoder layer.

        Args:
            cls_scores (Tensor): Box score logits from a single decoder layer
                for all images. Shape [bs, num_query, cls_out_channels].
            bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
                for all images, with normalized coordinate (cx, cy, w, h) and
                shape [bs, num_query, 4].
            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels_list (list[Tensor]): Ground truth class indices for each
                image with shape (num_gts, ).
            img_metas (list[dict]): List of image meta information.
            gt_bboxes_ignore_list (list[Tensor], optional): Bounding
                boxes which can be ignored for each image. Default None.

        Returns:
            tuple(Tensor): (loss_cls, loss_bbox, loss_iou) for this layer.
        """
        num_imgs = cls_scores.shape[0]
        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
        bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]
        # Per-image matching and target assignment.
        cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,
                                           gt_bboxes_list, gt_labels_list,
                                           img_metas, gt_bboxes_ignore_list)
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg,) = cls_reg_targets
        labels = paddle.concat(labels_list, 0)
        label_weights = paddle.concat(label_weights_list, 0)
        bbox_targets = paddle.concat(bbox_targets_list, 0)
        bbox_weights = paddle.concat(bbox_weights_list, 0)

        # classification loss
        cls_scores = cls_scores.reshape((-1, self.cls_out_channels))
        # construct weighted avg_factor to match with the official DETR repo
        cls_avg_factor = num_total_pos * 1.0 + \
            num_total_neg * self.bg_cls_weight
        if self.sync_cls_avg_factor:
            cls_avg_factor = reduce_mean(
                paddle.to_tensor([cls_avg_factor], dtype=cls_scores.dtype))
        cls_avg_factor = max(cls_avg_factor, 1)

        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0)
                    & (labels < bg_class_ind)).nonzero().squeeze(1)
        # Quality targets for the QFL-style loss: IoU between decoded
        # positive predictions and their assigned gts (computed in numpy).
        scores = paddle.zeros_like(labels, dtype=label_weights.dtype)
        pos_bbox_targets = bbox_targets[pos_inds]
        pos_decode_bbox_targets = bbox_cxcywh_to_xyxy(pos_bbox_targets) if \
            len(pos_bbox_targets) > 0 else pos_bbox_targets
        pos_bbox_pred = bbox_preds.reshape((-1, 4))[pos_inds]
        pos_decode_bbox_pred = bbox_cxcywh_to_xyxy(pos_bbox_pred) if \
            len(pos_bbox_pred) > 0 else pos_bbox_pred
        scores[pos_inds] = bbox_overlaps(
            pos_decode_bbox_pred.detach().numpy(),
            pos_decode_bbox_targets.numpy(),
            is_aligned=True)
        loss_cls = self.loss_cls(
            cls_scores, (labels, scores),
            weight=label_weights, 
            avg_factor=cls_avg_factor)

        # Compute the average number of gt boxes across all gpus, for
        # normalization purposes
        num_total_pos = paddle.to_tensor([num_total_pos], dtype=loss_cls.dtype)
        num_total_pos = paddle.clip(reduce_mean(num_total_pos), min=1).item()

        # construct factors used for rescale bboxes
        factors = []
        for img_meta, bbox_pred in zip(img_metas, bbox_preds):
            img_h, img_w, _ = img_meta["img_shape"]
            factor = paddle.to_tensor([img_w, img_h, img_w, img_h], 
                                      dtype=bbox_pred.dtype).unsqueeze(0).tile(
                                          (bbox_pred.shape[0], 1))
            factors.append(factor)
        factors = paddle.concat(factors, 0)

        # DETR regress the relative position of boxes (cxcywh) in the image,
        # thus the learning target is normalized by the image size. So here
        # we need to re-scale them for calculating IoU loss
        bbox_preds = bbox_preds.reshape((-1, 4))
        bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors
        bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors

        # regression IoU loss, defaultly GIoU loss
        loss_iou = self.loss_iou(bboxes, bboxes_gt, bbox_weights.mean(axis=-1, 
            keepdim=True)) / num_total_pos

        # regression L1 loss
        loss_bbox = self.loss_bbox(
            bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos)

        return loss_cls, loss_bbox, loss_iou
    
    def get_dn_target(self, dn_bbox_preds_list, gt_bboxes_list, gt_labels_list,
                      img_metas, dn_meta):
        """Collect denoising targets for a batch of images.

        Delegates to ``_get_dn_target_single`` per image, then totals the
        positive/negative sample counts across the batch.

        Returns:
            tuple: (labels_list, label_weights_list, bbox_targets_list,
                bbox_weights_list, num_total_pos, num_total_neg).
        """
        per_image = multi_apply(self._get_dn_target_single, dn_bbox_preds_list,
                                gt_bboxes_list, gt_labels_list, img_metas,
                                dn_meta)
        (labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, pos_inds_list, neg_inds_list) = per_image
        num_total_pos = sum(inds.numel() for inds in pos_inds_list)
        num_total_neg = sum(inds.numel() for inds in neg_inds_list)
        return (labels_list, label_weights_list, bbox_targets_list,
                bbox_weights_list, num_total_pos, num_total_neg)

    def _get_dn_target_single(self, dn_bbox_pred, gt_bboxes, gt_labels,
                              img_meta, dn_meta):
        """Build denoising targets for a single image.

        Args:
            dn_bbox_pred (Tensor): Denoising box predictions for this image.
            gt_bboxes (Tensor): GT boxes in absolute xyxy, shape (num_gt, 4).
            gt_labels (Tensor): GT class indices.
            img_meta (dict): Meta info providing 'img_shape'.
            dn_meta (dict): Denoising meta with 'dn_num_group' and
                'dn_num_split'.

        Returns:
            tuple: (labels, label_weights, bbox_targets, bbox_weights,
                pos_inds, neg_inds).
        """
        num_groups = dn_meta['dn_num_group']
        pad_size = dn_meta['dn_num_split'][0]
        assert pad_size % num_groups == 0
        # Queries per denoising group.
        single_pad = pad_size // num_groups
        num_bboxes = dn_bbox_pred.shape[0]

        if len(gt_labels) > 0:
            # Replicate gt indices once per group; positive query slots are
            # the gt index offset by each group's start.
            t = paddle.arange(0, len(gt_labels))
            t = t.unsqueeze(0).tile([num_groups, 1])
            pos_assigned_gt_inds = t.flatten()
            pos_inds = (paddle.arange(num_groups) * single_pad).unsqueeze(1) + t
            pos_inds = pos_inds.flatten()
        else:
            pos_inds = pos_assigned_gt_inds = paddle.to_tensor([], dtype=paddle.int64)
        # Negative slots occupy the second half of each group.
        neg_inds = pos_inds + single_pad // 2

        # label targets: background everywhere except positive slots.
        labels = paddle.full((num_bboxes, ), self.num_classes, dtype=paddle.int64)
        # NOTE(review): the [..., 0] indexing assumes gt_labels carries a
        # trailing singleton dim (shape (num_gt, 1)) — verify against the
        # dataloader output.
        labels[pos_inds] = gt_labels[pos_assigned_gt_inds
                                     ][..., 0].astype(paddle.int64)
        label_weights = paddle.ones(num_bboxes, dtype=gt_bboxes.dtype)

        # bbox targets: only positive slots regress toward a gt box.
        bbox_targets = paddle.zeros_like(dn_bbox_pred)
        bbox_weights = paddle.zeros_like(dn_bbox_pred)
        if len(pos_inds) > 0:
            bbox_weights[pos_inds] = 1.0
        img_h, img_w, _ = img_meta['img_shape']

        # DETR regress the relative position of boxes (cxcywh) in the image.
        # Thus the learning target should be normalized by the image size, also
        # the box format should be converted from defaultly x1y1x2y2 to cxcywh.
        factor = paddle.to_tensor([img_w, img_h, img_w, img_h], 
                                  dtype=dn_bbox_pred.dtype).unsqueeze(0)
        if len(gt_bboxes) > 0:
            gt_bboxes_normalized = gt_bboxes / factor
            gt_bboxes_targets = bbox_xyxy_to_cxcywh(gt_bboxes_normalized)
        else:
            gt_bboxes_targets = gt_bboxes
        # Same normalized targets repeated for every denoising group.
        bbox_targets[pos_inds] = gt_bboxes_targets.tile([num_groups, 1])

        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
                neg_inds)

    @staticmethod
    def extract_dn_outputs(all_cls_scores, all_bbox_preds, dn_meta):
        """Split decoder outputs into denoising and matching parts.

        The first ``dn_meta['dn_num_split'][0]`` entries along the query axis
        (axis 2) are denoising queries; the remainder are normal matching
        queries. When ``dn_meta`` is None no denoising was performed, so the
        full tensors are returned as matching outputs and the denoising
        outputs are None.

        Returns:
            tuple: (matching_cls_scores, matching_bbox_preds,
            denoising_cls_scores, denoising_bbox_preds).
        """
        if dn_meta is None:
            return all_cls_scores, all_bbox_preds, None, None
        # Number of leading queries reserved for denoising.
        num_dn = dn_meta['dn_num_split'][0]
        denoising_cls_scores = all_cls_scores[:, :, :num_dn, :]
        denoising_bbox_preds = all_bbox_preds[:, :, :num_dn, :]
        matching_cls_scores = all_cls_scores[:, :, num_dn:, :]
        matching_bbox_preds = all_bbox_preds[:, :, num_dn:, :]
        return (matching_cls_scores, matching_bbox_preds, denoising_cls_scores,
                denoising_bbox_preds)

    def loss_dn(self, dn_cls_scores, dn_bbox_preds, gt_bboxes_list,
                gt_labels_list, img_metas, dn_meta):
        """Compute denoising losses for every decoder layer.

        The per-image targets are shared by all decoder layers, so they are
        replicated once per layer before dispatching to ``loss_dn_single``.
        Returns the (loss_cls, loss_bbox, loss_iou) lists produced by
        ``multi_apply``, one entry per decoder layer.
        """
        num_layers = len(dn_cls_scores)
        # Replicating the same list object per layer is safe: loss_dn_single
        # only reads its inputs.
        return multi_apply(self.loss_dn_single, dn_cls_scores, dn_bbox_preds,
                           [gt_bboxes_list] * num_layers,
                           [gt_labels_list] * num_layers,
                           [img_metas] * num_layers,
                           [dn_meta] * num_layers)

    def loss_dn_single(self, dn_cls_scores, dn_bbox_preds, gt_bboxes_list,
                       gt_labels_list, img_metas, dn_meta):
        """Compute cls / L1 / IoU losses for the DN queries of one decoder
        layer.

        Args:
            dn_cls_scores (Tensor): DN classification logits,
                [num_imgs, num_dn_queries, cls_out_channels].
            dn_bbox_preds (Tensor): DN box predictions,
                [num_imgs, num_dn_queries, 4], normalized cxcywh.
            gt_bboxes_list (list[Tensor]): Per-image GT boxes (x1y1x2y2,
                pixel coordinates).
            gt_labels_list (list[Tensor]): Per-image GT class indices.
            img_metas (list[dict]): Per-image meta, each with 'img_shape'.
            dn_meta (dict): Denoising meta passed through to target building.

        Returns:
            tuple: (loss_cls, loss_bbox, loss_iou) scalars for this layer.
        """
        num_imgs = dn_cls_scores.shape[0]
        bbox_preds_list = [dn_bbox_preds[i] for i in range(num_imgs)]
        cls_reg_targets = self.get_dn_target(bbox_preds_list, gt_bboxes_list,
                                             gt_labels_list, img_metas,
                                             dn_meta)
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg) = cls_reg_targets
        # If every image has empty targets, concat would fail on zero-size
        # tensors; fall back to the (empty) targets of the first image.
        if all([len(x) == 0 for x in labels_list]):
            labels = labels_list[0]
            label_weights = label_weights_list[0]
            bbox_targets = bbox_targets_list[0]
            bbox_weights = bbox_weights_list[0]
        else:
            labels = paddle.concat(labels_list, 0)
            label_weights = paddle.concat(label_weights_list, 0)
            bbox_targets = paddle.concat(bbox_targets_list, 0)
            bbox_weights = paddle.concat(bbox_weights_list, 0)

        # classification loss
        cls_scores = dn_cls_scores.reshape((-1, self.cls_out_channels))
        # construct weighted avg_factor to match with the official DETR repo
        cls_avg_factor = \
            num_total_pos * 1.0 + num_total_neg * self.bg_cls_weight
        if self.sync_cls_avg_factor:
            # Average the factor across GPUs so all ranks normalize alike.
            cls_avg_factor = reduce_mean(
                paddle.to_tensor([cls_avg_factor], dtype=cls_scores.dtype))
        cls_avg_factor = max(cls_avg_factor, 1)

        if len(cls_scores) > 0:
            bg_class_ind = self.num_classes
            # Positive queries are those assigned a real (non-background)
            # class in the targets.
            pos_inds = ((labels >= 0)
                        & (labels < bg_class_ind)).nonzero().squeeze(1)
            # Quality scores: IoU between decoded predictions and targets,
            # used as soft classification targets (QFL-style).
            scores = paddle.zeros_like(labels, dtype=label_weights.dtype)
            pos_bbox_targets = bbox_targets[pos_inds]
            pos_decode_bbox_targets = bbox_cxcywh_to_xyxy(pos_bbox_targets)
            pos_bbox_pred = dn_bbox_preds.reshape((-1, 4))[pos_inds]
            pos_decode_bbox_pred = bbox_cxcywh_to_xyxy(pos_bbox_pred)
            # detach: IoU quality must not backprop through the box branch.
            scores[pos_inds] = bbox_overlaps(
                pos_decode_bbox_pred.detach().numpy(),
                pos_decode_bbox_targets.numpy(),
                is_aligned=True)
            loss_cls = self.loss_cls(
                cls_scores, (labels, scores),
                weight=label_weights,
                avg_factor=cls_avg_factor)
        else:
            loss_cls = paddle.zeros(1, dtype=cls_scores.dtype)

        # Compute the average number of gt boxes across all gpus, for
        # normalization purposes
        num_total_pos = paddle.to_tensor([num_total_pos], dtype=loss_cls.dtype)
        num_total_pos = paddle.clip(reduce_mean(num_total_pos), min=1).item()

        # construct factors used for rescale bboxes
        factors = []
        if dn_bbox_preds.shape[1] > 0:
            # One [w, h, w, h] row per query so normalized boxes can be
            # scaled back to pixel coordinates for the IoU loss.
            for img_meta, bbox_pred in zip(img_metas, dn_bbox_preds):
                img_h, img_w, _ = img_meta['img_shape']
                factor = paddle.to_tensor([img_w, img_h, img_w, img_h], 
                                          dtype=bbox_pred.dtype).unsqueeze(0).tile(
                                              (bbox_pred.shape[0], 1))
                factors.append(factor)
            factors = paddle.concat(factors, 0)

            # DETR regress the relative position of boxes (cxcywh) in the image,
            # thus the learning target is normalized by the image size. So here
            # we need to re-scale them for calculating IoU loss
            bbox_preds = dn_bbox_preds.reshape((-1, 4))
            bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors
            bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors

            # regression IoU loss, defaultly GIoU loss
            loss_iou = self.loss_iou(bboxes, bboxes_gt, bbox_weights.mean(
                axis=-1, keepdim=True)) / num_total_pos

            # regression L1 loss
            loss_bbox = self.loss_bbox(
                bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos)
        else:
            # No DN queries at all: return zero losses of matching dtype.
            loss_iou = paddle.zeros(1, dtype=dn_bbox_preds.dtype)
            loss_bbox = paddle.zeros(1, dtype=dn_bbox_preds.dtype)

        return loss_cls, loss_bbox, loss_iou