import torch
from mmdet.models.detectors.base import ForwardResults
from mmdet.structures.det_data_sample import OptSampleList

from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import is_dynamic_shape

from mmdeploy.codebase.mmdet.models.detectors.base_detr import \
        _set_metainfo, __predict_impl


@FUNCTION_REWRITER.register_rewriter(
    'mmdet.models.detectors.base_detr.DetectionTransformer.predict',
    backend='ascend')
def detection_transformer__predict(self,
                                   batch_inputs: torch.Tensor,
                                   data_samples: OptSampleList = None,
                                   rescale: bool = True,
                                   **kwargs) -> ForwardResults:
    """Rewrite `predict` of `DetectionTransformer` for the ascend backend.

    Instead of deriving the image shape from the input tensor at runtime,
    the shape recorded by the preprocessing pipeline (`resize_shape` on the
    first data sample) is used as metainfo, and detection results are
    returned as Tensors instead of numpy arrays.

    Args:
        batch_inputs (Tensor): Inputs with shape (N, C, H, W).
        data_samples (List[:obj:`DetDataSample`]): The Data
            Samples. It usually includes information such as
            `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
        rescale (bool): Whether to rescale the results.

    Returns:
        tuple[Tensor]: Detection results of the
        input images.
            - dets (Tensor): Classification bboxes and scores.
                Has a shape (num_instances, 5)
            - labels (Tensor): Labels of bboxes, has a shape
                (num_instances, ).
    """
    # NOTE(review): presumably the ascend backend needs a statically known
    # input shape, hence reading `resize_shape` from the data sample rather
    # than `torch._shape_as_tensor(batch_inputs)` — confirm against the
    # default-backend rewriter in base_detr.py.
    img_shape = data_samples[0].resize_shape

    # Attach the shape metainfo so downstream post-processing sees it.
    data_samples = _set_metainfo(data_samples, img_shape)

    return __predict_impl(self, batch_inputs, data_samples, rescale)
