import time
from typing import Dict, Optional, Sequence, Union

import tensorrt as trt
import torch
import torch.onnx
import argparse
from pcdet.datasets import build_dataloader
from pcdet.models import build_network

from pcdet.config import cfg, cfg_from_yaml_file, update_cfg_by_args

from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils


def parse_args():
    """Parse command-line arguments and load the YAML config.

    Returns:
        tuple: ``(args, cfg)`` — the parsed ``argparse.Namespace`` and the
        global pcdet ``cfg`` object populated from ``args.cfg_file``.
    """
    parser = argparse.ArgumentParser(description='Deploy BEVDet with Tensorrt')
    parser.add_argument('--cfg_file', type=str,
                        default='./configs/stereo/kitti_models/dsgn2_wwdata_dropmsv_cnn_384x928.yaml',
                        help='specify the config for training')
    parser.add_argument('engine', help='checkpoint file')
    # type=int: without it, CLI-supplied values arrive as str and the
    # `(i + 1) == args.samples` comparison in main() can never be true
    # (the default 500 was already an int, masking the bug).
    parser.add_argument('--samples', default=500, type=int,
                        help='samples to benchmark')
    parser.add_argument('--postprocessing', action='store_true')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--prefetch', action='store_true',
                        help='use prefetch to accelerate the data loading, '
                             'the inference speed is sightly degenerated due '
                             'to the computational occupancy of prefetch')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)

    return args, cfg

def torch_dtype_from_trt(dtype: trt.DataType) -> torch.dtype:
    """Convert a TensorRT data type to the corresponding torch dtype.

    (The original docstring stated the conversion backwards — the mapping
    here is TensorRT -> torch, as the signature shows.)

    Args:
        dtype (trt.DataType): The data type in TensorRT.

    Returns:
        torch.dtype: The corresponding data type in torch.

    Raises:
        TypeError: If ``dtype`` has no torch equivalent.
    """
    if dtype == trt.bool:
        return torch.bool
    elif dtype == trt.int8:
        return torch.int8
    elif dtype == trt.int32:
        return torch.int32
    elif dtype == trt.float16:
        return torch.float16
    elif dtype == trt.float32:
        return torch.float32
    else:
        raise TypeError(f'{dtype} is not supported by torch')
    
class TRTWrapper(torch.nn.Module):
    """Thin torch.nn.Module wrapper around a serialized TensorRT engine.

    Accepts either an already-deserialized ``trt.ICudaEngine`` or a path to a
    serialized engine file. Input/output binding names are discovered from the
    engine; outputs may optionally be restricted via ``output_names``.
    """

    def __init__(self,
                 engine: Union[str, trt.ICudaEngine],
                 output_names: Optional[Sequence[str]] = None) -> None:
        super().__init__()
        self.engine = engine
        if isinstance(self.engine, str):
            # Deserialize the engine from disk.
            with trt.Logger() as logger, trt.Runtime(logger) as runtime:
                with open(self.engine, mode='rb') as f:
                    engine_bytes = f.read()
                self.engine = runtime.deserialize_cuda_engine(engine_bytes)
        self.context = self.engine.create_execution_context()
        # Iterating the engine yields its binding names (legacy TRT API).
        names = [_ for _ in self.engine]
        input_names = list(filter(self.engine.binding_is_input, names))
        self._input_names = input_names
        self._output_names = output_names

        if self._output_names is None:
            # Everything that is not an input binding is an output.
            output_names = list(set(names) - set(input_names))
            self._output_names = output_names

    def forward(self, inputs: Dict[str, torch.Tensor]):
        """Run the engine on ``inputs`` (name -> CUDA tensor).

        Returns:
            Dict[str, torch.Tensor]: name -> freshly-allocated output tensor.
            Execution is enqueued on the current CUDA stream; callers must
            synchronize before reading results (as the benchmark loop does).
        """
        bindings = [None] * (len(self._input_names) + len(self._output_names))
        # BUG FIX: the original bound `input_tensor.contiguous().data_ptr()`
        # without keeping a reference to the (possibly newly allocated)
        # contiguous copy, so its memory could be freed before the async
        # execution consumed it. Hold the copies until after enqueue.
        keep_alive = []
        for input_name, input_tensor in inputs.items():
            idx = self.engine.get_binding_index(input_name)
            tensor = input_tensor.contiguous()
            keep_alive.append(tensor)
            self.context.set_binding_shape(idx, tuple(tensor.shape))
            bindings[idx] = tensor.data_ptr()

        # Allocate output tensors sized from the resolved binding shapes.
        outputs = {}
        for output_name in self._output_names:
            idx = self.engine.get_binding_index(output_name)
            dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx))
            shape = tuple(self.context.get_binding_shape(idx))

            device = torch.device('cuda')
            output = torch.zeros(size=shape, dtype=dtype, device=device)
            outputs[output_name] = output
            bindings[idx] = output.data_ptr()
        self.context.execute_async_v2(bindings,
                                      torch.cuda.current_stream().cuda_stream)
        return outputs

def get_plugin_names():
    """Return the names of all plugin creators registered with TensorRT."""
    registry = trt.get_plugin_registry()
    names = []
    for creator in registry.plugin_creator_list:
        names.append(creator.name)
    return names

def main():
    """Benchmark a serialized TensorRT engine against the pcdet test set.

    Runs the engine on up to ``--samples`` frames, reports FPS excluding a
    warm-up window, and optionally post-processes / evaluates predictions.
    """
    # NOTE(review): load_tensorrt_plugin is never defined or imported in this
    # file; it presumably comes from the deployment toolkit (e.g. mmdeploy).
    # Confirm the import before running — as-is this raises NameError.
    load_tensorrt_plugin()

    args, cfg = parse_args()
    logger = common_utils.create_logger()
    # Coerce once: --samples may arrive as str if the parser lacks type=int,
    # which would make the end-of-run comparison below never match.
    num_samples = int(args.samples)

    # Build the dataloader: batch size 1, single process, evaluation mode.
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=1,
        dist=False, workers=0, logger=logger, training=False
    )

    # Build the torch model; the 'trt' flag makes backbone_3d expose the
    # intermediate tensors needed as engine inputs (data_to_cpp).
    cfg.MODEL['trt'] = True
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)

    # Build the TensorRT engine wrapper.
    trt_model = TRTWrapper(args.engine)

    num_warmup = 50
    pure_inf_time = 0.0

    first_iter = True
    results = []
    # Benchmark with several samples and take the average.
    for i, data in enumerate(test_loader):
        load_data_to_gpu(data)
        data['save_container'] = True
        if first_iter:
            # One eager torch forward to populate backbone_3d.data_to_cpp;
            # these tensors are reused for every subsequent TRT call.
            model(data)
            data_to_cpp = model.backbone_3d.data_to_cpp
            # BUG FIX: the original lines ended with stray trailing commas,
            # wrapping each tensor in a 1-tuple and breaking the bindings.
            downsampled_disp = data_to_cpp['downsampled_disp'].contiguous()
            norm_coord_imgs = data_to_cpp['norm_coord_imgs'].contiguous()
            valids = data_to_cpp['valids'].contiguous()
            first_iter = False

        torch.cuda.synchronize()
        start_time = time.perf_counter()
        trt_output = trt_model.forward(dict(left_img=data['left_img'].contiguous(),
                                            right_img=data['right_img'],
                                            downsampled_disp=downsampled_disp,
                                            norm_coord_imgs=norm_coord_imgs,
                                            valids=valids,
                                            )
                                       )

        # Optional post-processing: deserialize raw engine outputs and run
        # the detection head's box generation + NMS.
        if args.postprocessing:
            trt_output = [trt_output[f'output_{k}'] for k in
                          range(6 * len(model.pts_bbox_head.task_heads))]
            pred = model.result_deserialize(trt_output)
            # BUG FIX: the original used one chained assignment
            # (`a=..., b=..., c=...`) that did not bind these names.
            cls_preds = pred['cls_preds']
            box_preds = pred['box_preds']
            dir_cls_preds = pred['dir_cls_preds']

            batch_cls_preds, batch_box_preds = model.xxhead.generate_predicted_boxes(
                1, cls_preds=cls_preds, box_preds=box_preds,
                dir_cls_preds=dir_cls_preds)
            batch_dict = dict(batch_cls_preds=batch_cls_preds,
                              batch_box_preds=batch_box_preds)
            pred_dict = model.xxhead.post_processing(batch_dict)

            if args.eval:
                # NOTE(review): the original built mmdet3d-style results from
                # undefined names (LiDARInstance3DBoxes / bbox3d2result) and
                # would raise NameError; collect the pcdet post-processing
                # output instead — confirm this matches evaluate()'s input.
                results.append(pred_dict)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time

        # Skip the warm-up window before accumulating timings.
        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % 50 == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Done image [{i + 1:<3}/ {args.samples}], '
                      f'fps: {fps:.2f} img / s')

        if (i + 1) == num_samples:
            # BUG FIX: the original added `elapsed` again here, double-counting
            # the final sample (it was already added in the branch above).
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(f'Overall \nfps: {fps:.2f} img / s '
                  f'\ninference time: {1000/fps:.2f} ms')
            if not args.eval:
                return

    assert args.eval
    eval_kwargs = cfg.get('evaluation', {}).copy()
    # hard-code way to remove EvalHook args
    for key in [
        'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
        'rule'
    ]:
        eval_kwargs.pop(key, None)
    eval_kwargs.update(dict(metric=args.eval))
    # BUG FIX: the original referenced an undefined name `dataset`; the
    # dataset object returned by build_dataloader is `test_set`.
    print(test_set.evaluate(results, **eval_kwargs))


if __name__ == '__main__':
    # main() has no return value; the original bound it to `fps`, which was
    # always None and never used.
    main()