import argparse
import os
import re
from pathlib import Path
import numpy as np
import torch
from tools.eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_yaml_file, update_cfg_by_args
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
from pcdet.models import load_data_to_gpu
# from from_onnx import from_onnx
from tools.eval_utils.eval_utils import eval_one_epoch_trt
from pathlib import Path

torch.backends.cudnn.benchmark = True

import onnx
import numpy as np
# import onnx_graphsurgeon as gs
from onnxsim import simplify

def parse_config():
    """Build the CLI argument parser, parse argv, and load the YAML config.

    Returns:
        (args, cfg): parsed argparse namespace and the populated global
        pcdet config object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--work_dir', default='./trt/', help='work dir to save file')
    parser.add_argument('--cfg_file', type=str,
                        default='./configs/stereo/kitti_models/dsgn2_cnn2d_384_bn2.yaml',
                        help='specify the config for training')
    # basic testing options
    parser.add_argument('--batch_size', type=int, default=1, required=False,
                        help='batch size for training')
    parser.add_argument('--ckpt', type=str,
                        default='./outputs/configs_stereo_kitti_models/dsgn2_cnn2d_384_bn2.fresh_start/ckpt/checkpoint_epoch_60.pth',
                        help='checkpoint file')
    parser.add_argument('--fp16', action='store_true', default=False,
                        help='Whether to use tensorrt fp16')
    parser.add_argument('--int8', action='store_true',
                        help='Whether to use tensorrt int8')
    parser.add_argument('--fuse-conv-bn', action='store_true',
                        help='Whether to fuse conv and bn, this will slightly increase'
                        'the inference speed')

    args = parser.parse_args()

    # Populate the shared pcdet `cfg` object from the chosen YAML file.
    cfg_from_yaml_file(args.cfg_file, cfg)

    return args, cfg


class Container(torch.nn.Module):
    """Minimal nn.Module wrapper exposing each dict entry as an attribute.

    Wrapping the values in a Module lets them be bundled and serialized
    via ``torch.jit.script(...).save(...)``.
    """

    def __init__(self, my_values):
        super().__init__()
        # Attach every entry of the dict as an attribute on this module.
        for name, value in my_values.items():
            setattr(self, name, value)



def main():
    """Export the DSGN2 TRT-friendly model to a simplified ONNX file.

    Pipeline:
      1. Build the dataset/dataloader and grab one sample batch.
      2. Build two copies of the network: the regular one (whose forward
         produces the intermediate ``data_to_cpp`` tensors) and a
         TRT-friendly one (``cfg.MODEL['trt'] = True`` switches its forward
         to plain-tensor inputs/outputs — assumed, per the calls below).
      3. Run the regular model once, save the intermediates as a
         TorchScript container, then export the TRT model to ONNX.
      4. Validate and simplify the exported ONNX graph in place.
    """
    args, cfg = parse_config()
    logger = common_utils.create_logger()

    # log to file
    logger.info('**********************Start logging**********************')

    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=1,
        dist=False, workers=0, logger=logger, training=False
    )

    # A single sample batch is enough to drive the export.
    data = next(iter(test_loader))

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    # Flip the config AFTER building the regular model so only the second
    # copy is constructed in TRT mode.
    cfg.MODEL['trt'] = True
    model_trt = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)

    with torch.no_grad():
        print("Loading model from checkpoint:{}".format(args.ckpt))
        model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True, strict=True)
        model.cuda().eval()
        model_trt.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True, strict=True)
        model_trt.cuda().eval()

        load_data_to_gpu(data)
        data['save_container'] = True
        batch_dict = model(data)

        # Reference outputs from the eager model (kept around for debugging /
        # numeric comparison against the TRT-style forward below).
        targets = ['dir_cls_preds', "cls_preds", "box_preds"]
        results = [model.dense_head.forward_ret_dict[name] for name in targets]

        # Persist the intermediate tensors needed by the C++/TensorRT runtime.
        data_to_cpp = model.backbone_3d.data_to_cpp
        container = torch.jit.script(Container(data_to_cpp))
        container.save("container_cnn2d_half.pt")
        print('container saved.\n')

        # Register custom ONNX/TRT plugin symbolics before export.
        from register_plugins import register
        register()

        downsampled_disp = data_to_cpp['downsampled_disp']
        norm_coord_imgs = data_to_cpp['norm_coord_imgs']
        valids = data_to_cpp['valids']
        # Stack left/right views along the batch axis for the stereo backbone.
        batched_images = torch.cat([data['left_img'], data['right_img']], dim=0)

        if args.fp16:
            batched_images = batched_images.half()
            downsampled_disp = downsampled_disp.half()
            norm_coord_imgs = norm_coord_imgs.half()
            valids = valids.half()

        # Sanity-run the TRT-style forward once before exporting.
        dir_cls_preds, cls_preds, box_preds = model_trt(batched_images, downsampled_disp, norm_coord_imgs, valids)

        os.makedirs(args.work_dir, exist_ok=True)  # ensure output dir exists
        model_prefix = "dsgn_r50_2d_bn2_60"
        # os.path.join is robust to work_dir missing its trailing slash.
        model_onnx = os.path.join(args.work_dir, model_prefix + ".onnx")
        torch.onnx.export(
            model_trt,
            (batched_images, downsampled_disp, norm_coord_imgs, valids),
            model_onnx,
            export_params=True,
            opset_version=12,  # the ONNX version to export the model to
            do_constant_folding=True,  # whether to execute constant folding for optimization
            input_names=['batched_images', 'downsampled_disp',
                        'norm_coord_imgs', 'valids'],  # the model's input names
            output_names=['dir_cls_preds', "cls_preds", "box_preds"],
        )
        print("convert successfully!")

    # Validate and simplify the exported graph in place.
    model_raw = onnx.load(model_onnx)
    try:
        onnx.checker.check_model(model_raw)
    except Exception as e:
        # Report the validation failure instead of swallowing it silently.
        print('ONNX Model Incorrect: {}'.format(e))
    else:
        print('ONNX Model Correct')
    onnx_simp, check = simplify(model_raw)
    assert check, "Simplified ONNX model could not be validated"
    onnx.save(onnx_simp, model_onnx)
    

def infer(engine_path='/cv/yc/DSGN2/trt/dsgn_r50_2d_bn2_60_fp16.engine',
          result_dir='./outputs/trtengine_fp16'):
    """Evaluate the model through a serialized TensorRT engine.

    Args:
        engine_path: path to the serialized TensorRT engine to load.
            Defaults to the previously hard-coded, machine-specific path
            for backward compatibility — pass your own path on other hosts.
        result_dir: directory where evaluation results are written.
    """
    print("infer")
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('**********************Start logging**********************')

    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=1,
        dist=False, workers=12, logger=logger, training=False
    )

    # Build the TRT-friendly network and attach the serialized engine.
    cfg.MODEL['trt'] = True
    model_trt = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    model_trt.load_engine(engine_path)

    with torch.no_grad():
        model_trt.cuda().eval()
        eval_one_epoch_trt(
            cfg, model_trt, test_loader, 0, logger, dist_test=False,
            result_dir=Path(result_dir), save_to_file=True,
        )

if __name__ == '__main__':
    # main() performs the ONNX export; it is currently disabled in favor of
    # running TensorRT-engine evaluation via infer().
    # main()
    infer()
