import argparse
import mmcv
import os
from os import path as osp
import torch
import warnings
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (
    get_dist_info,
    init_dist,
    load_checkpoint,
    wrap_fp16_model,
)
import onnx
from onnxsim import simplify

import torch
from torch import nn
from typing import Optional, Dict, Any
from mmdet.apis import single_gpu_test, multi_gpu_test, set_random_seed
from mmdet.datasets import replace_ImageToTensor, build_dataset
from mmdet.datasets import build_dataloader as build_dataloader_origin
from mmdet.models import build_detector
import sys
sys.path.append(os.getcwd())
from projects.mmdet3d_plugin.datasets.builder import build_dataloader
from projects.mmdet3d_plugin.apis.test import custom_multi_gpu_test


def parse_args():
    """Parse command-line arguments for the ONNX export script.

    Returns:
        argparse.Namespace: with ``config`` (test config file path),
        ``checkpoint`` (checkpoint file) and ``fuse_conv_bn`` (bool flag).
    """
    parser = argparse.ArgumentParser(
        description="export model to onnx!"
    )
    parser.add_argument("config", help="test config file path")
    parser.add_argument("checkpoint", help="checkpoint file")
    parser.add_argument(
        "--fuse-conv-bn",
        action="store_true",
        # Fix: the original fragments were missing a separating space,
        # rendering as "increasethe" in --help output.
        help="Whether to fuse conv and bn, this will slightly increase "
        "the inference speed",
    )
    args = parser.parse_args()
    return args


class e2e_model(nn.Module):
    """End-to-end deployment wrapper around a built detector.

    Exposes the deploy-time sub-graphs of ``model`` (BEV feature
    extraction, detection head, map head, occupancy head and the
    motion/planning head) and chains them in :meth:`forward` so the whole
    pipeline can be traced into a single ONNX graph.
    """

    def __init__(self, model):
        """Wrap an already-built (and optionally conv-bn-fused) detector."""
        super(e2e_model, self).__init__()
        self.model = model

    def forward_bev(self, img, pre_feat, trans_mat, image_wh, lidar2img, dt):
        """Run BEV feature extraction.

        Returns the BEV feature map (consumed by the occupancy head) and
        the feature tensor consumed by the detection/map/motion heads.
        The third value returned by ``extract_bev_feat_deploy`` is unused.
        """
        bev_feature, feature, _ = self.model.extract_bev_feat_deploy(
            img, pre_feat, trans_mat, image_wh, lidar2img, dt)
        return bev_feature, feature

    def forward_motion_planing(self,
                               bev_feature,
                               box_inst_feat,
                               box_anchor_embed,
                               box_classification,
                               box_prediction,
                               map_inst_feat,
                               map_anchor_embed,
                               map_classification,
                               map_prediction,
                               inst_feat_queue,
                               anchor_queue,
                               ego_anchor,
                               ego_feat_queue,
                               ego_anchor_queue,
                               temp_mask,
                               ):
        """Run the motion-forecasting / planning head on detection and map
        outputs plus the temporal queues.

        NOTE: the method name keeps the original spelling ("planing") so
        existing callers are not broken.
        """
        (
            motion_classification,
            motion_prediction,
            planning_classification,
            planning_prediction,
            planning_status,
        ) = self.model.head.motion_plan_head.forward_deploy(
            bev_feature,
            box_inst_feat,
            box_anchor_embed,
            box_classification,
            box_prediction,
            map_inst_feat,
            map_anchor_embed,
            map_classification,
            map_prediction,
            inst_feat_queue,
            anchor_queue,
            ego_anchor,
            ego_feat_queue,
            ego_anchor_queue,
            temp_mask,
            # "forward_depoly" is the upstream method's (misspelled) name.
            self.model.head.det_head.anchor_encoder.forward_depoly)
        return (motion_classification, motion_prediction,
                planning_classification, planning_prediction,
                planning_status)

    def forward_box(self, bev_feature, image_wh, lidar2img, dt,
                    inst_feat, anchor, tmp_inst_feat, tmp_anchor,):
        """Run the 3D detection head.

        ``bev_feature`` is expected to be an iterable and is unpacked
        into the deploy forward of the detection head.
        """
        (
            classification,
            prediction,
            quality,
            instance_feature,
            anchor_embed,
            anchor,
        ) = self.model.head.det_head.forward_deploy(
            *bev_feature,
            dt,
            inst_feat,
            anchor,
            tmp_inst_feat,
            tmp_anchor,
            image_wh, lidar2img)
        return (classification, prediction, quality,
                instance_feature, anchor_embed, anchor)

    def forward_map(self, bev_feature, image_wh, lidar2img, dt,
                    inst_feat, anchor, tmp_inst_feat, tmp_anchor,):
        """Run the online-map head (same calling convention as forward_box)."""
        (
            classification,
            prediction,
            quality,
            instance_feature,
            anchor_embed,
            anchor,
        ) = self.model.head.map_head.forward_deploy(
            *bev_feature,
            dt,
            inst_feat,
            anchor,
            tmp_inst_feat,
            tmp_anchor,
            image_wh,
            lidar2img)
        return (classification, prediction, quality,
                instance_feature, anchor_embed, anchor)

    def forward_occ(self, bev_feature):
        """Run the occupancy head; returns its raw output dict."""
        out = self.model.head.occ_head(bev_feature, {})
        return out

    def forward(self, img, pre_feat, trans_mat, image_wh, lidar2img, dt,
                box_inst_feat, box_anchor, box_tmp_inst_feat, box_tmp_anchor,
                map_inst_feat, map_anchor, map_tmp_inst_feat, map_tmp_anchor,
                inst_feat_queue,
                anchor_queue,
                ego_anchor,
                ego_feat_queue,
                ego_anchor_queue,
                temp_mask,):
        """Full pipeline: BEV -> occupancy, detection, map, motion/planning.

        Returns a flat tuple (the ONNX graph outputs): the first BEV
        feature, the six detection outputs, the six map outputs, the five
        motion/planning outputs, and the two occupancy outputs.
        """
        _bev_feature, bev_feature = self.forward_bev(
            img, pre_feat, trans_mat, image_wh, lidar2img, dt)

        # Occupancy head consumes the raw BEV feature map; only the first
        # occupancy scale and the flow are exported.
        out = self.forward_occ(_bev_feature)
        occ_output = (out['occupancy'][0], out['flow'])

        box_output = self.forward_box(bev_feature,
                                      image_wh,
                                      lidar2img,
                                      dt,
                                      box_inst_feat,
                                      box_anchor,
                                      box_tmp_inst_feat,
                                      box_tmp_anchor)
        (box_classification, box_prediction, box_quality,
         box_instance_feature, box_anchor_embed, box_anchor) = box_output

        map_output = self.forward_map(
            bev_feature,
            image_wh,
            lidar2img,
            dt,
            map_inst_feat,
            map_anchor,
            map_tmp_inst_feat,
            map_tmp_anchor)
        (map_classification, map_prediction, quality,
         instance_feature, map_anchor_embed, anchor) = map_output

        motion_plan_output = self.forward_motion_planing(
            bev_feature,
            box_inst_feat,
            box_anchor_embed,
            box_classification,
            box_prediction,
            map_inst_feat,
            map_anchor_embed,
            map_classification,
            map_prediction,
            inst_feat_queue,
            anchor_queue,
            ego_anchor,
            ego_feat_queue,
            ego_anchor_queue,
            temp_mask,
        )
        return (bev_feature[0], *box_output, *map_output,
                *motion_plan_output, *occ_output)
def main():
    """Build the detector from the config, wrap it and export to ONNX.

    The exported graph bundles BEV extraction, detection, map, occupancy
    and motion/planning heads: the wrapped model is fed random dummy
    tensors with the fixed deployment shapes, traced with
    ``torch.onnx.export`` (opset 16) and then simplified with
    onnx-simplifier, overwriting the same output file.
    """
    args = parse_args()
    args.save_onnx = 'test_bev_box_map_motionplan.onnx'
    cfg = Config.fromfile(args.config)

    # import modules from string list so custom registries are populated.
    if cfg.get("custom_imports", None):
        from mmcv.utils import import_modules_from_strings

        import_modules_from_strings(**cfg["custom_imports"])

    # import modules from plugin/xx, registry will be updated
    if hasattr(cfg, "plugin") and cfg.plugin:
        import importlib

        # Turn a filesystem dir ("a/b/c") into a dotted module path
        # ("a.b.c"). Fall back to the config file's directory when the
        # config declares no explicit plugin_dir.
        if hasattr(cfg, "plugin_dir"):
            _module_dir = os.path.dirname(cfg.plugin_dir)
        else:
            _module_dir = os.path.dirname(args.config)
        _module_path = ".".join(_module_dir.split("/"))
        print(_module_path)
        plg_lib = importlib.import_module(_module_path)

    # set work dir
    if cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    cfg.data.test.work_dir = cfg.work_dir
    print('work_dir: ', cfg.work_dir)

    # build the model (test mode only).
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get("test_cfg"))
    # NOTE(review): args.checkpoint is parsed but never loaded, so the
    # exported ONNX contains randomly initialised weights. Restore
    #   load_checkpoint(model, args.checkpoint, map_location="cpu")
    # if the export must carry trained weights -- confirm with the owner.
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    e2emodel = e2e_model(model).cuda()

    # ---- dummy inputs: shapes fixed by the deployment configuration ----
    NUMS_CAM = 6
    C = 3
    INPUT_H = 256
    INPUT_W = 704
    bev_H = 70
    bev_W = 70
    embeddims = 256

    img = torch.randn(NUMS_CAM, C, INPUT_H, INPUT_W).cuda()
    pre_feat = torch.randn(1, embeddims, bev_H, bev_W).cuda()
    trans_mat = torch.randn(1, 4, 4).cuda()
    image_wh = torch.tensor([704, 256]).reshape(1, 1, -1).repeat(1, 6, 1).cuda()
    print(image_wh.shape)
    lidar2img = torch.randn(1, 6, 4, 4).cuda()
    dt = torch.randn(1).cuda()
    box_inst_feat = torch.randn(1, 900, embeddims).cuda()
    box_anchor = torch.randn(1, 900, 11).cuda()
    box_tmp_inst_feat = torch.randn(1, 600, embeddims).cuda()
    box_tmp_anchor = torch.randn(1, 600, 11).cuda()
    map_inst_feat = torch.randn(1, 100, embeddims).cuda()
    map_anchor = torch.randn(1, 100, 40).cuda()
    map_tmp_inst_feat = torch.randn(1, 33, embeddims).cuda()
    map_tmp_anchor = torch.randn(1, 33, 40).cuda()
    inst_feat_queue = torch.randn(900, 3, embeddims).cuda()
    anchor_queue = torch.randn(900, 3, 11).cuda()
    ego_anchor = torch.randn(1, 1, 11).cuda()
    ego_feat_queue = torch.randn(1, 3, embeddims).cuda()
    ego_anchor_queue = torch.randn(1, 3, 11).cuda()
    temp_mask = torch.randn(901, 4).cuda()

    # Order must match the input_names list passed to torch.onnx.export.
    dummy_input = [img, pre_feat, trans_mat, image_wh, lidar2img, dt]
    dummy_input.extend([box_inst_feat, box_anchor,
                        box_tmp_inst_feat, box_tmp_anchor])
    dummy_input.extend([map_inst_feat, map_anchor,
                        map_tmp_inst_feat, map_tmp_anchor])
    dummy_input.extend([inst_feat_queue, anchor_queue, ego_anchor,
                        ego_feat_queue, ego_anchor_queue, temp_mask])

    # Sanity-check a plain forward pass before tracing.
    output = e2emodel(*dummy_input)
    print("infer OK !!!")

    with torch.no_grad():
        torch.onnx.export(
            e2emodel,
            tuple(dummy_input),
            args.save_onnx,
            input_names=["img", "pre_feat", "dpose", "image_wh",
                         "lidar2img", "dt",
                         "box_inst_feat", "box_anchor",
                         "box_tmp_inst_feat", "box_tmp_anchor",
                         "map_inst_feat", "map_anchor",
                         "map_tmp_inst_feat", "map_tmp_anchor",
                         "inst_feat_queue", "anchor_queue", "ego_anchor",
                         "ego_feat_queue", "ego_anchor_queue", "temp_mask"
                         ],
            output_names=[
                "bev_feature",
                "box_classification",
                "box_prediction",
                "box_quality",
                "box_instance_feature",
                "box_anchor_embed",
                "box_anchor",
                "map_classification",
                "map_prediction",
                "map_instance_feature",
                "map_anchor_embed",
                "map_anchor",
                "motion_classification",
                "motion_prediction",
                "planning_classification",
                "planning_prediction",
                "planning_status",
                "occ_0",
                "occ_flow",
                ],
            opset_version=16,
            do_constant_folding=True,
            verbose=False,
        )

        # Simplify the exported graph in place.
        onnx_orig = onnx.load(args.save_onnx)
        onnx_simp, check = simplify(onnx_orig)
        assert check, "Simplified ONNX model could not be validated"
        onnx.save(onnx_simp, args.save_onnx)
        print(f'🚀 Export onnx completed. ONNX saved in "{args.save_onnx}" 🤗.')

if __name__ == "__main__":
    torch.multiprocessing.set_start_method(
        "fork"
    )  # use fork workers_per_gpu can be > 1
    main()
