import argparse
import os
import re
import glob
from pathlib import Path
import numpy as np
import torch
import time
from torch import nn 
from tools.eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, update_cfg_by_args, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
from pcdet.utils import calibration_kitti as calibration
import torch.nn.functional as F
from pcdet.models import load_data_to_gpu

import typing
from torch.onnx import symbolic_helper
from torch.onnx.symbolic_helper import parse_args

torch.backends.cudnn.benchmark = True

import onnx
import numpy as np
# import onnx_graphsurgeon as gs
from onnxsim import simplify



_OPSET_VERSION = 11
_registered_ops: typing.AbstractSet[str] = set()

def parse_config():
    """Parse command-line arguments and populate the global pcdet config.

    Loads the YAML config named by ``--cfg_file``, merges CLI overrides into
    it, derives ``cfg.TAG`` / ``cfg.EXP_GROUP_PATH`` from the config path, and
    seeds numpy for reproducibility.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated global
        pcdet ``cfg`` object.

    Raises:
        ValueError: if neither ``--ckpt`` nor ``--ckpt_id`` was supplied.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='./configs/stereo/kitti_models/dsgn2_wwdata_384x896.yaml', help='specify the config for training')

    # basic testing options
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=0, help='number of workers for dataloader')
    parser.add_argument('--exp_name', type=str, default='fast', help='exp path for this experiment')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # loading options
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to evaluate')
    parser.add_argument('--load_eval', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_id', type=int, default=58, help='checkpoint id to evaluate')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    # distributed options
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    # config options
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER, help='set extra config keys if needed')
    parser.add_argument('--trainval', action='store_true', default=False, help='')
    parser.add_argument('--imitation', type=str, default="2d")
    parser.add_argument('--save_depth', action='store_true', default=False)
    parser.add_argument('--convert_onnx', action='store_true', default=False)

    args = parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    update_cfg_by_args(cfg, args)
    cfg.TAG = Path(args.cfg_file).stem
    # Drop the leading 'cfgs' directory and the trailing 'xxxx.yaml' filename.
    cfg.EXP_GROUP_PATH = '_'.join(args.cfg_file.split('/')[1:-1])

    np.random.seed(1024)  # fixed seed so evaluation runs are reproducible

    # Explicit check instead of `assert`: asserts are stripped under `python -O`.
    if not (args.ckpt or args.ckpt_id):
        raise ValueError('please specify --ckpt or --ckpt_id')

    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg

def register():
    """Install custom ONNX symbolic functions for ops the stock exporter
    cannot lower.

    Must be called before ``torch.onnx.export()``.  Currently registers a
    symbolic for ``grid_sampler`` that emits a TensorRT plugin node instead
    of failing on the unsupported ATen op.
    """

    def grid_sampler(g, input, grid, mode, padding_mode, align_corners):
        # Integer attribute codes follow ATen's grid_sampler convention:
        #   mode:          0 = bilinear, 1 = nearest, 2 = bicubic
        #   padding_mode:  0 = zeros,    1 = border,  2 = reflection
        mode_val = symbolic_helper._maybe_get_const(mode, "i")
        padding_val = symbolic_helper._maybe_get_const(padding_mode, "i")
        align_val = int(symbolic_helper._maybe_get_const(align_corners, "b"))

        # Emit a TensorRT plugin node; the plugin consumes the same integer
        # attributes as the ATen op.  Output shape follows grid_sample:
        # (N, C, H, W) sampled by (N, H_out, W_out, 2) -> (N, C, H_out, W_out).
        return g.op(
            "nvinfer1::GridSamplePluginDynamic",
            input,
            grid,
            mode_i=mode_val,
            padding_mode_i=padding_val,
            align_corners_i=align_val,
        )

    _reg(grid_sampler)

def _reg(symbolic_fn: typing.Callable):
    """Register *symbolic_fn* as a custom ONNX symbolic under its own name
    (default domain) and remember it in the module-level registry set."""
    op_name = f"::{symbolic_fn.__name__}"
    torch.onnx.register_custom_op_symbolic(op_name, symbolic_fn, _OPSET_VERSION)
    _registered_ops.add(op_name)


class wrapper_model(nn.Module):
    """Flat replay of the DSGN2 forward pass for ONNX export.

    Wraps a built pcdet model and re-implements its forward as one
    straight-line tensor function over the ``backbone_3d`` / ``backbone_2d``
    / ``dense_head`` sub-modules, so ``torch.onnx.export`` can trace it with
    plain tensors (stereo images plus precomputed sampling grids) instead of
    the framework's ``batch_dict`` interface.
    """
    def __init__(self, model, D_cv, H_cv, W_cv):
        super().__init__()
        self.model = model
        # Cost-volume depth/height/width, used to reshape the 2D-conv outputs
        # back into 5-D volumes inside forward().
        self.D_cv = D_cv
        self.H_cv = H_cv
        self.W_cv = W_cv
        self.maxdisp = 288          # maximum disparity considered by the stereo cost volume
        self.downsample_disp = 4    # disparity downsampling factor of the cost volume
        # Voxel-grid extents used by the height-compression reshape below.
        # NOTE(review): only D_voxel and W_voxel are actually read in forward().
        self.H_voxel = 10
        self.W_voxel = 152
        self.D_voxel = 144

    def forward(self, left_img, right_img, downsampled_disp, norm_coord_imgs, valids):
        """Run the full detection pipeline on one stereo pair.

        Args:
            left_img, right_img: stereo images; presumably NCHW float
                tensors with N == 1 — TODO confirm against the dataloader.
            downsampled_disp: disparity candidates fed to ``build_cost``.
            norm_coord_imgs: normalized sampling grid for ``F.grid_sample``
                (assumed (N, D, H, W, 3) given the 5-D volumes — TODO confirm).
            valids: validity mask multiplied onto the sampled voxel features.

        Returns:
            (dir_cls_preds, cls_preds, box_preds), each permuted to NHWC.
        """
        # --- Siamese 2D feature extraction: both views share the backbone
        # by being stacked into a single batch of size 2N.
        left_features, right_features = self.model.backbone_3d.feature_backbone([left_img, right_img])
        batched_features_list = [torch.cat([left_features[i], right_features[i]], dim=0) for i in range(len(left_features))]
        batched_stereo_feat, batched_sem_feat = self.model.backbone_3d.feature_neck(batched_features_list)
        N = 1  # export assumes batch size 1
        # Split the stacked batch back into left/right halves.
        left_stereo_feat, left_sem_feat = batched_stereo_feat[:N], batched_sem_feat[:N]
        right_stereo_feat, right_sem_feat = batched_stereo_feat[N:], batched_sem_feat[N:]
        sem_features = self.model.backbone_3d.sem_neck([left_sem_feat])
        # --- Plane-sweep cost volume from the stereo feature pair.
        cost_raw = self.model.backbone_3d.build_cost(left_stereo_feat, right_stereo_feat,
                                None, None, downsampled_disp)
        D_cv, H_cv, W_cv = self.D_cv, self.H_cv, self.W_cv
        # The dres0/dres1 blocks run as 2D convs over a flattened (C*D) axis,
        # then the result is viewed back to 5-D; dres1 adds a residual.
        cost0 = self.model.backbone_3d.dres0_relu(self.model.backbone_3d.dres0_bn3d(self.model.backbone_3d.dres0_conv2d(cost_raw.view(N, -1, H_cv, W_cv)).view(N, -1, D_cv, H_cv, W_cv)))
        out = self.model.backbone_3d.dres1_bn3d(self.model.backbone_3d.dres1_conv2d(cost0.view(N, -1, H_cv, W_cv)).view(N, -1, D_cv, H_cv, W_cv)) + cost0

        # Sample the plane-sweep volume into the 3D voxel grid and mask out
        # voxels that fall outside the image frustum.
        Voxel_psv = F.grid_sample(out, norm_coord_imgs, align_corners=True) # out: ([1, 32, 72, 120, 232]) norm_coord_img:([1, 20, 304, 288, 3])  # Voxel:([1, 32, 20, 304, 288])
        Voxel_psv = Voxel_psv * valids


        # --- Monocular lift-splat-shoot (LSS) volume from the left image.
        left_lss_feat = self.model.backbone_3d.mono_depthnet(sem_features[-3])
        # First maxdisp/downsample_disp channels are the depth distribution logits.
        depth_lss_volume = left_lss_feat[:, :self.maxdisp // self.downsample_disp] # ([1, 72, 120, 232])

        depth_lss_volume_s = depth_lss_volume.softmax(1)
        # Outer product of depth probabilities with the remaining feature channels.
        left_lss_volume = depth_lss_volume_s.unsqueeze(1) * left_lss_feat[:,self.maxdisp//self.downsample_disp:,:].unsqueeze(2) # ([1, 288, 60, 116]), ([1, 32, 60, 116])
        left_lss_volume = self.model.backbone_3d._forward_voxel_net(left_lss_volume)
        Voxel_lss_3d = F.grid_sample(left_lss_volume, norm_coord_imgs, align_corners=True)
        Voxel_lss_3d = Voxel_lss_3d * valids

        # --- Fuse the stereo and monocular volumes, then refine with the
        # stacked hourglass modules.
        Voxel = self.model.backbone_3d.rpn3d_convs(torch.cat([Voxel_psv, Voxel_lss_3d],dim=1))
        pre, post = True, True
        for hg_stereo_module in self.model.backbone_3d.rpn3d_hgs:
            Voxel, pre, post = hg_stereo_module(Voxel, pre, post) # ([1, 32, 20, 304, 288])
        Voxel = self.model.backbone_3d.rpn3d_pool(Voxel)

        # --- Height compression: collapse (C, D) of the 5-D volume into the
        # channel axis to obtain a BEV feature map.
        spatial_features = Voxel.view(N, -1, self.D_voxel, self.W_voxel)
        # --- 2D BEV backbone.
        x = self.model.backbone_2d.rpn3d_conv2(spatial_features) # ([1, 64, 304, 288])
        x = self.model.backbone_2d.rpn3d_conv3(x, None, None)[0] # ([1, 64, 304, 288])

        # --- Detection head: separate conv towers for cls and bbox, then the
        # prediction convs; outputs permuted NCHW -> NHWC as the framework expects.
        cls_features = self.model.dense_head.rpn3d_cls_convs(x)
        reg_features = self.model.dense_head.rpn3d_bbox_convs(x)
        box_preds = self.model.dense_head.conv_box(reg_features) # ([1, 140, 304, 288])
        box_preds = box_preds.permute(0, 2, 3, 1)  # [N, H, W, C] ([1, 304, 288, 140])
        cls_preds = self.model.dense_head.conv_cls(cls_features)
        cls_preds = cls_preds.permute(0, 2, 3, 1)
        dir_cls_preds = self.model.dense_head.conv_dir_cls(cls_features)
        dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1)
        return dir_cls_preds, cls_preds, box_preds



class Container(torch.nn.Module):
    """Attribute bag wrapped in an nn.Module.

    Copies every key/value pair of *my_values* onto the instance, so a dict
    of tensors can be serialized via ``torch.jit.script`` and consumed from
    C++ by attribute name.
    """

    def __init__(self, my_values):
        super().__init__()
        for key, value in my_values.items():
            setattr(self, key, value)

def _clamp_fp16_underflow(model):
    """Force weights that underflow to 0 in FP16 to exactly 0 in FP32.

    The exported network is deployed in half precision; parameters whose
    magnitude is below the FP16 representable range would silently become 0
    at inference time, so they are zeroed here as well to keep the FP32
    reference and the FP16 deployment numerically consistent.
    """
    for name, param in model.named_parameters():
        # Cast to FP16 and look for values that collapsed to zero.
        fp16_param = param.data.half()
        if torch.any(fp16_param == 0):
            print(f"权重下溢出: {name} contains weights of value 0 in FP16")
            param.data[torch.where(fp16_param == 0)] = 0


def main():
    """Build the DSGN2 model, run one reference batch, export the
    TensorRT-friendly copy to ONNX, and simplify the resulting graph."""
    args, cfg = parse_config()

    # --- distributed / batch-size setup ------------------------------------
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True

    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus

    # --- resolve checkpoint path and output directories ---------------------
    if args.ckpt_id:
        assert args.exp_name
        output_dir = cfg.ROOT_DIR / 'outputs' / cfg.EXP_GROUP_PATH / (cfg.TAG + '.' + args.exp_name)
        args.ckpt = str(output_dir / 'ckpt' / 'checkpoint_epoch_{}.pth'.format(args.ckpt_id))
    elif args.ckpt:
        output_dir = Path(args.ckpt + ".eval")
        output_dir.mkdir(parents=True, exist_ok=True)
    else:
        raise ValueError("no ckpt specified")

    if not os.path.exists(output_dir):
        print('--- ! output_dir does not exist, please check the config file and the extra tag')
        output_dir.mkdir(parents=True, exist_ok=True)
    if args.eval_tag == 'eval_with_train':
        eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    else:
        eval_output_dir = output_dir / 'eval' / args.eval_tag

    # Use the last number in the checkpoint path as the epoch id for the
    # eval directory layout.
    num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
    epoch_id = num_list[-1] if num_list else 'no_number'
    eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']

    if not args.eval_tag:
        eval_output_dir = eval_output_dir / 'default'
    eval_output_dir.mkdir(parents=True, exist_ok=True)

    log_file = eval_output_dir / ('log_eval.txt')
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)

    # --- logging -------------------------------------------------------------
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ.get('CUDA_VISIBLE_DEVICES', 'ALL')
    logger.info('CUDA_VISIBLE_DEVICES={}'.format(gpu_list))
    logger.info('eval output dir: {}'.format(eval_output_dir))

    if dist_test:
        logger.info('total_batch_size: {}'.format(total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)

    # --- data and the two model copies ----------------------------------------
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )

    # A single batch is enough to trace the model (the original loop here
    # needlessly loaded a second batch before breaking).
    data = next(iter(test_loader))

    # Reference model (framework forward) and a second copy built in
    # TensorRT-friendly mode (plain-tensor forward).
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    cfg.MODEL['trt'] = True
    model_trt = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)

    with torch.no_grad():
        print("Loading model from checkpoint:{}".format(args.ckpt))
        model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test, strict=True)
        _clamp_fp16_underflow(model)
        model.cuda().eval()

        model_trt.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test, strict=True)
        model_trt.cuda().eval()
        _clamp_fp16_underflow(model_trt)

        # --- reference forward pass through the framework model ------------
        load_data_to_gpu(data)
        data['save_container'] = True
        batch_dict = model(data)
        targets = ['dir_cls_preds', "cls_preds", "box_preds"]
        results = [model.dense_head.forward_ret_dict[t] for t in targets]

        # Persist the precomputed tensors (sampling grids, masks, ...) that
        # the C++ runtime needs alongside the ONNX model.
        data_to_cpp = model.backbone_3d.data_to_cpp
        container = torch.jit.script(Container(data_to_cpp))
        # container.save("container_fast.pt")
        print('container saved.\n')

        # Register the grid_sample symbolic before export.
        register()
        downsampled_disp = data_to_cpp['downsampled_disp']
        norm_coord_imgs = data_to_cpp['norm_coord_imgs']
        valids = data_to_cpp['valids']
        dir_cls_preds, cls_preds, box_preds = model_trt(
            data['left_img'], data['right_img'], downsampled_disp, norm_coord_imgs, valids)

        # Sanity check: the TRT-friendly graph should reproduce the reference
        # head outputs.  (The original code computed `results[0] == dir_cls_preds`
        # and discarded the result, checking nothing.)
        for name, ref, out in zip(targets, results, (dir_cls_preds, cls_preds, box_preds)):
            max_err = (ref - out).abs().max().item()
            print('{}: max abs diff vs reference = {:.6g}'.format(name, max_err))

        # --- ONNX export -----------------------------------------------------
        model_file = "dsgn_fast_halfsave.onnx"
        torch.onnx.export(
            model_trt,
            (data['left_img'], data['right_img'], downsampled_disp, norm_coord_imgs, valids),
            model_file,
            export_params=True,
            opset_version=12,  # the ONNX version to export the model to
            do_constant_folding=True,  # whether to execute constant folding for optimization
            input_names=['left_img', 'right_img', 'downsampled_disp',
                        'norm_coord_imgs', 'valids'],  # the model's input names
            output_names=['dir_cls_preds', "cls_preds", "box_preds"]  # the model's output names
        )
        print("convert sucessfully!")

        model_raw = onnx.load(model_file)
        print("Onnx model loaded sucessfully!")

        # Simplify the exported graph in place (constant folding, shape
        # inference, dead-node elimination).
        onnx_simp, check = simplify(model_raw)
        assert check, "Simplified ONNX model could not be validated"
        onnx.save(onnx_simp, model_file)


if __name__ == '__main__':
    main()
