import warnings

# Silence all warnings globally. This intentionally runs before the heavy
# imports below, because mmcv/mmdet emit many deprecation warnings at
# import time.
warnings.filterwarnings('ignore')

import os

# Allow launching the script from the tutorial/ subdirectory: the
# project-relative paths used below (configs, plugin modules) assume the
# repository root as the working directory.
cwd = os.getcwd()
if cwd.endswith("tutorial"):
    os.chdir("../")

from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
import torch
from mmcv import Config
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmcv.parallel import scatter
from mmcv.cnn.utils.flops_counter import add_flops_counting_methods

from projects.mmdet3d_plugin.datasets.builder import build_dataloader
from projects.mmdet3d_plugin.datasets.utils import draw_lidar_bbox3d

import typing
import pdb

from torch.onnx import symbolic_helper
from torch.onnx.symbolic_helper import parse_args

# Opset version under which the custom ONNX symbolics are registered.
_OPSET_VERSION = 16
# Fully-qualified names of the symbolics registered via _reg(), for bookkeeping.
_registered_ops: typing.AbstractSet[str] = set()
def register():
    """Register custom ONNX symbolic functions for this export.

    Must be called before ``torch.onnx.export()`` so that
    ``aten::grid_sampler`` nodes are lowered to the TensorRT plugin op
    ``nvinfer1::GridSamplePluginDynamic`` instead of the default lowering.

    NOTE(review): the symbolic is registered for opset ``_OPSET_VERSION``
    (16), while the export at the bottom of this file uses
    ``opset_version=14`` -- confirm the registration actually takes effect
    for the exported opset.
    """

    def grid_sampler(g, input, grid, mode, padding_mode, align_corners):
        # Symbolic for aten::grid_sampler. The integer constants follow
        # PyTorch's encoding:
        #   mode:         0 = 'bilinear', 1 = 'nearest', 2 = 'bicubic'
        #   padding_mode: 0 = 'zeros',    1 = 'border',  2 = 'reflection'
        mode = symbolic_helper._maybe_get_const(mode, "i")
        padding_mode = symbolic_helper._maybe_get_const(padding_mode, "i")
        align_corners = int(symbolic_helper._maybe_get_const(align_corners, "b"))

        # Shapes: (N, C, H, W) sampled with (N, H_out, W_out, 2)
        # -> (N, C, H_out, W_out).
        return g.op(
            # TensorRT plugin op name consumed downstream by the engine builder.
            "nvinfer1::GridSamplePluginDynamic",
            input,
            grid,
            mode_i=mode,
            padding_mode_i=padding_mode,
            align_corners_i=align_corners,
        )

    _reg(grid_sampler)
def _reg(symbolic_fn: typing.Callable) -> None:
    """Register *symbolic_fn* as the ONNX symbolic for ``::<fn.__name__>``.

    The empty domain prefix (``::``) overrides the corresponding ``aten``
    op; the name is recorded in ``_registered_ops`` for bookkeeping.
    """
    name = f"::{symbolic_fn.__name__}"
    torch.onnx.register_custom_op_symbolic(name, symbolic_fn, _OPSET_VERSION)
    _registered_ops.add(name)


class my_sparse4d(torch.nn.Module):
	"""ONNX/TensorRT export wrapper around a Sparse4D detector.

	Re-implements the head's decoder loop (``forward_trt``) against plain
	tensor inputs so that ``torch.onnx.export`` can trace it without the
	``metas`` dict used by the original model's forward.
	"""

	def __init__(self,model):
		# model: a built and checkpoint-loaded Sparse4D detector
		# (constructed in the __main__ block below).
		super().__init__()
		self.model = model
		# Install the custom grid_sampler ONNX symbolic before export.
		register()

	# Original dataloader batch keys, for reference:
	# dict_keys(['img_metas', 'img', 'timestamp', 'projection_mat', 'image_wh'])
	def forward_trt(self, feature_maps, projection_mat,image_wh):
		"""Decoder loop of the Sparse4D head, unrolled for ONNX tracing.

		Args (shapes per the author's debugging notes -- confirm against
		the caller):
			feature_maps: multi-scale image features from extract_feat().
			projection_mat: camera projection matrices, [1, 6, 4, 4].
			image_wh: per-camera image sizes, e.g. rows of [704., 256.].

		Returns:
			dict with lists "classification", "prediction", "quality",
			one entry appended per "refine" op of the head.
		"""
		# Pull the single-frame anchors/features from the instance bank.
		(
			instance_feature,
			anchor,
			temp_instance_feature,
			temp_anchor,
			time_interval,
		) = self.model.head.instance_bank.get_trt(1)

		# No denoising metas and no temporal instances in this export path.
		attn_mask = None
		dn_metas = None
		anchor_embed = self.model.head.anchor_encoder(anchor)
		temp_anchor_embed = None

		classification = []
		prediction = []
		quality = []
		infer_layers=True
		layer_num=0
		if(infer_layers):
			# Replay the head's operation_order exactly as in the original
			# head forward, dispatching on the op name.
			for i, op in enumerate(self.model.head.operation_order):
				layer_num+=1
				print("current layer num: ",layer_num,op)
				if self.model.head.layers[i] is None:
					continue
				elif op == "temp_gnn":
					# Cross-attention against temporal instances; attn_mask
					# is only applied when temporal features are absent.
					instance_feature = self.model.head.graph_model(
						i,
						instance_feature,
						temp_instance_feature,
						temp_instance_feature,
						query_pos=anchor_embed,
						key_pos=temp_anchor_embed,
						attn_mask=attn_mask
						if temp_instance_feature is None
						else None,
					)
				elif op == "gnn":
					# Self-attention among current-frame instances.
					instance_feature = self.model.head.graph_model(
						i,
						instance_feature,
						value=instance_feature,
						query_pos=anchor_embed,
						attn_mask=attn_mask,
					)
				elif op == "norm" or op == "ffn":
					instance_feature = self.model.head.layers[i](instance_feature)
				elif op == "deformable":
					# TRT-friendly deformable aggregation: takes raw tensors
					# instead of the metas dict the original layer expects.
					instance_feature = self.model.head.layers[i].forward_trt(
						instance_feature,
						anchor,
						anchor_embed,
						feature_maps,
						projection_mat,
						image_wh,
					)
				elif op == "refine":
					# Refine anchors; classification is only computed on the
					# layers where it is actually consumed.
					anchor, cls, qt = self.model.head.layers[i](
						instance_feature,
						anchor,
						anchor_embed,
						time_interval=time_interval,
						return_cls=(
							self.model.head.training
							or len(prediction) == self.model.head.num_single_frame_decoder - 1
							or i == len(self.model.head.operation_order) - 1
						),
					)
					prediction.append(anchor)
					classification.append(cls)
					quality.append(qt)
					if len(prediction) == self.model.head.num_single_frame_decoder:
						instance_feature, anchor = self.model.head.instance_bank.update(
							instance_feature, anchor, cls
						)
						# NOTE(review): dn_metas is hard-coded to None above,
						# so this branch is dead here; dn_id_target and the
						# other dn_* names are never defined in this method
						# and would raise NameError if it ever ran (only the
						# short-circuit on dn_metas prevents that). Kept
						# verbatim from the original head code.
						if (
							dn_metas is not None
							and self.model.head.sampler.num_temp_dn_groups > 0
							and dn_id_target is not None
						):
							(
								instance_feature,
								anchor,
								temp_dn_reg_target,
								temp_dn_cls_target,
								temp_valid_mask,
								dn_id_target,
							) = self.model.head.sampler.update_dn(
								instance_feature,
								anchor,
								dn_reg_target,
								dn_cls_target,
								valid_mask,
								dn_id_target,
								self.model.head.instance_bank.num_anchor,
								self.model.head.instance_bank.mask,
							)
					if i != len(self.model.head.operation_order) - 1:
						# Refresh positional embeddings for the refined anchors.
						anchor_embed = self.model.head.anchor_encoder(anchor)
					if (
						len(prediction) > self.model.head.num_single_frame_decoder
						and temp_anchor_embed is not None
					):
						temp_anchor_embed = anchor_embed[
							:, : self.model.head.instance_bank.num_temp_instances
						]
				else:
					raise NotImplementedError(f"{op} is not supported.")

		print("layer_num************: ",layer_num)
		output = {}
		output.update(
			{
				"classification": classification,
				"prediction": prediction,
				"quality": quality,
			}
		)
		return output

	def forward(self,img,projection_mat,image_wh):
		"""Entry point traced by torch.onnx.export.

		Extracts image features, then runs the unrolled decoder loop.
		"""
		# Per the author's notes, feature_maps is a list of 3 tensors, e.g.
		# [1, 89760, 256], [6, 4, 2], [6, 4] -- confirm against extract_feat.
		feature_maps = self.model.extract_feat(img, metas=None)
		model_outs = self.forward_trt(feature_maps,projection_mat,image_wh)
		return model_outs


import time

if __name__ == "__main__":
    # ---------------- configuration ----------------
    gpu_id = 0
    config = "sparse4dv3_temporal_r50_1x8_bs6_256x704"
    # NOTE(review): hard-coded absolute checkpoint path -- parameterize
    # before running on another machine.
    checkpoint = "/projects/zjh/det3d/sparse4d_trt/work_dirs/0816/latest.pth"
    cfg = Config.fromfile(f"projects/configs/{config}.py")
    img_norm_mean = np.array(cfg.img_norm_cfg["mean"])
    img_norm_std = np.array(cfg.img_norm_cfg["std"])

    # ---------------- dataset ----------------
    dataset = build_dataset(cfg.data.val)
    dataloader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=0,
        dist=False,
        shuffle=False,
    )
    data_iter = iter(dataloader)
    # Consume one batch up front (moved to GPU, mirroring the original flow).
    data = next(data_iter)
    data = scatter(data, [gpu_id])[0]

    # ---------------- model ----------------
    model = build_detector(cfg.model)
    model = model.cuda(gpu_id)
    # strict=False: tolerate checkpoint keys absent from the export model.
    _ = model.load_state_dict(torch.load(checkpoint)["state_dict"], strict=False)
    model = model.eval()
    assert model.use_deformable_func, "Please compile deformable aggregation first !!!"
    print("model.use_deformable_func : ", model.use_deformable_func)

    my_model = my_sparse4d(model)
    my_model.eval()

    # ---------------- inference (sanity check before export) ----------------
    use_my_model = True
    use_org_model = False
    if use_my_model:
        # Run the export wrapper once and report latency.
        times_list = []
        for i in range(1):
            start_time = time.time()

            data = next(data_iter)
            # data['img'].data[0] has shape [1, 6, 3, 256, 704].
            img_tensor = data['img'].data[0].cuda()
            projection_mat = data['projection_mat'].data.cuda()
            image_wh = data['image_wh'].data.cuda()
            model_outs = my_model.forward(img_tensor, projection_mat, image_wh)

            latency = time.time() - start_time
            times_list.append(latency)
            print(i, latency)
        print(np.array(times_list).mean())
        print("Output Keys:", model_outs.keys())

    elif use_org_model:
        # Reference path: run the unmodified detector head for comparison.
        times_list = []
        for i in range(1):
            start_time = time.time()
            data = next(data_iter)
            data = scatter(data, [0])[0]
            feature_maps = model.extract_feat(data["img"], metas=data)
            model.head.instance_bank.reset()
            model_outs = model.head(feature_maps, data)
            print("sparse4d model print: start\n ")
            print(model)
            print("sparse4d model print: finished\n ")

            latency = time.time() - start_time
            times_list.append(latency)
            print(i, latency)
        print(np.array(times_list).mean())
        print("Output Keys:", model_outs.keys())

    # ---------------- ONNX export ----------------
    print("***************** ")
    export_onnx = True
    if export_onnx:
        data = next(data_iter)
        # data['img'].data[0] has shape [1, 6, 3, 256, 704].
        img_tensor = data['img'].data[0].cuda()
        projection_mat = data['projection_mat'].data.cuda()
        image_wh = data['image_wh'].data.cuda()
        f = "sparse4d.onnx"
        with torch.no_grad():
            # NOTE(review): opset_version=14 here vs _OPSET_VERSION=16 used
            # when registering the grid_sampler symbolic -- confirm the
            # custom op is actually picked up at this opset.
            torch.onnx.export(
                model=my_model,
                args=(img_tensor, projection_mat, image_wh),
                f=f,
                do_constant_folding=False,
                input_names=["img", "projection_mat", "image_wh"],
                output_names=["classification", "prediction", "quality"],
                opset_version=14,
            )
        # Simplify the exported graph (works around slice-related artifacts).
        import onnx
        from onnxsim import simplify
        model_raw = onnx.load(f)
        onnx_simp, check = simplify(model_raw)
        onnx.save(onnx_simp, f)
        print("export onnx success: ", f)
