import torch
from torch import nn

# Reproducibility for the export/trace run: fixed seed plus deterministic
# kernel selection so repeated exports produce identical graphs/outputs.
torch.manual_seed(0)
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True

import argparse
import time
import pickle as pkl
import os
import sys

# Make repo-root packages (projects.*, tools.*) importable when the script
# is launched from the repository root.
sys.path.append('./')
import numpy as np

from onnxsim import simplify
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_detector
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
                         wrap_fp16_model)
from projects.mmdet3d_plugin.models.utils.positional_encoding import pos2posemb3d, pos2posemb1d, \
    nerf_positional_encoding
from projects.mmdet3d_plugin.models.utils.misc import MLN, topk_gather, transform_reference_points, memory_refresh, \
    SELayer_Linear

def inverse_sigmoid(x):
    """Numerically-safe inverse sigmoid (logit).

    Clamps ``x`` to [0.001, 0.999] to avoid division by zero / log(0),
    then returns ``log(x / (1 - x))``.

    Args:
        x: tensor of probabilities in [0, 1].

    Returns:
        Tensor of the same shape with the (clamped) logit of each element.
    """
    x = x.clamp(min=0.001, max=0.999)
    # BUG FIX: the original `x / 1 - x` parses as `(x / 1) - x == 0`,
    # making every output log(0) = -inf. Parenthesize the denominator.
    return torch.log(x / (1 - x))

class RTSPETR(nn.Module):
    """ONNX/TensorRT export wrapper around a StreamPETR-style detector.

    Turns the detector's stateful temporal memory into explicit ``forward``
    inputs and outputs so the traced graph is purely functional: each call
    consumes the previous frame's memory tensors and returns the updated
    ones alongside the detection outputs.
    """

    def __init__(self, model, *args, **kwargs):
        # model: a built mmdet3d detector exposing extract_img_feat and a
        # pts_bbox_head with StreamPETR-style temporal memory attributes
        # (memory_embedding, memory_reference_point, ...) — TODO confirm.
        super().__init__(*args, **kwargs)
        self.model = model

    def _post_update_memory(self, data_ego_pose, data_timestamp, rec_ego_pose, all_cls_scores, all_bbox_preds,
                            outs_dec):
        """Refresh the head's temporal memory with this frame's top-k queries.

        Takes the last decoder layer's outputs, keeps the 128 highest-scoring
        queries, prepends them to the memory banks stored on
        ``self.model.pts_bbox_head``, and transforms reference points / ego
        poses by ``data_ego_pose`` (into the next frame's coordinate basis).
        """
        # Last decoder layer only: 3D centers, 2D velocity, query features.
        rec_reference_points = all_bbox_preds[..., :3][-1]
        rec_velo = all_bbox_preds[..., -2:][-1]
        rec_memory = outs_dec[-1]
        # Per-query confidence = highest class probability, kept as (..., 1).
        rec_score = all_cls_scores[-1].sigmoid().topk(1, dim=-1).values[..., 0:1]
        rec_timestamp = torch.zeros_like(rec_score, dtype=torch.float64)

        # topk proposals
        _, topk_indexes = torch.topk(rec_score, 128, dim=1)
        rec_timestamp = topk_gather(rec_timestamp, topk_indexes)
        rec_reference_points = topk_gather(rec_reference_points, topk_indexes).detach()
        rec_memory = topk_gather(rec_memory, topk_indexes).detach()
        rec_ego_pose = topk_gather(rec_ego_pose, topk_indexes)
        rec_velo = topk_gather(rec_velo, topk_indexes).detach()
        # Merge the current frame's entries with the historical memory
        # (newest first along dim=1).
        head = self.model.pts_bbox_head
        head.memory_embedding = torch.cat([rec_memory, head.memory_embedding], dim=1)
        head.memory_timestamp = torch.cat([rec_timestamp, head.memory_timestamp], dim=1)
        head.memory_egopose = torch.cat([rec_ego_pose, head.memory_egopose], dim=1)
        head.memory_reference_point = torch.cat([rec_reference_points, head.memory_reference_point], dim=1)
        head.memory_velo = torch.cat([rec_velo, head.memory_velo], dim=1)
        head.memory_reference_point = transform_reference_points(head.memory_reference_point, data_ego_pose,
                                                                 reverse=False)
        head.memory_egopose = data_ego_pose.unsqueeze(1) @ head.memory_egopose

        # cast to float64 out-of-tensorrt
        # head.memory_timestamp -= data_timestamp.unsqueeze(-1).unsqueeze(-1)

    def _pts_head(self, x):
        """Run the detection head on image features.

        Args:
            x: image features shaped (B, N, C, H, W) — B batch, N cameras.

        Returns:
            Tuple of intermediate and final tensors; the ones consumed by
            callers are ``all_cls_scores``, ``all_bbox_preds`` (last decoder
            layer only) and ``rec_ego_pose``.
        """
        head = self.model.pts_bbox_head

        # Flatten (N, H, W) into one token axis for the transformer.
        B, N, C, H, W = x.shape
        num_tokens = N * H * W
        memory = x.permute(0, 1, 3, 4, 2).reshape(B, num_tokens, C)

        # Positional encodings are pre-computed and saved on the head when
        # new_pth=True, so the traced graph reads them as constants.
        # pos_embed, cone = self.position_embeding(data, memory_center, topk_indexes, img_metas)  # note: new_pth
        pos_embed = head.pos_embed.data
        cone = head.cone.data

        memory = head.memory_embed(memory)

        # spatial_alignment in focal petr
        memory = head.spatial_alignment(memory, cone)
        pos_embed = head.featurized_pe(pos_embed, memory)

        reference_points = head.reference_points.weight
        reference_points, attn_mask, mask_dict = head.prepare_for_dn(B, reference_points, {})

        # Query positional embedding is likewise pre-saved (new_pth).
        # query_pos = head.query_embedding(pos2posemb3d(reference_points))
        query_pos = head.query_pos.data

        tgt = torch.zeros_like(query_pos)

        # prepare for the tgt and query_pos using mln.
        query_pos_in = query_pos.detach()
        tgt, query_pos, reference_points, temp_memory, temp_pos, rec_ego_pose = head.temporal_alignment(query_pos,
                                                                                                        tgt,
                                                                                                        reference_points)

        # transformer here is a little different from PETR
        outs_dec, _ = head.transformer(memory, tgt, query_pos, pos_embed, attn_mask[0], temp_memory, temp_pos)
        outputs_classes = []
        outputs_coords = []
        reference = inverse_sigmoid(reference_points.clone())
        for lvl in range(len(outs_dec)-1,len(outs_dec)):  # only the last decoder layer is kept as the final result
            outputs_class = head.cls_branches[lvl](outs_dec[lvl])
            tmp = head.reg_branches[lvl](outs_dec[lvl])

            # Predicted center offsets are added in logit space, then mapped
            # back to [0, 1] with sigmoid.
            tmp[..., 0:3] += reference[..., 0:3]
            tmp[..., 0:3] = tmp[..., 0:3].sigmoid()

            outputs_coord = tmp
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)

        all_cls_scores = torch.stack(outputs_classes)
        all_bbox_preds = torch.stack(outputs_coords)
        # Denormalize centers from [0, 1] into metric point-cloud range.
        all_bbox_preds[..., 0:3] = (
                all_bbox_preds[..., 0:3] * (head.pc_range[3:6] - head.pc_range[0:3]) + head.pc_range[0:3])

        return pos_embed, reference_points, tgt, temp_memory, temp_pos, \
            query_pos, query_pos_in, outs_dec, all_cls_scores, all_bbox_preds, rec_ego_pose

    def forward(self, imgs, data_timestamp, data_ego_pose, data_ego_pose_inv,
                memory_embedding, memory_reference_point, memory_timestamp, memory_egopose, memory_velo):
        """Single-frame inference with explicit temporal-memory state.

        Args:
            imgs: multi-camera images (fed to extract_img_feat).
            data_timestamp: current frame timestamp (float64).
            data_ego_pose / data_ego_pose_inv: 4x4 ego pose and its inverse.
            memory_*: previous frame's memory tensors (become graph inputs).

        Returns:
            (all_cls_scores, all_bbox_preds, updated memory tensors...) —
            the memory outputs are meant to be fed back on the next call.
        """
        img_feats = self.model.extract_img_feat(imgs, 1)

        # Expected feature/embedding sizes for the default config:
        # x[1, 6, 256, 16, 44]
        # pos_embed[1, 4224, 256]
        # cone[1, 4224, 8]
        head = self.model.pts_bbox_head

        # Memory update before running the head: map history into the
        # current ego frame using the inverse pose.
        # memory_timestamp += data_timestamp.unsqueeze(-1).unsqueeze(-1)
        memory_egopose = data_ego_pose_inv.unsqueeze(1) @ memory_egopose
        memory_reference_point = transform_reference_points(memory_reference_point, data_ego_pose_inv, reverse=False)

        # Truncate incoming memory to the head's configured capacity.
        head.memory_timestamp = memory_timestamp[:, :head.memory_len]
        head.memory_reference_point = memory_reference_point[:, :head.memory_len]
        head.memory_embedding = memory_embedding[:, :head.memory_len]
        head.memory_egopose = memory_egopose[:, :head.memory_len]
        head.memory_velo = memory_velo[:, :head.memory_len]

        (pos_embed, reference_points, tgt, temp_memory, temp_pos, query_pos, query_pos_in,
         outs_dec, all_cls_scores, all_bbox_preds, rec_ego_pose) = self._pts_head(img_feats)

        # memory update after head
        self._post_update_memory(data_ego_pose, data_timestamp, rec_ego_pose, all_cls_scores, all_bbox_preds, outs_dec)

        return all_cls_scores, all_bbox_preds, \
            head.memory_embedding, head.memory_reference_point, head.memory_timestamp, head.memory_egopose, head.memory_velo


def parse_args():
    """Parse command-line arguments for the export/benchmark script.

    Returns:
        argparse.Namespace with ``config``, ``section``, ``checkpoint``,
        ``samples`` (int) and ``log_interval`` (int).
    """
    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--section', help='section can be either extract_img_feat or pts_head_memory')
    parser.add_argument('--checkpoint', help='checkpoint file')
    # FIX: add type=int — without it, user-supplied values arrive as str
    # while the defaults are int, breaking downstream arithmetic.
    parser.add_argument('--samples', type=int, default=300, help='samples to benchmark')
    parser.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    args = parser.parse_args()
    return args


def main():
    """Build the detector from config + checkpoint, wrap it in RTSPETR, and
    export it to ONNX, followed by an onnx-simplifier pass.

    Reads the export sample inputs from hard-coded dump files and writes
    ``<checkpoint>.onnx`` and ``<checkpoint>_simplify.onnx``.
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # Import the project plugin package so custom modules register with the
    # mmdet3d registries before build_detector runs.
    if hasattr(cfg, 'plugin') and cfg.plugin:
        import importlib
        if hasattr(cfg, 'plugin_dir'):
            _module_dir = os.path.dirname(cfg.plugin_dir)
        else:
            # import dir is the dirpath for the config file
            _module_dir = os.path.dirname(args.config)
        _module_path = '.'.join(_module_dir.split('/'))
        print(_module_path)
        importlib.import_module(_module_path)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    # new_pth makes the head pre-compute and store positional encodings so
    # the exported graph can read them as constants (see RTSPETR._pts_head).
    cfg.model.pts_bbox_head.new_pth = True

    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    model = MMDataParallel(model, device_ids=[0])

    model.eval()
    model = model.float()

    tm = RTSPETR(model.module)
    tm = tm.float()
    tm.cpu()
    tm.eval()
    tm.training = False
    # Denoising queries are train-time only; disable them for export.
    tm.model.pts_bbox_head.with_dn = False

    from tools.deploy.ndarray_io.ndarray_io import load_ndarray

    # Initial temporal-memory length; the memory inputs start as zeros.
    dmem_init = 640
    # TODO(review): hard-coded absolute dump paths — parameterize before reuse.
    inputs = [
        torch.from_numpy(load_ndarray('/home/adt/codes/python/StreamPETR/tools/deploy/tmp/imgs.npy')).float(),
        torch.from_numpy(load_ndarray('/home/adt/codes/python/StreamPETR/tools/deploy/tmp/data_timestamp.npy')).double(),
        torch.from_numpy(load_ndarray('/home/adt/codes/python/StreamPETR/tools/deploy/tmp/data_ego_pose.npy')).float(),
        torch.from_numpy(load_ndarray('/home/adt/codes/python/StreamPETR/tools/deploy/tmp/data_ego_pose_inv.npy')).float(),
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(1, dmem_init, 256))).float(),
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(1, dmem_init, 3))).float(),
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(1, dmem_init, 1))).float(),
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(1, dmem_init, 4, 4))).float(),
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(1, dmem_init, 2))).float(),
    ]
    input_names = ["imgs", "data_timestamp", "data_ego_pose", "data_ego_pose_inv",
                   "pre_memory_embedding",
                   "pre_memory_reference_point",
                   "pre_memory_timestamp",
                   "pre_memory_egopose",
                   "pre_memory_velo", ]
    output_names = ["all_cls_scores",
                    "all_bbox_preds",
                    "post_memory_embedding",
                    "post_memory_reference_point",
                    "post_memory_timestamp",
                    "post_memory_egopose",
                    "post_memory_velo"]

    onnx_path = args.checkpoint.replace('.pth', '.onnx')

    with torch.no_grad():
        # FIX: pass the inputs directly instead of rebinding `args`, which
        # previously shadowed the parsed CLI namespace.
        torch.onnx.export(
            tm, tuple(inputs),
            onnx_path,
            opset_version=11,
            input_names=input_names,
            output_names=output_names,
            do_constant_folding=True,
            verbose=True)

    # Simplify the exported graph (constant folding, redundant-node removal).
    import onnx
    filename = onnx_path.replace('.onnx', '_simplify.onnx')
    onnx_model = onnx.load(onnx_path)
    onnx_model_simp, check = simplify(onnx_model)
    onnx.save(onnx_model_simp, filename)


# Script entry point: run the ONNX export when invoked directly.
if __name__ == '__main__':
    main()
