import torch
from torch import nn

# Force deterministic execution so repeated exports/traces produce identical
# outputs: fixed RNG seed plus deterministic torch/cuDNN kernels.
torch.manual_seed(0)
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True

import argparse
import time
import pickle as pkl
import os
import sys

sys.path.append('./')  # make project-local packages (projects/, tools/) importable
import numpy as np

from onnxsim import simplify
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_detector
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
                         wrap_fp16_model)
from projects.mmdet3d_plugin.models.utils.positional_encoding import pos2posemb3d, pos2posemb1d, \
    nerf_positional_encoding
from projects.mmdet3d_plugin.models.utils.misc import MLN, topk_gather, transform_reference_points, memory_refresh, \
    SELayer_Linear

def inverse_sigmoid(x, eps=1e-3):
    """Numerically stable inverse sigmoid (logit).

    Args:
        x: tensor with values expected in [0, 1].
        eps: clamp margin keeping x away from 0 and 1 so the log stays
            finite. Defaults to 1e-3, matching the previous hard-coded
            clamp bounds (the parameter used to be ignored).

    Returns:
        Tensor of log(x / (1 - x)) with x clamped to [eps, 1 - eps].
    """
    # Clamp both ends (min AND max) so neither log argument can reach 0.
    x = x.clamp(min=eps, max=1.0 - eps)
    return torch.log(x / (1.0 - x))

class EogDriveDeployer(nn.Module):
    """ONNX-export wrapper for the EogDrive detector.

    Wraps a built mmdet3d model so its forward pass consumes only plain
    tensors (images, voxelized lidar, cached query memory) and returns plain
    tensors, which is the form `torch.onnx.export` can trace.
    """

    def __init__(self, model, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = model
        # Switch the head into its ONNX-friendly mode (defined in the plugin).
        self.model.pts_bbox_head._init_onnx()

        self._name_head_layers(self.model.pts_bbox_head, prefix="FP32")

        self.print_head_layers()
        pass

    def _name_head_layers(self, module, prefix=""):
        """
        Recursively walk the module tree and assign a unique `.name` to every submodule.
        Args:
            module: current module
            prefix: name prefix (e.g. "head")
        """
        for idx, child in enumerate(module.children()):
            # Build a unique name such as head.conv1, head.bn1
            layer_name = f"{prefix}.{child.__class__.__name__.lower()}_{idx}"

            # Recurse into container modules (Sequential / ModuleList)
            if isinstance(child, nn.Sequential) or isinstance(child, nn.ModuleList):
                self._name_head_layers(child, prefix=layer_name)
            else:
                # Leaf module: attach the name attribute directly
                child.name = layer_name

    def print_head_layers(self):
        """Print the names and types of all layers in the head module."""

        def _print_layers(module, prefix=""):
            for name, child in module.named_children():
                full_name = f"{prefix}.{name}" if prefix else name
                if isinstance(child, (nn.Sequential, nn.ModuleList)):
                    _print_layers(child, prefix=full_name)
                else:
                    print(f"Layer: {full_name}, Type: {child.__class__.__name__}")

        _print_layers(self.model.pts_bbox_head)

    def _post_update_memory(self, all_cls_scores, all_bbox_preds,
                            outs_dec):
        """Prepend the last decoder layer's top-128 detections to the head's memory.

        NOTE(review): currently not called from `forward` (the call there is
        commented out); kept as reference for the memory-update path.
        """
        rec_reference_points = all_bbox_preds[..., :3][-1]
        rec_memory = outs_dec[-1]
        rec_score = all_cls_scores[-1].sigmoid().topk(1, dim=-1).values[..., 0:1]  # per-object confidence score

        head = self.model.pts_bbox_head
        # head.lifebars += 2*(rec_score - 0.4)  # update the "lifebar" (health score)
        # head.lifebars = head.lifebars.clip(0,10)  # keep lifebars within 0-10
        # lifebars_now = head.lifebars
        # top-k proposals: select by lifebar (historical score) or current score
        _, topk_indexes = torch.topk(rec_score, 128, dim=1)
        # _, topk_indexes = torch.topk(lifebars_now, 128, dim=1)

        # # consecutive-frame lifebar update
        # head.lifebars = topk_gather(lifebars_now, topk_indexes).detach()
        # head.lifebars = torch.cat((torch.ones(1,300,1)*5, head.lifebars),dim=1)  # initial lifebar for current-frame queries

        rec_reference_points = topk_gather(rec_reference_points, topk_indexes).detach()
        rec_memory = topk_gather(rec_memory, topk_indexes).detach()
        # Merge current-frame detections with the cached history
        head.memory_embedding = torch.cat([rec_memory, head.memory_embedding], dim=1)
        head.memory_reference_point = torch.cat([rec_reference_points, head.memory_reference_point], dim=1)

        # return lifebars_now
        # # cast to float64 outside TensorRT; timestamps are rebased to the initial
        # # frame so they fit in float32
        # head.memory_timestamp -= data_timestamp.unsqueeze(-1).unsqueeze(-1)

    def _pts_head(self, data, deploy_layer):
        """Run the detection head on pre-extracted features.

        Args:
            data: dict holding 'img_feats' and/or 'lidar_feats' depending on
                `head.mode` ('c' = camera, 'l' = lidar).
            deploy_layer: number of transformer decoder layers to run/export.

        NOTE(review): several of the returned names (temp_memory, outs_dec,
        all_cls_scores, ...) are only assigned inside the `if not head.sparse`
        branch — this method assumes `head.sparse` is False; confirm upstream.
        """
        head = self.model.pts_bbox_head

        mm_memorys = []  # multi-modal features
        mm_pos_embeds = []  # multi-modal positional encodings
        if 'c' in head.mode:
            x = data['img_feats']
            B, N, C, H, W = x.shape
            num_tokens = N * H * W
            memory = x.permute(0, 1, 3, 4, 2).reshape(B, num_tokens, C)

            # Positional encodings were pre-computed and saved in the "new_pth"
            # checkpoint, so they are read from buffers instead of recomputed.
            # pos_embed, cone = self.position_embeding(data, memory_center, topk_indexes, img_metas)  # note: new_pth
            pos_embed = head.pos_embed.data
            cone = head.cone.data

            memory = head.memory_embed(memory)

            # spatial_alignment in focal petr
            memory = head.spatial_alignment(memory, cone)

            pos_embed = head.featurized_pe(pos_embed, memory)

            mm_memorys.append(memory)
            mm_pos_embeds.append(pos_embed)

        if 'l' in head.mode:
            x = data['lidar_feats']
            B, c, h, w = x.shape
            lidar_memory = x.permute(0, 2, 3, 1).reshape(B, h * w, c).contiguous()

            if head.with_focal:
                focal_outs = head.focal_head([x])
                heatmaps = []
                for ret_dict in focal_outs:
                    # index 0 is the first input feature level; the 3D BEV head
                    # typically has a single level (features are at real scale,
                    # unlike multi-scale 2D pyramids)
                    heatmaps.append(ret_dict[0]['heatmap'])
                heatmaps = torch.cat(heatmaps, dim=1)
                heatmap_max = torch.max(heatmaps, dim=1)[0].view(B, -1)
                num_sample_tokens = int(head.num_sample_tokens)
                _, topk_indexes = torch.topk(heatmap_max, num_sample_tokens, dim=1)

                lidar_memory = topk_gather(lidar_memory, topk_indexes, dim=1)

            lidar_memory = head.lidar_memory_embed(lidar_memory)
            # new_pth stores this intermediate result; at export time just read it
            lidar_pos_embed = head.lidar_pos_embed.data

            if head.with_focal:
                lidar_pos_embed = topk_gather(lidar_pos_embed, topk_indexes, dim=1)

            mm_memorys.append(lidar_memory)
            mm_pos_embeds.append(lidar_pos_embed)

        assert len(mm_memorys) == len(mm_pos_embeds) and len(mm_memorys) == len(head.mode) and len(mm_memorys)>0
        if len(mm_memorys) == 1:
            memory = mm_memorys[0]
            pos_embed = mm_pos_embeds[0]
        else:
            memory = torch.cat(mm_memorys, dim=1)
            pos_embed = torch.cat(mm_pos_embeds, dim=1)

        reference_points = head.reference_points.weight
        reference_points, attn_mask, mask_dict = head.prepare_for_dn(B, reference_points, {})

        # Query positional encoding pre-computed and saved in the new_pth checkpoint
        # query_pos = head.query_embedding(pos2posemb3d(reference_points))
        query_pos = head.query_pos.data

        tgt = torch.zeros_like(query_pos)

        # prepare for the tgt and query_pos using mln.
        query_pos_in = query_pos.detach()

        if not head.sparse:
            tgt, query_pos, reference_points, temp_memory, temp_pos, rec_ego_pose = (
                head.temporal_alignment(query_pos, tgt, reference_points))

            # transformer here is a little different from PETR
            outs_dec, _ = head.transformer(memory, tgt, query_pos, pos_embed=pos_embed, attn_masks=attn_mask[0],
                                           temp_memory=temp_memory, temp_pos=temp_pos,deploy_layer=deploy_layer)
            outputs_classes = []
            outputs_coords = []
            reference = inverse_sigmoid(reference_points.clone())
            # Only the last decoder layer is decoded as the final result
            for lvl in range(len(outs_dec)-1,len(outs_dec)):
                outputs_class = head.cls_branches[lvl](outs_dec[lvl])
                tmp = head.reg_branches[lvl](outs_dec[lvl])
                # tmp2 = head.reg_branches[lvl](outs_dec[lvl])

                tmp[..., 0:3] += reference[..., 0:3]
                tmp[..., 0:3] = tmp[..., 0:3].sigmoid()

                outputs_coord = tmp
                outputs_classes.append(outputs_class)
                outputs_coords.append(outputs_coord)

            all_cls_scores = torch.stack(outputs_classes)
            all_bbox_preds = torch.stack(outputs_coords)
            # Denormalize box centers to world coordinates here so the C++
            # consumer doesn't have to.
            all_bbox_preds[..., 0:3] = (
                    all_bbox_preds[..., 0:3] * (head.pc_range[3:6] - head.pc_range[0:3]) + head.pc_range[0:3])

        return pos_embed, reference_points, tgt, temp_memory, temp_pos, query_pos, query_pos_in, outs_dec, all_cls_scores, all_bbox_preds, rec_ego_pose

    def forward(self, imgs, voxel_features, coords_id, voxel_mask,
                memory_embedding, memory_reference_point, deploy_layer=6):
        """ONNX entry point: plain tensors in, plain tensors out.

        Args:
            imgs: camera images (used when 'c' in the model's mode).
            voxel_features, coords_id, voxel_mask: voxelized lidar input
                (used when 'l' in the model's mode).
            memory_embedding, memory_reference_point: cached query memory from
                the previous frame; truncated to `head.memory_len` entries.
            deploy_layer: number of decoder layers to deploy.

        Returns:
            (all_cls_scores after sigmoid, all_bbox_preds, last-layer queries).
        """
        data=dict()
        if 'c' in self.model.mode:
            img_feats = self.model.extract_img_feat(imgs)
            data['img_feats'] = img_feats
        if 'l' in self.model.mode:
            pillars_feats = self.model.lidar_hand.export_onnx(voxel_features, coords_id, voxel_mask)
            pts_feats = self.model.lidar_backbone(pillars_feats)
            lidar_feats = self.model.lidar_neck(pts_feats)[0]
            data['lidar_feats']=lidar_feats

        head = self.model.pts_bbox_head

        # memory update before head

        head.memory_reference_point = memory_reference_point[:, :head.memory_len]
        head.memory_embedding = memory_embedding[:, :head.memory_len]
        # head.lifebars = lifebars
        (pos_embed, reference_points, tgt, temp_memory, temp_pos, query_pos, query_pos_in,
         outs_dec, all_cls_scores, all_bbox_preds, rec_ego_pose) = self._pts_head(data, deploy_layer)
        # memory update after head
        # self._post_update_memory(all_cls_scores, all_bbox_preds, outs_dec)

        all_cls_scores = all_cls_scores.sigmoid()  # apply sigmoid here so the C++ side doesn't have to

        layer_num = len(outs_dec)
        return all_cls_scores, all_bbox_preds, outs_dec[layer_num-1]


def parse_args():
    """Parse command-line arguments: positional config path, optional checkpoint."""
    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', help='checkpoint file')
    return parser.parse_args()


def main():
    """Build the detector, wrap it for deployment, and export it to ONNX.

    Pipeline: parse CLI -> load mmcv config -> import the plugin package ->
    build model + load checkpoint -> wrap in EogDriveDeployer -> trace with
    recorded input tensors via torch.onnx.export -> check and simplify graph.
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # Import the plugin package so custom heads/transforms register themselves
    # with the mmdet3d registries before build_detector runs.
    if hasattr(cfg, 'plugin'):
        if cfg.plugin:
            import importlib
            if hasattr(cfg, 'plugin_dir'):
                # e.g. "projects/mmdet3d_plugin/" -> "projects.mmdet3d_plugin"
                _module_path = os.path.dirname(cfg.plugin_dir).replace('/', '.')
            else:
                # import dir is the dirpath for the config file
                _module_path = os.path.dirname(args.config).replace('/', '.')
            print(_module_path)
            plg_lib = importlib.import_module(_module_path)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    if 'pts_bbox_head' in cfg.model:
        # "new pth" checkpoints carry precomputed positional encodings as
        # buffers, so the head reads them instead of recomputing at runtime.
        cfg.model.pts_bbox_head.new_pth = True
    if 'det_head' in cfg.model:
        cfg.model.det_head.new_pth = True
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    model = MMDataParallel(model, device_ids=[0])

    model.eval()
    model = model.float()

    tm = EogDriveDeployer(model.module)

    tm = tm.float()
    tm.cpu()
    tm.eval()
    tm.training = False
    tm.model.pts_bbox_head.with_dn = False  # disable denoising queries for export

    from tools.deploy.ndarray_io.ndarray_io import save_ndarray, load_ndarray

    dmem_init = 640     # length of the cached query memory in the exported graph
    voxel_size = 40000  # max number of valid lidar pillars
    # Zero-valued dummy inputs documenting the expected export shapes
    # (replaced below by tensors recorded from the C++ pipeline).
    inputs = [
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(1, cfg['model']['det_head']['num_camera'], 3, 256, 704))).float(),
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(voxel_size, 9, 32))).float(),
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(voxel_size,))).int(),
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(voxel_size, 1))).float(),
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(1, dmem_init, 256))).float(),
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(1, dmem_init, 3))).float(),
    ]

    frame = str(0)
    print(f"读入idx{frame}测试pillar特征: ")
    from tools.deploy.ndarray_io.ndarray_io import load_ndarray, get_cosine_similarity
    voxel_features = load_ndarray(f'/home/adt/test/tensors/cplus/PrepLidar_input_pillars{frame}.npy')[:, :, :, 0]
    coords_idxs = load_ndarray(f'/home/adt/test/tensors/cplus/PrepLidar_input_coord_idx{frame}.npy')[:, 0, 0, 0]
    voxel_mask = load_ndarray(f'/home/adt/test/tensors/cplus/PrepLidar_input_mask{frame}.npy')[:, :, 0, 0]
    pre_memory_embedding = load_ndarray(f'/home/adt/test/tensors/cplus/pre_memory_embedding{frame}.npy')[:, :, :, 0]
    pre_memory_reference_point = load_ndarray(f'/home/adt/test/tensors/cplus/pre_memory_reference_point{frame}.npy')[:, :, :, 0]
    # lifebars = np.ones(shape=(1,428,1),dtype=np.float32)*5  # lifebar of cached queries (full=10, initial=5); current-frame ones are filled inside the model
    # save_ndarray(lifebars, "/home/adt/codes/c++/obs_new/adt-wukong2/opt/config/pcore/eog_drive",'init_pre_lifebars')
    inputs = [
        torch.from_numpy(np.random.uniform(-0.0, 0.0, size=(1, cfg['model']['det_head']['num_camera'], 3, 256, 704))).float(),
        torch.from_numpy(voxel_features).float(),
        torch.from_numpy(coords_idxs).int(),
        torch.from_numpy(voxel_mask).float(),
        torch.from_numpy(pre_memory_embedding).float(),
        torch.from_numpy(pre_memory_reference_point).float(),
        # torch.from_numpy(lifebars).float(),
    ]

    deploy_layer = 6  # number of decoder layers to deploy
    input_names = ["imgs",  # input image tensor
                   'voxel_features',  # features of the valid pillars
                   'coords_idxs',  # grid indices of the valid pillars
                   'voxel_mask',  # grid mask of the valid pillars
                   "pre_memory_embedding",  # cached object query features
                   "pre_memory_reference_point",  # cached object reference points
                   # "pre_lifebars",  # object lifebars
                   ]
    output_names = ["all_cls_scores",
                    "all_bbox_preds",
                    "out_query",
                    # "post_memory_embedding",
                    # "post_memory_reference_point",
                    # "post_lifebars",
                    # "cur_lifebars",
                    ]

    onnx_path = args.checkpoint.replace('.pth', '.onnx')

    with torch.no_grad():
        # outputs = tm(*inputs)
        # Use a dedicated name instead of rebinding `args` (which would
        # shadow the parsed CLI arguments).
        onnx_inputs = tuple(inputs)
        torch.onnx.export(
            tm, onnx_inputs,
            onnx_path,
            opset_version=11,
            input_names=input_names,
            output_names=output_names,
            do_constant_folding=True,
            verbose=True)
        print(f'保存onnx到{onnx_path}')

        import onnx
        onnx.checker.check_model(onnx_path)
        print(f'检查{onnx_path}')

    from onnxsim import simplify
    filename = onnx_path.replace('.onnx', '_simplify.onnx')
    onnx_model = onnx.load(onnx_path)
    onnx_model_simp, check = simplify(onnx_model)
    onnx.save(onnx_model_simp, filename)
    # Bug fix: this message previously printed a literal placeholder
    # instead of the actual simplified-model path.
    print(f'保存简化onnx到{filename}')


if __name__ == '__main__':
    main()
