# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR3D (https://github.com/WangYueFt/detr3d)
# Copyright (c) 2021 Wang, Yue
# ------------------------------------------------------------------------
# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
#  Modified by Shihao Wang
# ------------------------------------------------------------------------
import torch
from os import path as osp
import mmcv
import numpy as np
from PIL import Image
from mmcv.runner import force_fp32, auto_fp16
from mmdet.models import DETECTORS
from mmdet3d.core import bbox3d2result
from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector
from projects.mmdet3d_plugin.models.utils.grid_mask import GridMask
from projects.mmdet3d_plugin.models.utils.misc import locations
from mmcv.parallel import DataContainer as DC
from mmdet3d.core import (Box3DMode, Coord3DMode, bbox3d2result,
                          merge_aug_bboxes_3d)
from tools.visualizer.export_video import featmap_view
from tools.visualizer.zwh_viewer import draw_boxes2img
from tools.visualizer.show_result import show_result
from mmdet3d.models import builder
@DETECTORS.register_module()
class EogL(MVXTwoStageDetector):
    """EogL: multi-modal (camera and/or LiDAR) temporal 3D detector.

    The active input modalities are selected by ``mode``: a ``'c'`` in the
    string enables the camera branch (``img_backbone`` + ``img_neck``), an
    ``'l'`` enables the LiDAR branch (``lidar_hand`` -> ``lidar_backbone``
    -> ``lidar_neck``).  3D detection is produced by ``pts_bbox_head``
    (built from ``det_head``); an optional ``depth_branch`` adds an
    auxiliary dense-depth loss on image features during training.
    """

    def __init__(self,
                 mode='c',
                 use_grid_mask=False,
                 img_backbone=None,
                 img_neck=None,
                 lidar_hand=None,
                 lidar_backbone=None,
                 lidar_neck=None,
                 det_head=None,
                 depth_branch=None,
                 train_cfg=None,
                 test_cfg=None,
                 num_frame_head_grads=2,
                 num_frame_backbone_grads=2,
                 num_frame_losses=2,
                 stride=16,
                 position_level=0,
                 aux_2d_only=True,
                 single_test=False,
                 pretrained=None):
        """Build the sub-modules selected by ``mode`` and ``det_head``.

        Args:
            mode (str): Input modality flags; contains 'c' for camera
                and/or 'l' for LiDAR.
            use_grid_mask (bool): Apply GridMask augmentation to images.
            det_head (dict | None): Config of the 3D detection head; it is
                updated in place with ``train_cfg.pts`` / ``test_cfg.pts``.
            depth_branch (dict | None): Config of the auxiliary depth head.
            stride (int): Feature stride used by :meth:`prepare_location`.
            position_level (int): Index of the FPN level taken from the
                image neck outputs.
        """
        super(EogL, self).__init__(train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained)
        self.grid_mask = GridMask(True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7)
        self.use_grid_mask = use_grid_mask
        self.prev_scene_token = None
        self.num_frame_head_grads = num_frame_head_grads
        self.num_frame_backbone_grads = num_frame_backbone_grads
        self.num_frame_losses = num_frame_losses
        self.single_test = single_test
        self.stride = stride
        self.position_level = position_level
        self.aux_2d_only = aux_2d_only
        # Set by forward_test/forward_video; the next training step uses it
        # to know the head's temporal memory must be reset.
        self.test_flag = False
        self.depth_branch = builder.build_head(depth_branch) if depth_branch is not None else None
        self.mode = mode
        if 'l' in self.mode:
            if lidar_hand is not None:
                self.lidar_hand = builder.build_backbone(lidar_hand)
            if lidar_backbone is not None:
                self.lidar_backbone = builder.build_backbone(lidar_backbone)
            if lidar_neck is not None:
                self.lidar_neck = builder.build_neck(lidar_neck)
        if 'c' in self.mode:
            self.img_backbone = builder.build_backbone(img_backbone)
            self.img_neck = builder.build_neck(img_neck)
        if det_head:
            # Propagate the pts-specific train/test configs into the head
            # config before building it.
            pts_train_cfg = train_cfg.pts if train_cfg else None
            det_head.update(train_cfg=pts_train_cfg)
            pts_test_cfg = test_cfg.pts if test_cfg else None
            det_head.update(test_cfg=pts_test_cfg)
            self.pts_bbox_head = builder.build_head(det_head)

        # BUGFIX: the original printed ``det_head==True``, which compares a
        # config dict against True and is therefore always False.
        print(f'EOG\n输入模态: {self.mode}\n输出模态: Det<{det_head is not None}>')

    def extract_img_feat(self, img, len_queue=1, training_mode=False):
        """Extract features of images.

        Args:
            img (torch.Tensor): Images of shape (B, N, C, H, W), where N is
                the number of cameras.
            len_queue (int): Unused; kept for interface compatibility.
            training_mode (bool): Unused; kept for interface compatibility.

        Returns:
            torch.Tensor | None: Feature map of shape (B, N, C', H', W')
            taken from FPN level ``self.position_level``, or None when the
            camera branch is disabled.
        """
        if 'c' not in self.mode:
            return None
        B, N, C, H, W = img.size()
        # Fold the camera dimension into the batch for the 2D backbone.
        img = img.reshape(B * N, C, H, W)
        if self.use_grid_mask:
            img = self.grid_mask(img)
        img_feats = self.img_backbone(img)
        if self.with_img_neck:
            img_feats = self.img_neck(img_feats)
        BN, C, H, W = img_feats[self.position_level].size()
        img_feats_reshaped = img_feats[self.position_level].view(B, N, C, H, W)
        return img_feats_reshaped

    def extract_lidar_feat(self, pts):
        """Extract BEV features from point clouds.

        Args:
            pts: Point-cloud input accepted by ``self.lidar_hand``.

        Returns:
            torch.Tensor | None: The first neck output, or None when the
            LiDAR branch is disabled.
        """
        if 'l' not in self.mode:
            return None
        x = self.lidar_hand(pts)
        x = self.lidar_backbone(x)
        x = self.lidar_neck(x)
        return x[0]

    # BUGFIX: ``('img')`` is the string 'img', not a tuple; membership tests
    # against a string also match substrings.  Use a one-element tuple.
    @auto_fp16(apply_to=('img',), out_fp32=True)
    def extract_feat(self, img=None, points=None, **kwargs):
        """Extract features from images and points.

        Returns:
            tuple: ``(img_feats, lidar_feats)``; each element is None when
            the corresponding modality is disabled.
        """
        img_feats = self.extract_img_feat(img)
        lidar_feats = self.extract_lidar_feat(points)
        return img_feats, lidar_feats


    def prepare_location(self, img_metas, **data):
        """Compute per-pixel 2D locations for the image feature map.

        Returns:
            torch.Tensor | None: Locations repeated for every (batch,
            camera) pair, or None when no image features are present.
        """
        if 'img_feats' not in data or data['img_feats'] is None:
            return None
        pad_h, pad_w, _ = img_metas[0]['pad_shape'][0]
        bs, n = data['img_feats'].shape[:2]
        x = data['img_feats'].flatten(0, 1)
        location = locations(x, self.stride, pad_h, pad_w)[None].repeat(bs*n, 1, 1, 1)
        return location

    def forward_roi_head(self, location, **data):
        """Run the auxiliary 2D ROI head.

        Skipped (returning a dummy dict) at test time when the 2D head is
        auxiliary-only, or when no ROI head exists.
        """
        if (self.aux_2d_only and not self.training) or not self.with_img_roi_head:
            return {'topk_indexes': None}
        else:
            outs_roi = self.img_roi_head(location, **data)
            return outs_roi


    def forward_pts_train(self,
                          gt_bboxes_3d,
                          gt_labels_3d,
                          gt_agent_fut_trajs=None,
                          gt_agent_fut_masks=None,
                          gt_ego_fut_trajs=None,
                          gt_ego_fut_masks=None,
                          img_metas=None,
                          **data):
        """Forward the detection head and compute its losses.

        The future-trajectory arguments are currently unused but kept for
        interface compatibility with callers that pass them.

        Returns:
            dict: Loss dictionary from ``self.pts_bbox_head.loss``.
        """
        outs = self.pts_bbox_head([data['lidar_feats']])

        loss_inputs = [gt_bboxes_3d, gt_labels_3d, outs]
        losses = self.pts_bbox_head.loss(*loss_inputs)
        return losses

    # BUGFIX: ``('img')`` is the string 'img', not a tuple (see extract_feat).
    @force_fp32(apply_to=('img',))
    def forward(self, return_loss=True, forward_video=False, **data):
        """Calls either forward_train or forward_test depending on whether
        return_loss=True.
        Note this setting will change the expected inputs. When
        `return_loss=True`, img and img_metas are single-nested (i.e.
        torch.Tensor and list[dict]), and when `return_loss=False`, img and
        img_metas should be double nested (i.e.  list[torch.Tensor],
        list[list[dict]]), with the outer list indicating test time
        augmentations.
        """
        if return_loss:
            for key in ['gt_bboxes_3d', 'gt_labels_3d', 'img_metas']:
                data[key] = tuple(data[key])
            return self.forward_train(**data)
        else:
            if forward_video:
                return self.forward_video(**data)
            else:
                return self.forward_test(**data)

    def forward_train(self,
                      img_metas=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      gt_agent_fut_trajs=None,
                      gt_agent_fut_masks=None,
                      gt_ego_fut_trajs=None,
                      gt_ego_fut_masks=None,
                      seq_id=None,
                      index_id=None,
                      **data):
        """Training forward pass.

        Extracts features for the enabled modalities, computes detection
        losses and, when configured, an auxiliary dense-depth loss.

        Returns:
            dict: Combined loss dictionary.
        """
        if self.test_flag:
            # Reset the head's temporal memory once when switching back
            # from evaluation; afterwards the data stream controls resets.
            self.pts_bbox_head.reset_memory()
            self.test_flag = False

        img_feats, lidar_feats = self.extract_feat(**data)
        data['img_feats'] = img_feats
        data['lidar_feats'] = lidar_feats

        loss = self.forward_pts_train(gt_bboxes_3d, gt_labels_3d,
                                      gt_agent_fut_trajs, gt_agent_fut_masks,
                                      gt_ego_fut_trajs, gt_ego_fut_masks,
                                      img_metas, **data)
        if self.depth_branch is not None and 'img_feats' in data and data['img_feats'] is not None:
            loss['loss_aux_dense_depth'] = self.depth_branch([data['img_feats']], data['focal'], data['gt_depth'])

        return loss


    def forward_test(self, img_metas, rescale,
                     seq_id=None,
                     index_id=None,
                     **data):
        """Test forward pass (no augmentation).

        Unwraps the outer test-time-augmentation nesting of every input
        and delegates to :meth:`simple_test`.
        """
        self.test_flag = True
        for var, name in [(img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError('{} must be a list, but got {}'.format(
                    name, type(var)))
        for key in data:
            if key != 'img':
                data[key] = data[key][0][0].unsqueeze(0)
            else:
                data[key] = data[key][0]
        return self.simple_test(img_metas[0], **data)

    def simple_test_pts(self, img_metas, **data):
        """Test function of point cloud branch.

        Returns:
            list[dict]: One ``bbox3d2result`` dict per sample.
        """
        outs = self.pts_bbox_head([data['lidar_feats']])
        bbox_list = self.pts_bbox_head.get_bboxes(
            outs, img_metas)
        bbox_results = [
            bbox3d2result(bboxes, scores, labels)
            for bboxes, scores, labels in bbox_list
        ]
        return bbox_results

    def simple_test(self, img_metas, **data):
        """Test function without augmentaiton."""
        img_feats, lidar_feats = self.extract_feat(**data)
        data['img_feats'] = img_feats
        data['lidar_feats'] = lidar_feats

        bbox_list = [dict() for i in range(len(img_metas))]
        bbox_pts = self.simple_test_pts(
            img_metas, **data)

        for result_dict, pts_bbox in zip(bbox_list, bbox_pts):
            result_dict['pts_bbox'] = pts_bbox
        return bbox_list

    def forward_video(self, img_metas, **data):
        """Video-style inference: detection results plus a BEV rendering
        of the input point cloud for visualization.

        Returns:
            tuple: ``(bbox_list, bev)`` where ``bev`` is a BEV image of the
            points produced by ``points2bev``.
        """
        self.test_flag = True
        for var, name in [(img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError('{} must be a list, but got {}'.format(
                    name, type(var)))
        for key in data:
            if key != 'img':
                data[key] = data[key][0][0].unsqueeze(0)
            else:
                data[key] = data[key][0]
        img_metas = img_metas[0]

        img_feats, lidar_feats = self.extract_feat(**data)
        data['img_feats'] = img_feats
        data['lidar_feats'] = lidar_feats

        bbox_list = [dict() for i in range(len(img_metas))]
        bbox_pts = self.simple_test_pts(
            img_metas, **data)
        for result_dict, pts_bbox in zip(bbox_list, bbox_pts):
            result_dict['pts_bbox'] = pts_bbox

        from projects.mmdet3d_plugin.models.utils.misc import points2bev
        # Render the raw points into a BEV image for the video export.
        bev = points2bev(data['points'][0].detach().cpu().numpy(),
                         self.pts_bbox_head.bbox_coder.pc_range,
                         voxelsize=0.1,
                         z_threshold=0.0,
                         )

        return bbox_list, bev

    def show_results(self, data, result, out_dir, show, score_thr):
        """Results visualization.

        Args:
            data (dict): Input points and the information of the sample.
            result (dict): Prediction results.
            out_dir (str): Output directory of visualization result.
            show (bool): Whether to display the 3D result interactively.
            score_thr (float | None): If given, boxes whose score is not
                above this threshold are dropped before drawing.
        """
        from copy import deepcopy
        import matplotlib.pyplot as plt
        for batch_id in range(len(result)):
            # Image paths and projection matrices.
            img_files = data['img_metas'][0]._data[0][batch_id]['filename']
            lidar2imgs = data['img_metas'][0]._data[0][0]['lidar2img_ori']
            # Point cloud.
            if isinstance(data['points'][0], DC):
                points = data['points'][0]._data[0][batch_id].numpy()
            elif mmcv.is_list_of(data['points'][0], torch.Tensor):
                points = data['points'][0][batch_id]
            else:
                # BUGFIX: the original constructed ValueError without raising it.
                raise ValueError(f"Unsupported data type {type(data['points'][0])} "
                                 f'for visualization!')
            if isinstance(data['img_metas'][0], DC):
                pts_filename = data['img_metas'][0]._data[0][batch_id][
                    'pts_filename']
                box_mode_3d = data['img_metas'][0]._data[0][batch_id][
                    'box_mode_3d']
            elif mmcv.is_list_of(data['img_metas'][0], dict):
                pts_filename = data['img_metas'][0][batch_id]['pts_filename']
                box_mode_3d = data['img_metas'][0][batch_id]['box_mode_3d']
            else:
                # BUGFIX: the original constructed ValueError without raising it.
                raise ValueError(
                    f"Unsupported data type {type(data['img_metas'][0])} "
                    f'for visualization!')
            file_name = osp.split(pts_filename)[-1].split('.')[0]

            assert out_dir is not None, 'Expect out_dir, got none.'
            # Predicted boxes.
            pred_bboxes = result[batch_id]['pts_bbox']['boxes_3d']
            pred_labels = result[batch_id]['pts_bbox']['labels_3d']

            if score_thr is not None:
                mask = result[batch_id]['pts_bbox']['scores_3d'] > score_thr
                pred_bboxes = pred_bboxes[mask]
                pred_labels = pred_labels[mask]

            pred_bboxes = pred_bboxes.tensor.cpu().numpy()

            for idx in range(len(img_files)):
                boxes = deepcopy(pred_bboxes)[:, :7]
                # Shift box centers from bottom-center to geometric center
                # (z + h/2) before projecting.
                boxes[:, 2] = boxes[:, 2] + 0.5 * boxes[:, 5]

                img = data['img'][0].data[0][batch_id][idx].detach().cpu().numpy().transpose(1, 2, 0)
                img_cyw = mmcv.imdenormalize(
                    img, np.array([123.675, 116.28, 103.53]), np.array([58.395, 57.12, 57.375]), True).astype(np.uint8)
                lidar2img = data['lidar2img'][0].data[0][0].numpy()[idx]
                img = draw_boxes2img(boxes=boxes, image=img_cyw, tfs=[lidar2img])

                plt.imshow(img)
                plt.axis('off')  # hide axes
                plt.show()

            show_result(points, None, pred_bboxes, out_dir, file_name, show=show)