import numpy as np
import tempfile
from os import path as osp
from mmdet.datasets import DATASETS
import torch
import os
import mmcv
import json
import numpy as np
from .utils import readcsv
import ast
from nuscenes.eval.common.utils import Quaternion
from mmcv.parallel import DataContainer as DC
import random
import math
from .custom_3d_fix import Custom3DDatasetF
from torch.utils.data import Dataset
from mmdet3d.core.bbox import get_box_type, Box3DMode, Coord3DMode, LiDARInstance3DBoxes
from mmdet3d.datasets.pipelines import Compose
from .localizer import Localizer
import pyquaternion
from projects.mmdet3d_plugin.core.evaluation import CYWEvaluation
from mmdet3d.datasets.nuscenes_dataset import NuScenesDataset
from nuscenes.eval.common.data_classes import EvalBoxes
from nuscenes.utils.data_classes import Box as NuScenesBox

@DATASETS.register_module()
class CYWStreamDataset(Dataset):
    # Mapping from raw nuScenes category names to the 10 nuScenes detection
    # class names (several fine-grained categories collapse into one class,
    # e.g. all pedestrian subtypes map to 'pedestrian').
    NameMapping = {
        'movable_object.barrier': 'barrier',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.car': 'car',
        'vehicle.construction': 'construction_vehicle',
        'vehicle.motorcycle': 'motorcycle',
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'movable_object.trafficcone': 'traffic_cone',
        'vehicle.trailer': 'trailer',
        'vehicle.truck': 'truck'
    }
    # Attribute assigned to a detection when no better attribute can be
    # inferred from the predicted velocity (see _format_bbox); barrier and
    # traffic_cone carry no attribute in the nuScenes protocol.
    DefaultAttribute = {
        'car': 'vehicle.parked',
        'pedestrian': 'pedestrian.moving',
        'trailer': 'vehicle.parked',
        'truck': 'vehicle.parked',
        'bus': 'vehicle.moving',
        'motorcycle': 'cycle.without_rider',
        'construction_vehicle': 'vehicle.parked',
        'bicycle': 'cycle.without_rider',
        'barrier': '',
        'traffic_cone': '',
    }
    # nuScenes attribute name -> integer id.
    AttrMapping = {
        'cycle.with_rider': 0,
        'cycle.without_rider': 1,
        'pedestrian.moving': 2,
        'pedestrian.standing': 3,
        'pedestrian.sitting_lying_down': 4,
        'vehicle.moving': 5,
        'vehicle.parked': 6,
        'vehicle.stopped': 7,
    }
    # Inverse of AttrMapping: list index -> attribute name.
    AttrMapping_rev = [
        'cycle.with_rider',
        'cycle.without_rider',
        'pedestrian.moving',
        'pedestrian.standing',
        'pedestrian.sitting_lying_down',
        'vehicle.moving',
        'vehicle.parked',
        'vehicle.stopped',
    ]
    # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
    # Short metric aliases used when reporting nuScenes true-positive errors.
    ErrNameMapping = {
        'trans_err': 'mATE',
        'scale_err': 'mASE',
        'orient_err': 'mAOE',
        'vel_err': 'mAVE',
        'attr_err': 'mAAE'
    }
    # Default detection classes in canonical nuScenes order; replaced in
    # __init__ via get_classes() when a ``classes`` argument is given.
    CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
               'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
               'barrier')
    def __init__(self,
                 data_root,
                 ann_file,
                 selectmode='all',
                 mission=('mmdet',),
                 pipeline=None,
                 classes=None,
                 class_mapping=None,
                 load_interval=None,
                 with_velocity=True,
                 modality=None,
                 box_type_3d='LiDAR',
                 filter_empty_gt=True,
                 test_mode=False,
                 eval_version='detection_cvpr_2019',
                 use_valid_flag=False,
                 collect_keys=(), seq_mode=True, seq_split_num=1, num_frame_losses=1, queue_length=8, random_length=0
                 ):
        """Streaming multi-clip 3D detection dataset.

        Args:
            data_root (str): Root directory that holds the '__'-prefixed
                sub-dataset folders.
            ann_file (str): Annotation pkl file name, relative to each
                sub-dataset folder.
            selectmode (str): 'all' | 'include' | 'except' — how sub-dataset
                folders are selected (see ``get_all_dataset_path``).
            mission (Sequence[str] | None): Missions each sub-dataset must
                declare in its README.csv; ``None`` disables the check.
                The default is an immutable tuple (a shared mutable default
                list is a classic Python pitfall).
            pipeline (list[dict] | None): Processing pipeline config.
            classes (Sequence[str] | str | None): Training class names;
                resolved via :meth:`get_classes`.
            class_mapping: Accepted for config compatibility but ignored —
                the raw-name -> class mapping below is hard-coded.
            load_interval (int | None): Keep every N-th frame; ``None``
                keeps all frames.
            with_velocity (bool): Append (vx, vy) to each gt box.
            modality (dict | None): Sensor-usage flags; defaults to
                lidar-only.
            box_type_3d (str): Target 3D box coordinate type.
            filter_empty_gt (bool): Drop samples without gt during training.
            test_mode (bool): Test-mode switch (no annotations loaded).
            eval_version (str): nuScenes detection config name.
            use_valid_flag (bool): Stored flag (nuScenes convention).
            collect_keys (Sequence[str]): Keys re-wrapped by union2one*
                (immutable default, same pitfall as ``mission``).
            seq_mode (bool): Streaming mode — one frame per sample; forces
                queue_length / num_frame_losses to 1.
            seq_split_num (int | str): Split each recording into this many
                sampler groups ('all' = one group per frame).
            num_frame_losses (int): Number of trailing frames supervised.
            queue_length (int): Temporal queue length (non-seq mode).
            random_length (int): Random jitter of the queue (non-seq mode).
        """
        # NOTE(review): the ``class_mapping`` argument is ignored; the mapping
        # from raw annotation names onto training classes is hard-coded here
        # (e.g. 'van' -> 'car'). Confirm before relying on the parameter.
        self.class_mapping = {
            'car': 'car',
            'van': 'car',
            'truck': 'truck',
            'construction_vehicle': 'construction_vehicle',
            'construction_cart': 'construction_vehicle',
            'construction': 'construction_vehicle',
            'crane': 'construction_vehicle',
            'bus': 'bus',
            'trailer': 'trailer',
            'barrier': 'barrier',
            'roadBarrel': 'barrier',
            'motorcycle': 'motorcycle',
            'motor': 'motorcycle',
            'tricycle': 'motorcycle',
            'trimotorcycle': 'motorcycle',
            'bicycle': 'bicycle',
            'cyclist': 'bicycle',
            'rider': 'bicycle',
            'moped_scooter': 'bicycle',
            'pedestrian': 'pedestrian',
            'traffic_cone': 'traffic_cone',
        }

        # Sanity check: every configured class must be a mapping target, and
        # every mapping target must be a configured class.
        for class_name in classes:
            assert class_name in self.class_mapping.keys(),"{} should be in class_mapping".format(class_name)
        for key, val in self.class_mapping.items():
            assert val in classes,"{} should be in classes".format(val)

        # IoU thresholds 0.05 .. 0.95 in steps of 0.05 for the CYW evaluator.
        iou_thresholds = np.arange(1, 20) * 0.05
        self.cym_eval = CYWEvaluation(iou_thresholds, classes)

        self.data_root = data_root
        self.ann_file = ann_file
        self.selectmode = selectmode
        self.mission = mission
        self.test_mode = test_mode
        self.modality = modality
        if self.modality is None:
            self.modality = dict(
                use_camera=False,
                use_lidar=True,
                use_radar=False,
                use_map=False,
                use_external=False,
            )
        self.filter_empty_gt = filter_empty_gt
        self.box_type_3d, self.box_mode_3d = get_box_type(box_type_3d)

        self.CLASSES = self.get_classes(classes)
        self.cat2id = {name: i for i, name in enumerate(self.CLASSES)}
        self.with_velocity = with_velocity
        self.eval_version = eval_version
        from nuscenes.eval.detection.config import config_factory
        self.eval_detection_configs = config_factory(self.eval_version)
        self.load_interval = load_interval
        self.use_valid_flag = use_valid_flag

        # Discover all sub-dataset paths and order them deterministically.
        self.dataset_paths = sorted(self.get_all_dataset_path())
        self.data_infos = self.load_annotations()

        # Debug-only tally of raw label names (bug fix: the first occurrence
        # used to be counted as 0). The result is intentionally not stored.
        the_class = dict()
        for data_info in self.data_infos:
            for name in data_info[1]['label']['name']:
                the_class[name] = the_class.get(name, 0) + 1

        # Build the processing pipeline; keep the attribute defined even when
        # no pipeline is configured so later access fails predictably.
        self.pipeline = Compose(pipeline) if pipeline is not None else None

        self.queue_length = queue_length
        self.collect_keys = collect_keys
        self.random_length = random_length
        self.num_frame_losses = num_frame_losses
        self.global_time = 0  # offset used to shrink timestamp magnitudes
        self.seq_mode = seq_mode
        if seq_mode:
            # Streaming mode consumes one frame per sample.
            self.num_frame_losses = 1
            self.queue_length = 1
            self.seq_split_num = seq_split_num
            self.random_length = 0
            # Must be called after load_annotations (relies on its ordering).
            self._set_sequence_group_flag()
    @classmethod
    def get_classes(cls, classes=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.

        Return:
            list[str]: A list of class names.
        """
        if classes is None:
            return cls.CLASSES

        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')

        return class_names

    def check_and_print_mission(self, dataset_mission, target_mission):
        """Return True iff every required mission is provided by the dataset.

        Args:
            dataset_mission (Iterable[str]): Missions the dataset declares.
            target_mission (Iterable[str]): Missions required by this run.

        Returns:
            bool: True when ``target_mission`` is a subset of
            ``dataset_mission``.
        """
        # ``issubset`` already returns False when the target set is larger,
        # so the original explicit length pre-check was redundant.
        return set(target_mission).issubset(set(dataset_mission))
    def get_all_dataset_path(self):
        """
        load paths that prefix is '__' in root_path, and save sample info into data_infos
        """
        dataset_paths = []
        datasets = os.listdir(self.data_root)
        for dataset in datasets:
            if "__" == dataset[:2]:  # 数据集以"__"开头
                if self.selectmode == 'all':  # 全选模式,数据集全选
                    pass
                elif self.selectmode == 'include':  # include模式,选择目标数据集
                    if dataset in self.selected:
                        pass
                    else:
                        continue
                elif self.selectmode == 'except':  # except,剔除目标数据集
                    if dataset in self.selected:
                        continue
                    else:
                        pass
                else:
                    exit('数据选择模式{}错误'.format(self.selectmode))

                dataset_path = os.path.join(self.data_root, dataset)
                if self.mission is not None:
                    fieldnames = ['Item', 'Details']
                    csv_datas = readcsv(os.path.join(dataset_path, 'README.csv'), fieldnames)
                    if 'Mission' not in csv_datas:
                        raise ValueError('README.csv must contain "Mission" field')
                    mission = ast.literal_eval(csv_datas['Mission'])
                    mission_compare = self.check_and_print_mission(mission, self.mission)
                    if not mission_compare:
                        diff_mission = set(self.mission) - set(mission)
                        print('数据集{}不包含{}任务'.format(dataset, list(diff_mission)))
                        continue
                dataset_paths.append(dataset_path)
        return dataset_paths

    def __len__(self):
        """Return the length of data infos.

        Returns:
            int: Length of data infos.
        """
        return len(self.data_infos)
    def _set_group_flag(self):
        """Set flag according to image aspect ratio.

        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0. In 3D datasets, they are all the same, thus are all
        zeros.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
    def load_annotations(self):
        """Load and merge per-frame infos from every sub-dataset.

        For each directory in ``self.dataset_paths`` this reads the
        annotation pkl (``self.ann_file``), the shared calibration
        (calib/calib.json) and the ego localization CSV, then attaches
        per-frame ego pose, sequence id, frame index and a globally unique
        token to each frame's info dict.

        Returns:
            list[tuple]: ``(frame_key, info_dict)`` pairs over all
            sub-datasets, optionally subsampled by ``self.load_interval``.
        """
        data_infos = []
        for seq_id, dataset in enumerate(self.dataset_paths):
            ann_file = os.path.join(dataset, self.ann_file)
            assert os.path.exists(ann_file), f'{ann_file} not exists'
            # Calibration shared by every frame of this sub-dataset.
            json_path = os.path.join(dataset, 'calib', 'calib.json')
            with open(json_path, 'r') as f:
                calibs = json.load(f)
            data = mmcv.load(ann_file, file_format='pkl')
            # Ego localization lookup: frame key -> base_link->local transform.
            csv_file = os.path.join(dataset, 'localization/localization.csv')
            localizer = Localizer(csv_file=csv_file)
            # localizer.view()
            for frame_idx, key in enumerate(data):
                base2local = localizer.get_tf(key)
                data[key].update(calibs)
                data[key]['ego_pose']=base2local
                data[key]['scene_token']=seq_id
                data[key]['frame_idx']=frame_idx
                # Unique token: '<dataset_dir_name>_<frame_key>'.
                data[key]['token']= dataset.split('/')[-1]+'_'+str(key)
            data_infos.extend(list(data.items()))
        # data_infos = sorted(data_infos, key=lambda e: e[0]) # zwh: do not re-sort here, otherwise videos exported from a mixed multi-dataset test run come out in scrambled frame order
        # A slice step of None keeps every frame (load_interval defaults to None).
        data_infos = data_infos[::self.load_interval]
        self.metadata = {'version': 'v1.0-mini'}
        self.version = self.metadata['version']
        return data_infos

    def _set_sequence_group_flag(self):
        """Assign a sampler group id (``self.flag``) to every sample.

        Samples sharing a flag value belong to the same sequence, so a
        group-aware sampler keeps them together. With ``seq_split_num`` > 1
        each recording is further split into chunks of that many frames;
        ``seq_split_num == 'all'`` makes every frame its own group.
        """
        res = []

        # Derive one group per recording from scene_token changes
        # (data_infos is ordered per sub-dataset; see load_annotations).
        if(len(self.data_infos)!=0):
            curr_seq_id = self.data_infos[0][1]['scene_token']
        curr_sequence = 0
        for data_info in self.data_infos:
            if(data_info[1]['scene_token']!=curr_seq_id):
                curr_seq_id=data_info[1]['scene_token']
                curr_sequence += 1
            res.append(curr_sequence)

        self.flag = np.array(res, dtype=np.int64)

        # if self.seq_split_num != 1:
        if self.seq_split_num == 'all':
            # Every sample becomes its own group.
            self.flag = np.array(range(len(self.data_infos)), dtype=np.int64)
        else:
            bin_counts = np.bincount(self.flag) # number of samples per sequence
            new_flags = []
            curr_new_flag = -1
            for curr_flag in range(len(bin_counts)):
                split = math.floor(bin_counts[curr_flag]/self.seq_split_num)
                for _ in range(split): # each full chunk of seq_split_num frames gets a fresh group id
                    curr_new_flag += 1
                    new_flags.extend([curr_new_flag for _ in range(self.seq_split_num)])
                for _ in range(split*self.seq_split_num, bin_counts[curr_flag]): # the remainder (< seq_split_num frames) is appended to the last group
                    new_flags.append(curr_new_flag)
                # NOTE(review): if a sequence is shorter than seq_split_num,
                # split == 0 and its frames reuse the previous group id (or
                # -1 for the very first sequence) — confirm this is intended.
                # curr_new_flag += 1
                # curr_sequence_length = np.array(
                #     list(range(0,
                #             bin_counts[curr_flag],
                #             math.ceil(bin_counts[curr_flag] / self.seq_split_num)))
                #     + [bin_counts[curr_flag]]) # split the same sequence into self.seq_split_num parts
                #
                # for sub_seq_idx in (curr_sequence_length[1:] - curr_sequence_length[:-1]):
                #     for _ in range(sub_seq_idx):
                #         new_flags.append(curr_new_flag)
                #     curr_new_flag += 1

            assert len(new_flags) == len(self.flag)
            # assert len(np.bincount(new_flags)) == len(np.bincount(self.flag)) * self.seq_split_num, (
            #     format(f'len(np.bincount(new_flags))={len(np.bincount(new_flags))},len(np.bincount(self.flag))={len(np.bincount(self.flag))},self.seq_split_num={self.seq_split_num}'))
            self.flag = np.array(new_flags, dtype=np.int64) # the new sequence grouping

    def pre_pipeline(self, results):
        """Initialization before data preparation.

        Args:
            results (dict): Dict before data preprocessing.

                - img_fields (list): Image fields.
                - bbox3d_fields (list): 3D bounding boxes fields.
                - pts_mask_fields (list): Mask fields of points.
                - pts_seg_fields (list): Mask fields of point segments.
                - bbox_fields (list): Fields of bounding boxes.
                - mask_fields (list): Fields of masks.
                - seg_fields (list): Segment fields.
                - box_type_3d (str): 3D box type.
                - box_mode_3d (str): 3D box mode.
        """
        results['img_fields'] = []
        results['bbox3d_fields'] = []
        results['pts_mask_fields'] = []
        results['pts_seg_fields'] = []
        results['bbox_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []
        results['box_type_3d'] = self.box_type_3d
        results['box_mode_3d'] = self.box_mode_3d

    def prepare_train_data(self, index):
        """
                input_dict = self.get_data_info(index)
        self.pre_pipeline(input_dict)
        example = self.pipeline(input_dict)

        return example

        Training data preparation.
        Args:
            index (int): Index for accessing the target data.
        Returns:
            dict: Training data dict of the corresponding index.
        """

        input_dict = self.get_data_info(index)
        self.pre_pipeline(input_dict)
        example = self.pipeline(input_dict)
        queue =  self.union2one2(example)
        # if len(queue['gt_bboxes_3d'])==0:
        #     print('没有box的数据  {}'.format(input_dict['pts_filename']))
        #     return None
        return queue

        # aaa = TicToc(f'index {index}')
        queue = []
        index_list = list(range(index-self.queue_length-self.random_length+1, index))
        random.shuffle(index_list)
        index_list = sorted(index_list[self.random_length:])
        index_list.append(index)
        prev_scene_token = None
        # print(f'index {index}, index_list {index_list}')
        for i in index_list:
            i = max(0, i)
            input_dict = self.get_data_info(i)

            self.pre_pipeline(input_dict)
            example = self.pipeline(input_dict)
            queue.append(example)
        # aaa.toc()

        for k in range(self.num_frame_losses):
            if self.filter_empty_gt and \
                (queue[-k-1] is None or ~(queue[-k-1]['gt_labels_3d']._data != -1).any()):
                return None
        queue =  self.union2one(queue)
        return queue

    def prepare_test_data(self, index):
        """Prepare data for testing.

        Args:
            index (int): Index for accessing the target data.

        Returns:
            dict: Testing data dict of the corresponding index.
        """
        input_dict = self.get_data_info(index)
        self.pre_pipeline(input_dict)
        example = self.pipeline(input_dict)
        return example

    def union2one2(self, queue):
        """Re-wrap a single-frame sample's fields into DataContainers.

        Collected keys are stacked on GPU-capable containers except
        'img_metas', which stays CPU-only. In train mode the ground-truth
        keys are re-wrapped too (boxes stay CPU-only).
        """
        for key in self.collect_keys:
            payload = queue[key].data
            if key == 'img_metas':
                queue[key] = DC(payload, cpu_only=True)
            else:
                queue[key] = DC(payload, cpu_only=False, stack=True, pad_dims=None)
        if self.test_mode:
            return queue
        # SPETR: adapt to the removal of the original 2D head — only the 3D
        # gt keys exist when 'centers2d' was not collected.
        gt_keys = ['gt_bboxes_3d', 'gt_labels_3d']
        if 'centers2d' in queue:
            gt_keys += ['gt_bboxes', 'gt_labels', 'centers2d', 'depths']
        for key in gt_keys:
            queue[key] = DC(queue[key].data, cpu_only=(key == 'gt_bboxes_3d'))
        return queue

    def union2one(self, queue):
        """Merge a temporal queue of frame dicts into the last frame's dict.

        Collected keys are stacked across frames into GPU-capable
        DataContainers ('img_metas' stays a CPU-only list). In train mode
        the ground-truth keys become per-frame lists (boxes CPU-only).

        Returns:
            dict: The last frame's dict with merged temporal fields.
        """
        target = queue[-1]
        for key in self.collect_keys:
            if key == 'img_metas':
                target[key] = DC([frame[key].data for frame in queue], cpu_only=True)
            else:
                stacked = torch.stack([frame[key].data for frame in queue])
                target[key] = DC(stacked, cpu_only=False, stack=True, pad_dims=None)
        if not self.test_mode:
            # SPETR: adapt to the removal of the original 2D head — only the
            # 3D gt keys exist when 'centers2d' was not collected.
            gt_keys = ['gt_bboxes_3d', 'gt_labels_3d']
            if 'centers2d' in queue[0]:
                gt_keys += ['gt_bboxes', 'gt_labels', 'centers2d', 'depths']
            for key in gt_keys:
                per_frame = [frame[key].data for frame in queue]
                target[key] = DC(per_frame, cpu_only=(key == 'gt_bboxes_3d'))
        return target

    def get_data_info(self, index):
        """Get data info according to the given index.

        Args:
            index (int): Index of the sample data to get.

        Returns:
            dict: Data information that will be passed to the data \
                preprocessing pipelines. It includes the following keys:

                - sample_idx: Frame key (numeric timestamp) of this sample.
                - pts_filename (str): Filename of point clouds.
                - ego_pose / ego_pose_inv: base_link->local transform of
                  this frame and its inverse.
                - prev_idx / next_idx: Frame key of the neighbouring sample,
                  or -1 at dataset boundaries.
                - timestamp (float): Sample timestamp in seconds.
                - img_filename (str, optional): Image filename.
                - lidar2img (list[np.ndarray], optional): Transformations \
                    from lidar to different cameras.
                - ann_info (dict): Annotation info (train mode only).
        """
        info = self.data_infos[index]
        # standard protocol modified from SECOND.Pytorch

        # Ego pose attached by load_annotations; the inverse is computed by a
        # helper defined elsewhere in this module.
        ego_pose =  info[1]['ego_pose']
        ego_pose_inv = invert_matrix_egopose_numpy(ego_pose)

        # NOTE(review): prev/next hold the *frame key* (info[0]) of the
        # neighbouring sample, not its dataset index — confirm consumers.
        prev_idx = self.data_infos[index-1][0] if index>0 else -1
        next_idx = self.data_infos[index+1][0] if index<len(self.data_infos)-1 else -1

        # if prev_idx==-1:
        #     self.global_time = info[0] / 1e3 - 1000  # ms
            # self.global_time=0.0
        input_dict = dict(
            sample_idx=info[0],
            pts_filename=os.path.join(self.data_root, info[1]['lidar']['path']),
            # sweeps=info[1]['sweeps'],
            ego_pose=ego_pose,
            ego_pose_inv = ego_pose_inv,
            prev_idx=prev_idx,
            next_idx=next_idx,
            scene_token=info[1]['scene_token'],
            frame_idx=info[1]['frame_idx'],
            index_id = index,
            # timestamp=info[0] / 1e3 - self.global_time,  # ms to s
            timestamp=info[0] / 1e3,  # ms to s
            version=info[1]['data_info']['version'] if 'version' in info[1]['data_info'] else '1.0',
        )

        if self.modality['use_camera']:
            image_paths = []
            lidar2img_rts = []
            intrinsics = []
            extrinsics = []
            img_timestamp = []
            # NOTE(review): lexicographic string comparison — only reliable
            # for single-digit major versions ('2.0' > '1.0'); confirm.
            assert input_dict['version'] >= '2.0'
            cam_ids = sorted(info[1]['camera'].keys())
            for camera_id in cam_ids:
                camera_info = info[1]['camera'][camera_id]
                # Image timestamp parsed from the file name (ms -> s);
                # global_time is 0 unless set elsewhere.
                img_timestamp.append(int(camera_info['path'].split('/')[-1].split('.')[0]) / 1e3- self.global_time)
                image_paths.append(os.path.join(self.data_root, camera_info['path']))
                intrinsic = camera_info['cam_intrinsic']
                # Extrinsic: inverse of the sensor pose, i.e. base_link -> camera.
                extrinsic = np.linalg.inv(camera_info['sensor2base_link'])
                intrinsics.append(intrinsic)
                extrinsics.append(extrinsic)
                lidar2img_rts.append(intrinsic @ extrinsic)

            input_dict.update(
                dict(
                    img_timestamp=img_timestamp,
                    img_filename=image_paths,
                    lidar2img=lidar2img_rts,
                    lidar2img_ori=lidar2img_rts,
                    intrinsics=intrinsics,
                    extrinsics=extrinsics
                ))

        if not self.test_mode:  # for seq_mode
            # A previous frame exists iff this is not the first sample of its
            # sampler group (see _set_sequence_group_flag).
            prev_exists = not (index == 0 or self.flag[index - 1] != self.flag[index])
        else:
            prev_exists = None
        input_dict.update(
            dict(
                prev_exists=prev_exists,
            ))

        if not self.test_mode:
            annos = self.get_ann_info(index)
            input_dict['ann_info'] = annos

        return input_dict

    def get_ann_info(self, index):
        """Get annotation info according to the given index.

        Args:
            index (int): Index of the annotation data to get.

        Returns:
            dict: Annotation information consists of the following keys:

                - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):
                    3D ground truth bboxes
                - gt_labels_3d (np.ndarray): Labels of ground truths
                    (-1 for raw names missing from ``class_mapping``).
                - gt_names (list[str]): Class names of ground truths.
                - gt_agent_fut_trajs / gt_ego_fut_* (optional): future
                  trajectory annotations when present in the label dict.
        """
        info = self.data_infos[index][1]
        # Assemble (x, y, z, dx, dy, dz, yaw) boxes from the label arrays.
        if(len(info['label']['location'])!=0):
            gt_bboxes_3d = np.concatenate(
                (info['label']['location'], info['label']['dimensions'], info['label']['rotation_y']), axis=1)
        else:
            gt_bboxes_3d = [] # zwh: must stay an empty list; np.empty((0, 7)) makes target-dimension matching fail downstream
            # gt_bboxes_3d = np.empty((0,7)) # if no gt
        gt_names_3d = info['label']['name']
        gt_labels_3d = []
        for cat in gt_names_3d:
            if cat in self.class_mapping:
                cls_mapped = self.class_mapping[cat]
                gt_labels_3d.append(self.CLASSES.index(cls_mapped))
            else:
                # Unknown raw name: mark with the ignore label.
                gt_labels_3d.append(-1)
        gt_labels_3d = np.array(gt_labels_3d)

        # Append (vx, vy) to every box; zeros when no velocity annotation.
        if self.with_velocity and len(gt_bboxes_3d)!=0:
            if 'velocity' in info['label']:
                gt_velocity = info['label']['velocity']
            else:
                gt_velocity = np.zeros((gt_bboxes_3d.shape[0], 2))
            gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1)

        # NOTE(review): origin=(0.5, 0.5, 0.5) treats the annotated z as the
        # box center; convert_to() shifts it to the target box mode's own
        # origin convention (e.g. the KITTI-style (0.5, 0.5, 0) bottom
        # center the original comment referred to) — confirm.
        gt_bboxes_3d = LiDARInstance3DBoxes(
            gt_bboxes_3d,
            box_dim= 9 if self.with_velocity else 7,
            origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)

        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d,
            gt_labels_3d=gt_labels_3d,
            gt_names=gt_names_3d)

        # Per-agent future trajectories (optional planning annotations).
        if 'gt_agent_fut_trajs' in info['label']:
            anns_results['gt_agent_fut_trajs'] = np.array(info['label']['gt_agent_fut_trajs'], dtype=np.float32)
            anns_results['gt_agent_fut_masks'] = np.array(info['label']['gt_agent_fut_masks'], dtype=np.float32)

        # Ego future trajectory / mask / driving command (optional).
        if 'gt_ego_fut_trajs' in info['label']:
            anns_results['gt_ego_fut_trajs'] = np.array(info['label']['gt_ego_fut_trajs'], dtype=np.float32)
            anns_results['gt_ego_fut_masks'] = np.array(info['label']['gt_ego_fut_masks'], dtype=np.float32)
            anns_results['gt_ego_fut_cmd'] = np.array(info['label']['gt_ego_fut_cmd'], dtype=np.int32)


        return anns_results

    def _format_bbox(self, results, jsonfile_prefix=None):
        """Convert the results to the standard nuScenes submission format.

        Args:
            results (list[dict]): Testing results of the dataset.
            jsonfile_prefix (str): The prefix of the output jsonfile.
                You can specify the output directory/filename by
                modifying the jsonfile_prefix. Default: None.

        Returns:
            str: Path of the output json file.
        """
        nusc_annos = {}
        mapped_class_names = self.CLASSES

        print('Start to convert detection format...')
        for sample_id, det in enumerate(mmcv.track_iter_progress(results)):
            annos = []
            # Helper defined elsewhere in this module.
            boxes = output_to_nusc_box(det, self.with_velocity)
            sample_token = self.data_infos[sample_id][1]['token']
            # Boxes are kept in the ego/lidar frame; the global-frame
            # conversion used for official nuScenes is disabled here.
            # boxes = lidar_nusc_box_to_global(self.data_infos[sample_id][1], boxes,
            #                                  mapped_class_names,
            #                                  self.eval_detection_configs,
            #                                  self.eval_version)
            for i, box in enumerate(boxes):
                name = mapped_class_names[box.label]
                # Derive the attribute from predicted speed: > 0.2 m/s is
                # treated as moving; otherwise standing/stopped or the
                # per-class default attribute.
                if np.sqrt(box.velocity[0] ** 2 + box.velocity[1] ** 2) > 0.2:
                    if name in [
                        'car',
                        'construction_vehicle',
                        'bus',
                        'truck',
                        'trailer',
                    ]:
                        attr = 'vehicle.moving'
                    elif name in ['bicycle', 'motorcycle']:
                        attr = 'cycle.with_rider'
                    else:
                        attr = NuScenesDataset.DefaultAttribute[name]
                else:
                    if name in ['pedestrian']:
                        attr = 'pedestrian.standing'
                    elif name in ['bus']:
                        attr = 'vehicle.stopped'
                    else:
                        attr = NuScenesDataset.DefaultAttribute[name]

                nusc_anno = dict(
                    sample_token=sample_token,
                    translation=box.center.tolist(),
                    size=box.wlh.tolist(),
                    rotation=box.orientation.elements.tolist(),
                    velocity=box.velocity[:2].tolist(),
                    detection_name=name,
                    detection_score=box.score,
                    attribute_name=attr)
                annos.append(nusc_anno)
            nusc_annos[sample_token] = annos
        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }

        mmcv.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
        print('Results writes to', res_path)
        mmcv.dump(nusc_submissions, res_path)
        return res_path

    def _evaluate_single(self,
                         result_path,
                         gt_boxes=None,
                         logger=None,
                         metric='bbox',
                         result_name='pts_bbox'):
        """Evaluation for a single model in nuScenes protocol.

        Args:
            result_path (str): Path of the result file.
            gt_boxes: Ground-truth boxes, passed to the CYW evaluator as its
                ``nusc`` backend. Default: None.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            metric (str, optional): Metric name used for evaluation.
                Default: 'bbox'.
            result_name (str, optional): Result name in the metric prefix.
                Default: 'pts_bbox'.

        Returns:
            dict: Dictionary of evaluation details.
        """
        from projects.mmdet3d_plugin.core.evaluation import CYWDetectionEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc_eval = CYWDetectionEval(
            nusc=gt_boxes,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=None,
            output_dir=output_dir,
            verbose=False)
        nusc_eval.main(render_curves=True)

        # Record metrics from the summary file the evaluator writes.
        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
        detail = dict()
        metric_prefix = f'{result_name}_NuScenes'
        for name in self.CLASSES:
            for k, v in metrics['label_aps'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
            for k, v in metrics['label_tp_errors'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
        # Bug fix: the dataset-level TP errors are class-independent; they
        # were previously re-written once per class inside the loop above
        # (same keys, same values — redundant work, identical result).
        for k, v in metrics['tp_errors'].items():
            val = float('{:.4f}'.format(v))
            detail['{}/{}'.format(metric_prefix,
                                  self.ErrNameMapping[k])] = val

        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
        return detail

    def format_results(self, results, jsonfile_prefix=None):
        """Format the results to json (standard format for COCO evaluation).

        Args:
            results (list[dict]): Testing results of the dataset.
            jsonfile_prefix (str): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: Returns (result_files, tmp_dir), where `result_files` is a
                dict containing the json filepaths, `tmp_dir` is the temporal
                directory created for saving json files when
                `jsonfile_prefix` is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        tmp_dir = None
        if jsonfile_prefix is None:
            # Caller gave no destination: write into a temp dir they must
            # clean up via the returned handle.
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')

        # Predictions arrive in one of two layouts:
        # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)
        # 2. list of dict('pts_bbox' or 'img_bbox': dict(...)) — a per-branch
        #    nesting; see https://github.com/open-mmlab/mmdetection3d/issues/449
        nested = 'pts_bbox' in results[0] or 'img_bbox' in results[0]
        if nested:
            # Unwrap each branch and format it into its own subdirectory.
            result_files = dict()
            for name in results[0]:
                print(f'\nFormating bboxes of {name}')
                branch_results = [out[name] for out in results]
                result_files[name] = self._format_bbox(
                    branch_results, osp.join(jsonfile_prefix, name))
        else:
            result_files = self._format_bbox(results, jsonfile_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 result_names=None,
                 show=False,
                 out_dir=None,
                 pipeline=None,
                 rotate_value=0.0,
                 filted_gt_box=False):
        """Evaluation in nuScenes protocol.

        Args:
            results (list[dict]): Testing results of the dataset.
            metric (str | list[str], optional): Metrics to be evaluated.
                Default: 'bbox'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str, optional): The prefix of json files including
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            result_names (list[str], optional): Keys of ``result_files`` to
                evaluate. Defaults to ``['pts_bbox']`` (``None`` sentinel is
                used to avoid a mutable default argument).
            show (bool, optional): Whether to visualize.
                Default: False.
            out_dir (str, optional): Path to save the visualization results.
                Default: None.
            pipeline (list[dict], optional): raw data loading for showing.
                Default: None.

        Returns:
            dict[str, float]: Results of each evaluation metric.
        """
        from nuscenes.eval.detection.data_classes import DetectionBox

        if result_names is None:
            result_names = ['pts_bbox']

        assert len(results) == len(self.data_infos)
        # Leave test mode so prepare_test_data also yields GT annotations.
        # NOTE(review): the flag is intentionally left False afterwards,
        # matching the original behaviour — confirm no caller depends on it.
        self.test_mode = False
        all_annotations = EvalBoxes()
        for image_id, data_info in enumerate(self.data_infos):
            example = self.prepare_test_data(image_id)
            sample_boxes = []
            sample_token = data_info[1]['token']
            gt_bboxes = example['gt_bboxes_3d'][0].data.tensor.numpy()
            gt_labels = example['gt_labels_3d'][0].data.numpy()
            # Box layout assumed: [x, y, z, l, w, h, yaw, vx, vy] — the
            # slices below rely on this ordering.
            for idx, (gt_box, label) in enumerate(zip(gt_bboxes, gt_labels)):
                # Yaw (index 6) -> quaternion about the z axis.
                quat = list(
                    pyquaternion.Quaternion(axis=[0, 0, 1], radians=gt_box[6]))
                name = self.class_mapping[data_info[1]['label']['name'][idx]]
                sample_boxes.append(
                    DetectionBox(
                        sample_token=sample_token,
                        translation=list(gt_box[:3]),
                        size=list(gt_box[3:6]),
                        rotation=quat,
                        velocity=gt_box[7:9],
                        num_pts=0,
                        detection_name=name,
                        detection_score=-1.0,  # GT samples do not have a score.
                        attribute_name=''))
            all_annotations.add_boxes(sample_token, sample_boxes)

        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
        if isinstance(result_files, dict):
            results_dict = dict()
            for name in result_names:
                print('Evaluating bboxes of {}'.format(name))
                ret_dict = self._evaluate_single(
                    result_files[name], gt_boxes=all_annotations)
                # BUGFIX: update must happen inside the loop; previously only
                # the last entry of result_names was recorded.
                results_dict.update(ret_dict)
        elif isinstance(result_files, str):
            results_dict = self._evaluate_single(result_files)

        if tmp_dir is not None:
            tmp_dir.cleanup()

        # if show or out_dir:
        #     self.show(results, out_dir, show=show, pipeline=pipeline)
        return results_dict

    def _rand_another(self, idx):
        # """随机选择其他flag类别中的第一个索引"""
        # current_flag = self.flag[idx]
        #
        # # 获取所有不同的flag值（排除当前flag）
        # other_flags = np.unique(self.flag[self.flag != current_flag])
        #
        # # 随机选择一个目标flag
        # target_flag = np.random.choice(other_flags)
        #
        # # 找到目标flag的第一个出现位置
        # first_idx = np.where(self.flag == target_flag)[0][0]
        #
        # return first_idx
        """Randomly get another item with the same flag.

        Returns:
            int: Another index of item with the same flag.
        """
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)

    def __getitem__(self, idx):
        """Get item from infos according to the given index.
        Returns:
            dict: Data dictionary of the corresponding index.
        """
        if self.test_mode:
            return self.prepare_test_data(idx)
        while True:
            # import time
            # start = time.time()
            data = self.prepare_train_data(idx)
            # print(f"Worker {os.getpid()} 处理样本 {idx} 耗时: {time.time() - start:.4f}s")
            if data is None:#不走这里，空的也训练吧
                print('没有框的帧id {} 属于序列{}'.format(idx, self.flag[idx]))
                idx = self._rand_another(idx)
                print('重新选择帧id {} 属于序列{}'.format(idx, self.flag[idx]))
                continue
            return data

def output_to_nusc_box(detection, with_velocity=True):
    """Convert the output to the box class in the nuScenes.

    Args:
        detection (dict): Detection results.

            - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
            - scores_3d (torch.Tensor): Detection scores.
            - labels_3d (torch.Tensor): Predicted box labels.
        with_velocity (bool): Whether to copy per-box velocity from
            tensor columns 7:9. Default: True.

    Returns:
        list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.
    """
    bboxes = detection['boxes_3d']
    score_arr = detection['scores_3d'].numpy()
    label_arr = detection['labels_3d'].numpy()

    centers = bboxes.gravity_center.numpy()
    # our LiDAR coordinate system -> nuScenes box coordinate system:
    # swap the first two size components.
    dims = bboxes.dims.numpy()[:, [1, 0, 2]]
    yaws = bboxes.yaw.numpy()

    converted = []
    for i in range(len(bboxes)):
        rotation = pyquaternion.Quaternion(axis=[0, 0, 1], radians=yaws[i])
        if with_velocity:
            vel = (*bboxes.tensor[i, 7:9], 0.0)
        else:
            vel = (0, 0, 0)
        converted.append(
            NuScenesBox(
                centers[i],
                dims[i],
                rotation,
                label=label_arr[i],
                score=score_arr[i],
                velocity=vel))
    return converted


def invert_matrix_egopose_numpy(egopose):
    """Compute the inverse of a 4x4 egopose numpy matrix.

    Exploits the rigid-transform identity inv([R|t]) = [R^T | -R^T t],
    which avoids a general matrix inversion.
    """
    rot_t = egopose[:3, :3].T
    inverse = np.eye(4, dtype=np.float32)
    inverse[:3, :3] = rot_t
    inverse[:3, 3] = -rot_t.dot(egopose[:3, 3])
    return inverse

def convert_egopose_to_matrix_numpy(rotation, translation):
    """Pack a 3x3 rotation and a 3-vector translation into a 4x4
    homogeneous transform (float32, bottom row [0, 0, 0, 1])."""
    matrix = np.eye(4, dtype=np.float32)
    matrix[:3, :3] = rotation
    matrix[:3, 3] = translation
    return matrix