# Copyright (c) OpenMMLab. All rights reserved.
import os
import mmcv
import torch
import csv
import cv2
import numpy as np
import math
import pyquaternion
from .vector_map import VectorizedLocalMap
from mmdet.datasets.pipelines import to_tensor
from mmcv.parallel import DataContainer as DC

import tempfile
from nuscenes.utils.data_classes import Box as NuScenesBox

from tqdm import tqdm
from nuscenes.eval.common.utils import quaternion_yaw, Quaternion
from os import path as osp
from .utils import nuscenes_get_rt_matrix

import copy
from .builder import DATASETS
from .nuscenes_dataset import NuScenesDataset
from .occ_metrics import Metric_mIoU, Metric_FScore
from .nuscenes_dataset_occ import NuScenesDatasetOccpancy

@DATASETS.register_module()
class NuScenesDatasetOccpancyPlan(NuScenesDatasetOccpancy):
    def __init__(self,
                 ann_file=None,
                 pipeline=None,
                 data_root=None,
                 classes=None,
                 load_interval=1,
                 with_velocity=True,
                 modality=None,
                 box_type_3d='LiDAR',
                 filter_empty_gt=True,
                 test_mode=False,
                 eval_version='detection_cvpr_2019',
                 use_valid_flag=False,
                 img_info_prototype='mmcv',
                 multi_adj_frame_id_cfg=None,
                 occupancy_path='/mount/dnn_data/occupancy_2023/gts',
                 ego_cam='CAM_FRONT',
                 # SOLOFusion
                 use_sequence_group_flag=False,
                 load_fut_bbox_info=False,
                 sequences_split_num=1,
                ):
        """nuScenes dataset variant for occupancy prediction + ego planning.

        Args:
            ann_file (str, optional): Path to the annotation file.
            pipeline (list, optional): Data-processing pipeline config.
            data_root (str, optional): Dataset root directory.
            classes (tuple | list, optional): Class names.
            load_interval (int): Accepted for config compatibility but not
                forwarded to the parent here. NOTE(review): confirm it is
                intentionally unused in this subclass.
            with_velocity (bool): Stored on the instance; whether boxes
                carry velocity information.
            modality (dict, optional): Sensor modality flags; defaults to a
                lidar-only configuration when None.
            box_type_3d (str): Coordinate type of 3D boxes.
            filter_empty_gt (bool): Whether to filter samples without GT.
            test_mode (bool): Whether the dataset runs in test mode.
            eval_version (str): nuScenes detection eval config name passed
                to ``config_factory``.
            use_valid_flag (bool): Accepted for config compatibility but not
                used in this subclass. NOTE(review): confirm.
            img_info_prototype (str): Image-info layout, ``'mmcv'`` or a
                ``'bevdet'``-style prototype (see ``get_data_info``).
            multi_adj_frame_id_cfg: Config for adjacent-frame sampling.
            occupancy_path (str): Directory containing occupancy GT files.
            ego_cam (str): Reference camera used for the ego frame.
            use_sequence_group_flag (bool): Enable SOLOFusion-style sequence
                grouping (see ``_set_sequence_group_flag``).
            load_fut_bbox_info (bool): Whether ``get_data_info`` also loads
                future-frame GT boxes/labels.
            sequences_split_num (int | str): How many parts each sequence is
                split into; ``'all'`` makes every sample its own group.
        """

        super().__init__(
            data_root=data_root,
            ann_file=ann_file,
            pipeline=pipeline,
            classes=classes,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            test_mode=test_mode)
        self.occupancy_path = occupancy_path
        self.with_velocity = with_velocity
        self.load_fut_bbox_info = load_fut_bbox_info
        self.eval_version = eval_version
        # Local import keeps the nuscenes devkit dependency lazy.
        from nuscenes.eval.detection.config import config_factory

        self.eval_detection_configs = config_factory(self.eval_version)
        if self.modality is None:
            # Fall back to a lidar-only modality when the config omits it.
            self.modality = dict(
                use_camera=False,
                use_lidar=True,
                use_radar=False,
                use_map=False,
                use_external=False,
            )


        self.img_info_prototype = img_info_prototype
        self.multi_adj_frame_id_cfg = multi_adj_frame_id_cfg
        self.ego_cam = ego_cam
        self.nusc = None

        # SOLOFusion
        self.use_sequence_group_flag = use_sequence_group_flag
        self.sequences_split_num = sequences_split_num
        # sequences_split_num splits each sequence into sequences_split_num parts.
        # if self.test_mode:
        #     assert self.sequences_split_num == 1
        if self.use_sequence_group_flag:
            self._set_sequence_group_flag() # Must be called after load_annotations b/c load_annotations does sorting.

    def _set_sequence_group_flag(self):
        """Assign a group id (``self.flag``) to every sample.

        A new sequence starts whenever a sample has no ``'prev'`` entry.
        When ``self.sequences_split_num`` is not 1, each sequence is further
        split into that many contiguous chunks ('all' puts every sample in
        its own group).
        """
        flags = []
        seq_id = 0
        for idx, sample in enumerate(self.data_infos):
            # No previous frame (except at index 0) marks a sequence boundary.
            if idx != 0 and len(sample['prev']) == 0:
                seq_id += 1
            flags.append(seq_id)
        self.flag = np.array(flags, dtype=np.int64)

        if self.sequences_split_num == 1:
            return
        if self.sequences_split_num == 'all':
            # Every sample becomes its own group.
            self.flag = np.array(range(len(self.data_infos)), dtype=np.int64)
            return

        seq_lengths = np.bincount(self.flag)
        split_flags = []
        next_flag = 0
        for length in seq_lengths:
            # Chunk boundaries: 0, step, 2*step, ..., length.
            step = math.ceil(length / self.sequences_split_num)
            boundaries = np.array(list(range(0, length, step)) + [length])
            for chunk_len in boundaries[1:] - boundaries[:-1]:
                split_flags.extend([next_flag] * chunk_len)
                next_flag += 1

        assert len(split_flags) == len(self.flag)
        assert len(np.bincount(split_flags)) == len(np.bincount(self.flag)) * self.sequences_split_num
        self.flag = np.array(split_flags, dtype=np.int64)

    def _format_map_gt(self):
        """Dump vectorized-map ground truth to ``self.map_ann_file``.

        Builds a ``VectorizedLocalMap`` and, for every sample, extracts map
        vectors (points, class name, label) in the ego patch, then dumps the
        collection as ``{'GTs': [...]}``. Does nothing when the file already
        exists.

        NOTE(review): relies on ``self.map_ann_file``, ``self.map_eval_cfg``
        and ``self.MAPCLASSES`` being provided elsewhere — they are not set
        in this class's ``__init__``; confirm against the config/mixin that
        defines them.
        """
        gt_annos = []
        print('Start to convert gt map format...')
        # assert self.map_ann_file is not None

        if  (not os.path.exists(self.map_ann_file)) :

            patch_h, patch_w = self.map_eval_cfg['region']
            # Cap the patch height at 50. NOTE(review): rationale for this cap
            # is not visible here — confirm intent.
            patch_h = min(patch_h, 50)
            self.vector_map = VectorizedLocalMap(self.data_root, 
                            patch_size=(patch_h, patch_w), map_classes=self.MAPCLASSES, 
                            fixed_ptsnum_per_line=20,
                            padding_value=-10000)

            dataset_length = len(self)
            prog_bar = mmcv.ProgressBar(dataset_length)
            mapped_class_names = self.MAPCLASSES
            for sample_id in range(dataset_length):
                sample_token = self.data_infos[sample_id]['token']
                gt_anno = {}
                gt_anno['sample_token'] = sample_token
                # gt_sample_annos = []
                gt_sample_dict = {}
                
                # Ego -> global transform for this sample.
                ego_pose = torch.FloatTensor(nuscenes_get_rt_matrix(
                    self.data_infos[sample_id], self.data_infos[sample_id],
                    "ego", "global"))

                ego2global_translation = list(ego_pose[:3,3].numpy())
                # Yaw of the ego x-axis in the global frame, converted to degrees.
                v = np.dot(ego_pose[:3,:3].numpy(), np.array([1, 0, 0]))
                yaw = np.arctan2(v[1], v[0])
                patch_angle = yaw / np.pi * 180
                location = self.data_infos[sample_id]['map_location']

                gt_sample_dict =  self.vectormap_pipeline(location, ego2global_translation, patch_angle)
                gt_labels = gt_sample_dict['map_gt_labels_3d'].data.numpy()
                gt_vecs = gt_sample_dict['map_gt_bboxes_3d'].data.instance_list
                gt_vec_list = []
                for i, (gt_label, gt_vec) in enumerate(zip(gt_labels, gt_vecs)):
                    name = mapped_class_names[gt_label]
                    anno = dict(
                        pts=np.array(list(gt_vec.coords)),
                        pts_num=len(list(gt_vec.coords)),
                        cls_name=name,
                        type=gt_label,
                    )
                    gt_vec_list.append(anno)
                gt_anno['vectors']=gt_vec_list
                gt_annos.append(gt_anno)

                prog_bar.update()
            nusc_submissions = {
                'GTs': gt_annos
            }
            print('\n GT anns writes to', self.map_ann_file)
            mmcv.dump(nusc_submissions, self.map_ann_file)
        else:
            print(f'{self.map_ann_file} exist, not update')
    def vectormap_pipeline(self, location, ego2global_translation, patch_angle, flip_dx=False, flip_dy=False):
        """Generate vectorized-map GT containers for one sample.

        Args:
            location (str): nuScenes map location name.
            ego2global_translation (list[float]): Ego position in the global
                frame.
            patch_angle (float): Patch rotation in degrees (global ego yaw).
            flip_dx (bool): Horizontal-flip augmentation flag.
            flip_dy (bool): Vertical-flip augmentation flag.

        Returns:
            dict: ``map_gt_labels_3d`` (DataContainer holding the label
            tensor) and ``map_gt_bboxes_3d`` (cpu-only DataContainer holding
            either a ``LiDARInstanceLines`` instance or a flattened float32
            tensor of point coordinates).
        """
        # anns_results keys:
        #   'gt_vecs_pts_loc': list[num_vecs] of per-vector point coordinates
        #                      (or a LiDARInstanceLines instance)
        #   'gt_vecs_label':   list[num_vecs] of class indices
        anns_results = self.vector_map.gen_vectorized_samples(
            location, ego2global_translation, patch_angle, flip_dx, flip_dy
        )

        gt_vecs_label = to_tensor(anns_results['gt_vecs_label'])
        from .vector_map import LiDARInstanceLines
        gt_vecs_pts_loc = anns_results['gt_vecs_pts_loc']
        if not isinstance(gt_vecs_pts_loc, LiDARInstanceLines):
            gt_vecs_pts_loc = to_tensor(gt_vecs_pts_loc)
            try:
                gt_vecs_pts_loc = gt_vecs_pts_loc.flatten(1).to(dtype=torch.float32)
            except Exception:
                # Empty tensors cannot be flattened from dim 1; keep them
                # as-is (skipped in training, preserved for testing).
                # Fix: this was a bare `except:` with a no-op self-assignment,
                # which also swallowed KeyboardInterrupt/SystemExit.
                pass

        return dict(
            map_gt_labels_3d = DC(gt_vecs_label, cpu_only=False),
            map_gt_bboxes_3d = DC(gt_vecs_pts_loc, cpu_only=True),
        )


    def get_data_info(self, index):
        """Assemble the pipeline input dict for sample ``index``.

        Adds occupancy paths, per-camera lidar-to-image projections (for the
        'mmcv' prototype) or raw info dicts (for 'bevdet' prototypes), and —
        when sequence grouping is enabled — SOLOFusion sequence flags, CAN-bus
        features and current/previous-frame pose matrices.

        Args:
            index (int): Index into ``self.data_infos``.

        Returns:
            dict: Input dict consumed by the data pipeline.
        """

        info = copy.deepcopy(self.data_infos[index])
        # standard protocol modified from SECOND.Pytorch
        # NOTE(review): the result of this super() call is immediately
        # overwritten by the dict below, so it only matters for side effects
        # (if any) — confirm whether the call can be dropped.
        input_dict = super(NuScenesDatasetOccpancy, self).get_data_info(index)
        input_dict = dict(
            index=index,
            sample_idx=info['token'],
            pts_filename=info['lidar_path'],
            sweeps=info['sweeps'],
            scene_name=info['scene_name'],
            timestamp=info['timestamp'] / 1e6,
            lidarseg_filename=info.get('lidarseg_filename', 'None') 
        )
        if 'occ_path' in info:
            input_dict['occ_gt_path'] = info['occ_path']
        if 'ann_infos' in info:
            input_dict['ann_infos'] = info['ann_infos']
            
        if self.modality['use_camera']:
            if self.img_info_prototype == 'mmcv':
                image_paths = []
                lidar2img_rts = []
                cam_positions = []

                for cam_type, cam_info in info['cams'].items():
                    image_paths.append(cam_info['data_path'])
                    # obtain lidar to image transformation matrix
                    lidar2cam_r = np.linalg.inv(
                        cam_info['sensor2lidar_rotation'])
                    lidar2cam_t = cam_info[
                        'sensor2lidar_translation'] @ lidar2cam_r.T
                    # lidar2cam_rt is stored transposed (row-vector convention),
                    # hence the .T when composing with the intrinsics below.
                    lidar2cam_rt = np.eye(4)
                    lidar2cam_rt[:3, :3] = lidar2cam_r.T
                    lidar2cam_rt[3, :3] = -lidar2cam_t
                    intrinsic = cam_info['cam_intrinsic']
                    # Pad the 3x3 intrinsics into a 4x4 projection matrix.
                    viewpad = np.eye(4)
                    viewpad[:intrinsic.shape[0], :intrinsic.
                            shape[1]] = intrinsic
                    lidar2img_rt = (viewpad @ lidar2cam_rt.T)
                    lidar2img_rts.append(lidar2img_rt)
                    # Camera origin expressed in the lidar frame.
                    cam_position = np.linalg.inv(lidar2cam_rt.T) @ np.array([0., 0., 0., 1.]).reshape([4, 1])
                    cam_positions.append(cam_position.flatten()[:3])
                   

                input_dict.update(
                    dict(
                        img_filename=image_paths,
                        lidar2img=lidar2img_rts,
                    ))

                if not self.test_mode:
                    annos = self.get_ann_info(index)
                    input_dict['ann_info'] = annos
            else:   
                assert 'bevdet' in self.img_info_prototype
                input_dict.update(dict(curr=info))
                if '4d' in self.img_info_prototype:
                    # Attach temporally adjacent frames for 4D prototypes.
                    info_adj_list = self.get_adj_info(info, index)
                    input_dict.update(dict(adjacent=info_adj_list))
            if self.use_sequence_group_flag:
                input_dict['sample_index'] = index
                input_dict['sequence_group_idx'] = self.flag[index]
                # First sample of a group: dataset start or group-flag change.
                input_dict['start_of_sequence'] = index == 0 or self.flag[index - 1] != self.flag[index]
                # Get a transformation matrix from current keyframe lidar to previous keyframe lidar
                # if they belong to same sequence.
                can_bus_info = info['gt_ego_lcf_feat']
                input_dict['can_bus_info'] = can_bus_info
                input_dict['nuscenes_get_rt_matrix'] = dict(
                    lidar2ego_rotation = info['lidar2ego_rotation'],
                    lidar2ego_translation = info['lidar2ego_translation'],
                    ego2global_rotation = info['ego2global_rotation'],
                    ego2global_translation = info['ego2global_translation'],
                )

                input_dict['ego_pose_inv'] = torch.FloatTensor(nuscenes_get_rt_matrix(
                    info, info,
                    "global", "ego"))
                
                input_dict['ego_pose'] = torch.FloatTensor(nuscenes_get_rt_matrix(
                    info, info,
                    "ego", "global"))

                if not input_dict['start_of_sequence']:
                    input_dict['curr_to_prev_lidar_rt'] = torch.FloatTensor(nuscenes_get_rt_matrix(
                        info, self.data_infos[index - 1],
                        "lidar", "lidar"))
                    input_dict['prev_lidar_to_global_rt'] = torch.FloatTensor(nuscenes_get_rt_matrix(
                        self.data_infos[index - 1], info,
                        "lidar", "global")) # TODO: Note that global is same for all.
                    input_dict['curr_to_prev_ego_rt'] = torch.FloatTensor(nuscenes_get_rt_matrix(
                       info, self.data_infos[index - 1],
                        "ego", "ego"))
                else:
                    # Sequence start: no previous frame exists, so fall back to
                    # identity / self transforms.
                    input_dict['curr_to_prev_lidar_rt'] = torch.eye(4).float()
                    input_dict['prev_lidar_to_global_rt'] = torch.FloatTensor(nuscenes_get_rt_matrix( 
                        info, info, "lidar", "global")
                        )
                    input_dict['curr_to_prev_ego_rt'] = torch.FloatTensor(nuscenes_get_rt_matrix(
                        info, info,
                        "ego", "ego"))
                input_dict['global_to_curr_lidar_rt'] = torch.FloatTensor(nuscenes_get_rt_matrix(
                    info, info,
                    "global", "lidar"))
                

                if self.load_fut_bbox_info:
                    fut_boxes_info, fut_labels_info = self.get_fut_bbox_info(info, index)
                    input_dict['fut_boxes_info'] = fut_boxes_info
                    input_dict['fut_labels_info'] = fut_labels_info
                 
        return input_dict

    def get_fut_bbox_info(self, info, index):
        """Collect GT boxes/labels for the next six keyframes of ``info``.

        Future frames beyond the dataset end are clamped to the last sample;
        frames from a different scene contribute empty lists.

        Returns:
            tuple(list, list): Per-offset global-frame GT boxes and labels.
        """
        fut_boxes_info = []
        fut_labels_info = []
        last_idx = len(self.data_infos) - 1
        for offset in range(1, 7):
            fut_info = self.data_infos[min(index + offset, last_idx)]
            if fut_info['scene_token'] == info['scene_token']:
                fut_boxes_info.append(fut_info['ann_infos']['gt_boxes_3d_in_global'])
                fut_labels_info.append(fut_info['ann_infos']['gt_labels_3d'])
            else:
                # Crossed a scene boundary: no valid future GT for this offset.
                fut_boxes_info.append([])
                fut_labels_info.append([])

        return fut_boxes_info, fut_labels_info


    def evaluate(self,
                 results,
                 logger=None,
                 metric='bbox',
                 jsonfile_prefix='test',
                 result_names=['pts_bbox'],
                 show=False,
                 out_dir=None,
                 pipeline=None,
                 save=False,
                ):
        """Dispatch evaluation to the sub-evaluators matching the outputs.

        Depending on which keys are present in ``results[0]`` this runs
        ego-trajectory, occupancy, mask-IoU, bbox and tracking evaluation,
        then writes all collected metrics to ``results.csv`` under
        ``jsonfile_prefix``.

        Returns:
            dict: Merged metrics from every evaluator that ran.
        """
        results_dict = {}
        mmcv.mkdir_or_exist(jsonfile_prefix)
        first = results[0]

        if first.get('pred_ego_traj', None) is not None:
            traj_metrics = self.evaluate_ego_traj(
                results,
                jsonfile_prefix=jsonfile_prefix,
                logger=logger
            )
            results_dict.update(traj_metrics)

        if first.get('pred_occ', None) is not None:
            occ_metrics = self.evaluate_occupancy(
                results, show_dir=jsonfile_prefix, save=save)
            results_dict.update(occ_metrics)

        if first.get('iou', None) is not None:
            results_dict.update(self.evaluate_mask(results))

        if first.get('pts_bbox', None) is not None:
            shared_kwargs = dict(
                logger=logger,
                metric=metric,
                jsonfile_prefix=jsonfile_prefix,
                result_names=result_names,
                show=show,
                out_dir=out_dir,
                pipeline=pipeline,
            )
            results_dict.update(self.evaluate_bbox(results, **shared_kwargs))
            # No-op when the outputs carry no tracking information.
            results_dict.update(self.evaluate_tracking(results, **shared_kwargs))

        # Persist every metric as one "key,value" row.
        with open(osp.join(jsonfile_prefix, 'results.csv'), 'w', newline='') as f:
            writer = csv.writer(f)
            for key, value in results_dict.items():
                writer.writerow([key, value])

        return results_dict

    def evaluate_ego_traj(self, results, jsonfile_prefix=None, logger=None):
        """Evaluate planned ego trajectories (L2 distance, collision, smoothness).

        Three metric dicts are accumulated over the deduplicated results:
        index 0 over all samples, index 1 excluding samples whose last GT
        command flag is set, index 2 over only those samples. Only dict 0 is
        returned, augmented with smoothness statistics and averaged 1-3 s
        L2/collision metrics; the global trajectories are also dumped to
        ``results_nusc_planning.json``.

        NOTE(review): assumes ``l2_dist`` is a (1, 6) tensor matching
        ``res``/``res_c`` — confirm against the model output format.
        """
        print('Start to convert traj format...')
        l2_dist_list = []
        # Per-horizon-step accumulators (sum and count) for the L2 distance.
        res = torch.zeros(1, 6)
        res_c = torch.zeros(1, 6)
        processed_set = set()
        ego_trajs_in_global_dict = dict(
            trajs=dict(),
            map_lane=dict(),
            map_label=dict(),
        )
        c = 0
        
        for sample_id, traj in enumerate(mmcv.track_iter_progress(results)):
            # The enumerate index is immediately replaced by the sample index
            # recorded in the prediction itself.
            sample_id = traj['pred_ego_traj']['index']
            l2_dist = traj['pred_ego_traj']['metric_dict'].pop('l2_dist')
            if sample_id in processed_set: continue
            # if traj['pred_ego_traj']['gt_ego_fut_cmd'][-1] == 1: continue
            processed_set.add(sample_id)
            c += 1
            ego_trajs_in_global = traj['pred_ego_traj']['ego_trajs_in_global'].numpy()
            ego_trajs_in_global_dict['trajs'][traj['pred_ego_traj']['index_w_scene']] = ego_trajs_in_global
            # Only accumulate valid (non-negative) distances per horizon step.
            mask = l2_dist >= 0
            res[mask] += l2_dist[mask]
            res_c[mask] += 1
            info = self.data_infos[sample_id]  # NOTE(review): unused — confirm it can be removed.

        print('valid: ', c)

        l2_dist = (res/res_c).cpu().numpy()
       
        print('++++++++++++++')
        print('l2_dist')
        print(l2_dist)
        print('--------------')


        metric_dict = [None, None, None]

        # Pass 0: all samples; pass 1: exclude samples whose last GT command
        # flag equals 1; pass 2: only those samples.
        for i in range(3):
            num_valid = 0
            processed_set = set()
            for sample_id, traj in enumerate(mmcv.track_iter_progress(results)):
                sample_id = traj['pred_ego_traj']['index']
                if sample_id in processed_set: continue
                if i == 1 and traj['pred_ego_traj']['gt_ego_fut_cmd'][-1] == 1: continue
                if i == 2 and traj['pred_ego_traj']['gt_ego_fut_cmd'][-1] != 1: continue
                processed_set.add(sample_id)
                if not traj['pred_ego_traj']['metric_dict']['fut_valid_flag']: continue
                else: num_valid += 1

                if metric_dict[i] is None:
                    metric_dict[i] = copy.deepcopy(traj['pred_ego_traj']['metric_dict'])
                else:
                    # Element-wise accumulation of every metric value.
                    for k in traj['pred_ego_traj']['metric_dict'].keys():
                        metric_dict[i][k] += traj['pred_ego_traj']['metric_dict'][k]

            print('valid_after: ', num_valid, i)
            # Average the accumulated metrics and stringify them for dumping.
            for k in metric_dict[i]:
                metric_dict[i][k] = str(metric_dict[i][k] / num_valid)
                print("{}:{}:{}".format(i, k, metric_dict[i][k]))

        res_path = osp.join(jsonfile_prefix, 'results_nusc_planning.json')
        print('Results writes to', res_path)
    
        mmcv.dump(ego_trajs_in_global_dict, res_path)
        metric_dict[0].update(self.smoothness(ego_trajs_in_global_dict['trajs']))

        # Average L2 / collision metrics over the 1 s, 2 s and 3 s horizons.
        avg_l2 = 0
        avg_col = 0
        for i in range(1,4):
            avg_l2 += float(metric_dict[0][f'plan_L2_{i}s'])
            avg_col += float(metric_dict[0][f'plan_obj_box_col_{i}s'])
        avg_l2 /= 3
        avg_col /= 3
        print(f'avg_l2 {avg_l2}, avg_col {avg_col}')
        print('--------------')
        # metric_dict['l2_dist'] = l2_dist
        metric_dict[0]['avg_l2'] = avg_l2
        metric_dict[0]['avg_col'] = avg_col
        return metric_dict[0]

    def smoothness(self, data):
        """Compute trajectory stability (mean/variance of distance to the
        latest prediction) over 1/2/3-second windows.

        Args:
            data (dict): Maps keys of the form ``'scene-XXXX-frame'`` to
                per-sample trajectory arrays.
                NOTE(review): assumes each array holds at least 7 per-step
                trajectory points indexable as ``data[key][step]`` — confirm
                against the producer in ``evaluate_ego_traj``.

        Returns:
            dict: Mean over all stable frames of the per-window mean/variance
            distances.
        """
        keys = list(data.keys())
        # print(keys)
        new_keys = []
        # Parse 'scene-<id>-<frame>' keys into sortable (scene, frame) pairs.
        for key in keys:
            s = key.split("-")
            new_keys.append([int(s[1]),int(s[2])])

        new_keys=sorted(new_keys,key=(lambda x:(x[0], x[1])))
        sorted_keys = []
        # Rebuild the canonical key strings in sorted order.
        for key in new_keys:
            v = ['scene',  str(key[0]).zfill(4), str(key[1]) ]
            k='-'.join(v)
            sorted_keys.append(k)


        # Group consecutive sorted keys by their scene prefix.
        all_scene_keys=[]
        key='-'.join(sorted_keys[0].split("-")[:2])
        scene=[]

        for k in sorted_keys:
            if(key in k):
                # print(True)
                scene.append(k)
            else:
                s =k.split("-")
                key='-'.join(s[:2])
                all_scene_keys.append(scene)
                scene=[k]
        # NOTE(review): the final scene is never appended to all_scene_keys,
        # so the last scene appears to be skipped — confirm whether this is
        # intentional.

        # Transform raw data: for each frame, gather the predictions made for
        # it by the current frame and up to 6 preceding frames (horizon of 7).
        new_data={}
        for keys in all_scene_keys:
            l = len(keys)
            for i in range(l):
                val = []
                index = i
                for j in range(i+1):
                    if index>6:
                        # Beyond the 7-step horizon: this earlier frame no
                        # longer predicts frame i.
                        index-=1
                    else:
                        val.append(data[keys[j]][index])
                        index-=1
                new_data[keys[i]]=val

        #compute mean and var
        res = {
            'stable_mean_distance_1s': [],
            'stable_variance_distance_1s': [],
            'stable_mean_distance_2s': [],
            'stable_variance_distance_2s': [],
            'stable_mean_distance_3s': [],
            'stable_variance_distance_3s': [],
        }
        
        for key, value in new_data.items():
            # Filter unstable data: only frames with a full 7-entry history.
            if(len(value)!=7):
                continue
            assert len(value)==7
            #compute mean
            for window in [1, 2, 3]:
                # The last entry is the most recent (reference) prediction;
                # compare the 2*window preceding predictions against it.
                gt = value[-1]
                pred = value[6-window*2:-1]
                #compute var
                data_array = np.array(pred)

                distances = np.linalg.norm(data_array - gt, axis=1)
                mean_distance = np.mean(distances)
                variance_distance = np.var(distances)
                res[f'stable_mean_distance_{window}s'].append(mean_distance)
                res[f'stable_variance_distance_{window}s'].append(variance_distance)
        
        for key in res.keys():
            res[key] = np.mean(res[key])
        print(res)
        return res

    # NOTE(review): the commented-out block below is an earlier draft of
    # `evaluate_occupancy`, superseded by the live implementation further
    # down this file; it is kept for reference only and is safe to delete.
    # def evaluate_occupancy(self, occ_results, runner=None, show_dir=None, save=False, **eval_kwargs):
    #     from .occ_metrics import Metric_mIoU, Metric_FScore
    #     if show_dir is not None:
    #         # import os
    #         # if not os.path.exists(show_dir):

    #         mmcv.mkdir_or_exist(show_dir)
    #         mmcv.mkdir_or_exist(os.path.join(show_dir, 'occupancy_pred'))
    #         print('\n在{}文件夹保存用于可视化的output和gt。'.format(show_dir))
    #         begin= 0 # eval_kwargs.get('begin',None)

    #         end=1 if not save else len(occ_results) # eval_kwargs.get('end',None)
    #     self.occ_eval_metrics = Metric_mIoU(
    #         num_classes=18,
    #         use_lidar_mask=False,
    #         use_image_mask=True)
        
    #     self.eval_fscore = False
    #     if  self.eval_fscore:
    #         self.fscore_eval_metrics = Metric_FScore(
    #             leaf_size=10,
    #             threshold_acc=0.4,
    #             threshold_complete=0.4,
    #             voxel_size=[0.4, 0.4, 0.4],
    #             range=[-40, -40, -1, 40, 40, 5.4],
    #             void=[17, 255],
    #             use_lidar_mask=False,
    #             use_image_mask=True,
    #         )
    #     count = 0
    #     print('\nStarting Evaluation...')
    #     processed_set = set()
    #     for occ_pred_w_index in tqdm(occ_results):
    #         index = occ_pred_w_index['index']
    #         if index in processed_set: continue
    #         processed_set.add(index)

    #         occ_pred = occ_pred_w_index['pred_occ']
    #         info = self.data_infos[index]
    #         scene_name = info['scene_name']
    #         sample_token = info['token']
    #         occupancy_file_path = osp.join(os.path.join(info['occ_path'], 'labels.npz'))
    #         occ_gt = np.load(occupancy_file_path)
 
    #         gt_semantics = occ_gt['semantics']
    #         mask_lidar = occ_gt['mask_lidar'].astype(bool)
    #         mask_camera = occ_gt['mask_camera'].astype(bool)            

    #         self.occ_eval_metrics.add_batch(
    #             occ_pred['occ_pred'] if (isinstance(occ_pred, dict) and 'occ_pred' in occ_pred) else occ_pred, 
    #             gt_semantics, 
    #             mask_lidar, 
    #             mask_camera
    #             )
    #         if self.eval_fscore:
    #             self.fscore_eval_metrics.add_batch(
    #                 occ_pred[mask_camera], 
    #                 gt_semantics, 
    #                 mask_lidar, 
    #                 mask_camera
    #                 )
   
    #     # Get results
    #     res_raw = self.occ_eval_metrics.count_miou()

    #     if isinstance(res_raw, tuple) and len(res_raw) == 3:
    #         class_names, ious, sample_count = res_raw
    #         res = {
    #             'mIoU_per_class': dict(zip(class_names, ious.tolist())),   # 每类IoU字典
    #             'mIoU_mean_all': float(np.nanmean(ious)),                  # 所有18类平均
    #             'mIoU_mean_wo_free': float(np.nanmean(ious[:len(class_names)-1])),  # 去掉'free'的平均
    #             'mIoU_num_samples': sample_count
    #         }
    #     else:
    #         raise TypeError(f"Unexpected count_miou() result format: {type(res_raw)}, content: {res_raw}")

    #     if self.eval_fscore:
    #         res.update(self.fscore_eval_metrics.count_fscore())

    #     return res



    def evaluate_occupancy(self, occ_results, runner=None, show_dir=None, save=False, **eval_kwargs):
        """Evaluate occupancy predictions with per-class mIoU.

        Args:
            occ_results (list[dict]): Each item holds ``'index'`` (sample
                index) and ``'pred_occ'`` (the prediction — either an array
                or a dict containing ``'occ_pred'``).
            runner: Unused; kept for API compatibility.
            show_dir (str, optional): When given, visualization directories
                are created (the dumps themselves are written elsewhere).
            save (bool): Unused here; kept for API compatibility.

        Returns:
            dict: Per-class IoUs, mean IoU with/without the 'free' class and
            the number of evaluated samples.
        """
        # Fix: the stdlib/numpy/mmcv/tqdm/.occ_metrics re-imports that used
        # to live here shadowed identical module-level imports; removed.
        if show_dir is not None:
            mmcv.mkdir_or_exist(show_dir)
            mmcv.mkdir_or_exist(os.path.join(show_dir, 'occupancy_pred'))
            print('\n在{}文件夹保存用于可视化的output和gt。'.format(show_dir))

        self.occ_eval_metrics = Metric_mIoU(
            num_classes=18,
            use_lidar_mask=False,
            use_image_mask=True)

        # F-score evaluation is hard-disabled; flip this flag to enable it.
        self.eval_fscore = False
        if self.eval_fscore:
            self.fscore_eval_metrics = Metric_FScore(
                leaf_size=10,
                threshold_acc=0.4,
                threshold_complete=0.4,
                voxel_size=[0.4, 0.4, 0.4],
                range=[-40, -40, -1, 40, 40, 5.4],
                void=[17, 255],
                use_lidar_mask=False,
                use_image_mask=True,
            )

        print('\nStarting Evaluation...')
        # Results may contain duplicate samples; only the first occurrence
        # of each index is evaluated.
        processed_set = set()
        for occ_pred_w_index in tqdm(occ_results):
            index = occ_pred_w_index['index']
            if index in processed_set:
                continue
            processed_set.add(index)

            occ_pred = occ_pred_w_index['pred_occ']
            info = self.data_infos[index]
            occupancy_file_path = osp.join(info['occ_path'], 'labels.npz')
            occ_gt = np.load(occupancy_file_path)

            gt_semantics = occ_gt['semantics']
            mask_lidar = occ_gt['mask_lidar'].astype(bool)
            mask_camera = occ_gt['mask_camera'].astype(bool)

            # Predictions may be wrapped in a dict under 'occ_pred'.
            occ_pred_final = occ_pred['occ_pred'] if (isinstance(occ_pred, dict) and 'occ_pred' in occ_pred) else occ_pred

            self.occ_eval_metrics.add_batch(
                occ_pred_final,
                gt_semantics,
                mask_lidar,
                mask_camera
            )

            if self.eval_fscore:
                # NOTE(review): dead while eval_fscore is False.
                # `mask_camera[mask_camera]` is an all-True array — confirm
                # the intended masking before enabling F-score evaluation.
                self.fscore_eval_metrics.add_batch(
                    occ_pred_final[mask_camera],
                    gt_semantics[mask_camera],
                    mask_lidar[mask_camera],
                    mask_camera[mask_camera]
                )

        # Aggregate mIoU over all processed samples.
        res_raw = self.occ_eval_metrics.count_miou()

        if isinstance(res_raw, tuple) and len(res_raw) == 3:
            class_names, ious, sample_count = res_raw
            res = {
                'mIoU_per_class': dict(zip(class_names, ious.tolist())),
                'mIoU_mean_all': float(np.nanmean(ious)),
                # Last class is 'free'; exclude it from the second mean.
                'mIoU_mean_wo_free': float(np.nanmean(ious[:len(class_names) - 1])),
                'mIoU_num_samples': sample_count
            }

            # Pretty-print the per-class IoU table.
            print("\n📊 Per-Class IoU (in %):\n")
            print(f"{'Class Name':<25} | {'IoU (%)':>8}")
            print("-" * 38)
            for cls, val in zip(class_names, ious):
                print(f"{cls:<25} | {val * 100:>8.2f}")
            print("-" * 38)
            print(f"{'Mean IoU (all 18)':<25} | {res['mIoU_mean_all'] * 100:>8.2f}")
            print(f"{'Mean IoU (wo free)':<25} | {res['mIoU_mean_wo_free'] * 100:>8.2f}")
            print(f"{'Sample count':<25} | {res['mIoU_num_samples']:>8}")

        else:
            raise TypeError(f"Unexpected count_miou() result format: {type(res_raw)}, content: {res_raw}")

        if self.eval_fscore:
            res.update(self.fscore_eval_metrics.count_fscore())

        return res


    def evaluate_mask(self, results):
        """Average the per-sample ``'iou'`` values over all results.

        Args:
            results (list[dict]): Each item carries an ``'iou'`` entry.

        Returns:
            dict: ``{'iou': mean_iou}``.
        """
        total = 0
        for item in results:
            total += item['iou']
        return {'iou': total / len(results)}

    def evaluate_tracking(self,
                          results,
                          metric='bbox',
                          logger=None,
                          jsonfile_prefix=None,
                          result_names=['pts_bbox'],
                          show=False,
                          out_dir=None,
                          pipeline=None):
        """Evaluate tracking (and optionally motion) in nuScenes protocol.

        Formats ``results`` into tracking submissions and runs the
        single-file evaluators; when the formatted outputs include motion
        information, motion metrics are evaluated as well.

        Args:
            results (list[dict]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): Prefix ("a/b/prefix") for the json
                files; a temp file is created when not given. Default: None.
            show (bool): Whether to visualize. Default: False.
            out_dir (str): Path to save the visualization results.
                Default: None.
            pipeline (list[dict], optional): Raw data loading for showing.
                Default: None.

        Returns:
            dict[str, float]: Results of each evaluation metric.
        """
        result_files, tmp_dir, with_motion = self.format_tracking_results(
            results, jsonfile_prefix)

        if isinstance(result_files, dict):
            results_dict = dict()
            for name in result_names:
                print('Evaluating tracking bboxes of {}'.format(name))
                results_dict.update(self._evaluate_tracking_single(result_files[name]))
                if with_motion:
                    print('Evaluating motion bboxes of {}'.format(name))
                    results_dict.update(self._evaluate_motion_single(result_files[name]))
        elif isinstance(result_files, str):
            results_dict = self._evaluate_tracking_single(result_files)
            if with_motion:
                print('Evaluating motion bboxes of')
                results_dict.update(self._evaluate_motion_single(result_files))

        if tmp_dir is not None:
            tmp_dir.cleanup()

        if show:
            self.show(results, out_dir, pipeline=pipeline)
        return results_dict

    def evaluate_bbox(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix='test',
                 result_names=['pts_bbox'],
                 show=False,
                 out_dir=None,
                 pipeline=None):
        """Detection bbox evaluation in nuScenes protocol.

        Args:
            results (list[dict]): Testing results of the dataset.
            metric (str | list[str], optional): Metrics to be evaluated.
                Default: 'bbox'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str, optional): The prefix of json files including
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: 'test'.
            result_names (list[str], optional): Keys of the result dicts to
                evaluate when the formatted results are a dict.
                Default: ['pts_bbox'].
            show (bool, optional): Whether to visualize.
                Default: False.
            out_dir (str, optional): Path to save the visualization results.
                Default: None.
            pipeline (list[dict], optional): raw data loading for showing.
                Default: None.

        Returns:
            dict[str, float]: Results of each evaluation metric.

        Raises:
            TypeError: If the formatted result files are neither a dict
                nor a str.
        """
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

        if isinstance(result_files, dict):
            results_dict = dict()
            for name in result_names:
                print('Evaluating bboxes of {}'.format(name))
                ret_dict = self._evaluate_single(result_files[name])
                # BUG FIX: the update was dedented outside the loop, so only
                # the metrics of the LAST entry in `result_names` survived.
                # Mirrors the (correct) loop in evaluate_tracking().
                results_dict.update(ret_dict)
        elif isinstance(result_files, str):
            results_dict = self._evaluate_single(result_files)
        else:
            # Keep `results_dict` from being unbound on unexpected input.
            raise TypeError(
                'result_files must be a dict or str, got {}'.format(
                    type(result_files)))

        if tmp_dir is not None:
            tmp_dir.cleanup()

        if show or out_dir:
            self.show(results, out_dir, show=show, pipeline=pipeline)

        return results_dict
    

    def format_tracking_results(self, results, jsonfile_prefix=None):
        """Format tracking results to json (nuScenes tracking submission format).

        Args:
            results (list[dict]): Testing results of the dataset.
            jsonfile_prefix (str): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: Returns (result_files, tmp_dir, with_motion), where
                `result_files` is a dict mapping result names to json
                filepaths (or a single filepath str for the plain format),
                `tmp_dir` is the temporal directory created for saving json
                files when `jsonfile_prefix` is not specified, and
                `with_motion` is a bool telling whether motion-trajectory
                predictions were found in the results.
        """
        assert isinstance(results, list), 'results must be a list'
        # NOTE(review): the check is `>=`, not `==`, although the message
        # speaks of equality -- results longer than the dataset are tolerated.
        assert len(results) >= len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        if jsonfile_prefix is None:
            # Caller gave no prefix: dump into a temp dir that the caller
            # (evaluate_tracking) is expected to clean up.
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None

        # currently the output prediction results could be in two formats
        # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)
        # 2. list of dict('pts_bbox' or 'img_bbox':
        #     dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))
        # this is a workaround to enable evaluation of both formats on nuScenes
        # refer to https://github.com/open-mmlab/mmdetection3d/issues/449
        if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]):
            result_files, with_motion = self._format_tracking_bbox(results, jsonfile_prefix)
        else:
            # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict
            result_files = dict()
            # Only 'pts_bbox' is handled here, so `with_motion` is always
            # bound after the (non-empty) loop below.
            for name in ['pts_bbox']:
                print(f'\nFormating tracking bboxes of {name}')
                results_ = [out[name] for out in results]
                tmp_file_ = osp.join(jsonfile_prefix, name)
                result_file, with_motion = self._format_tracking_bbox(results_, tmp_file_)
                result_files.update(
                    {name: result_file})
        return result_files, tmp_dir, with_motion

    def _format_tracking_bbox(self, results, jsonfile_prefix=None):
        """Convert tracking results to the standard nuScenes submission format.

        Args:
            results (list[dict]): Testing results of the dataset. Each dict
                is expected to contain 'boxes_3d', 'labels_3d',
                'track_scores' and 'obj_idxes'; 'motion_traj'/'motion_cls'
                are optional motion predictions.
            jsonfile_prefix (str): The prefix of the output jsonfile.
                You can specify the output directory/filename by
                modifying the jsonfile_prefix. Default: None.

        Returns:
            tuple: (res_path, with_motion) -- path of the dumped json file
                (or None when the results carry no tracking info) and a bool
                telling whether motion predictions were attached.
        """
        nusc_annos = {}
        mapped_class_names = self.CLASSES
        print('Start to convert tracking format...')
        # Tracks which sample indices were already converted, so repeated
        # entries for the same frame are skipped.
        processed_set = set()
        with_motion = False
        for sample_id, det in enumerate(mmcv.track_iter_progress(results)):
            boxes = det['boxes_3d'].tensor.numpy()
            # scores = det['scores_3d'].numpy()
            labels = det['labels_3d'].numpy()
            # Results may carry their own dataset index; fall back to the
            # enumeration position otherwise.
            sample_id = det.get('index', sample_id)
            if 'track_scores' not in det:
                # No tracking head output -> nothing to format.
                print('no tracking info')
                return None, with_motion
            tracking_scores = det['track_scores'].numpy()
            
            obj_idxes = det['obj_idxes'].numpy()
            if sample_id in processed_set: continue
            processed_set.add(sample_id)
            sample_token = self.data_infos[sample_id]['token']
  
            # ego->global transform taken from the reference camera; boxes
            # are rotated/translated into the global frame below.
            # NOTE(review): assumes predicted boxes are in the ego frame --
            # confirm against the model's output convention.
            trans = self.data_infos[sample_id]['cams'][
                self.ego_cam]['ego2global_translation']
            rot = self.data_infos[sample_id]['cams'][
                self.ego_cam]['ego2global_rotation']
            rot = pyquaternion.Quaternion(rot)
            annos = list()

            for i, box in enumerate(boxes):
                if tracking_scores[i] < 0: continue
                name = mapped_class_names[labels[i]]
                if name not in self.TRACKING_CLASSES: continue
                center = box[:3]
                # Reorder size dims for NuScenesBox's (w, l, h).
                # NOTE(review): assumes boxes_3d stores
                # (x, y, z, l, w, h, yaw, vx, vy) -- confirm box encoding.
                wlh = box[[4, 3, 5]]
                box_yaw = box[6]
                box_vel = box[7:].tolist()
                # Pad the 2D (vx, vy) velocity with vz = 0.
                box_vel.append(0)
                quat =  pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw)
                nusc_box = NuScenesBox(center, wlh, quat, velocity=box_vel)
                nusc_box.rotate(rot)
                nusc_box.translate(trans)
                # Pick the nuScenes attribute: speed > 0.2 m/s counts as
                # moving for vehicles / riding for cycles; slow objects get
                # standing/stopped or the class default.
                if np.sqrt(nusc_box.velocity[0]**2 +
                           nusc_box.velocity[1]**2) > 0.2:
                    if name in [
                            'car',
                            'construction_vehicle',
                            'bus',
                            'truck',
                            'trailer',
                    ]:
                        attr = 'vehicle.moving'
                    elif name in ['bicycle', 'motorcycle']:
                        attr = 'cycle.with_rider'
                    else:
                        attr = self.DefaultAttribute[name]
                else:
                    if name in ['pedestrian']:
                        attr = 'pedestrian.standing'
                    elif name in ['bus']:
                        attr = 'vehicle.stopped'
                    else:
                        attr = self.DefaultAttribute[name]
                # NOTE(review): tracking_id is a numpy integer here; this
                # relies on mmcv.dump serializing it -- verify, the official
                # tracking eval expects string ids.
                nusc_anno = dict(
                    sample_token=sample_token,
                    translation=nusc_box.center.tolist(),
                    size=nusc_box.wlh.tolist(),
                    rotation=nusc_box.orientation.elements.tolist(),
                    velocity=nusc_box.velocity[:2],
                    tracking_name=name,
                    detection_name=name,
                    detection_score=float(tracking_scores[i]),
                    attribute_name=attr,
                    tracking_score=float(tracking_scores[i]),
                    tracking_id=obj_idxes[i]
                )
                if 'motion_traj' in det:
                    with_motion = True
                    nusc_anno['traj'] = det['motion_traj'][i]
                    nusc_anno['traj_scores'] = det['motion_cls'][i]
                annos.append(nusc_anno)
            # other views results of the same frame should be concatenated
            # (currently disabled: the first view wins, duplicates dropped)
            if sample_token in nusc_annos:
                pass
                # nusc_annos[sample_token].extend(annos)
            else:
                nusc_annos[sample_token] = annos
        nusc_submissions = {
            'meta': self.modality,
            'results': nusc_annos,
        }

        mmcv.mkdir_or_exist(jsonfile_prefix)
        res_path = osp.join(jsonfile_prefix, 'results_nusc_tracking.json')
        print('Results writes to', res_path)
        mmcv.dump(nusc_submissions, res_path)
        return res_path, with_motion



    def _evaluate_motion_single(self,
                                  result_path,
                                  logger=None,
                                  metric='bbox',
                                  result_name='pts_bbox'):
        """Evaluate motion forecasting for a single model in nuScenes protocol.

        Runs the MotionEval suite twice -- once with classes merged into
        motion categories (vehicles/pedestrians) and once with the standard
        detection categories -- evaluating three modes each time: standard
        metrics, motion mAP-minFDE, and EPA.

        Args:
            result_path (str | None): Path of the result file. If None, the
                evaluation is skipped and an empty dict is returned.
            logger (logging.Logger | str | None): Unused; kept for signature
                compatibility with `_evaluate_tracking_single`.
            metric (str): Unused; kept for signature compatibility.
            result_name (str): Unused; kept for signature compatibility.

        Returns:
            dict: Always empty; MotionEval prints/saves its own metrics.
        """
        if result_path is None:
            return {}
        from nuscenes import NuScenes
        from .evals.nuscenes_eval_motion import MotionEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
        }
        if self.nusc is None:
            self.nusc = NuScenes(
                version=self.version, dataroot=self.data_root, verbose=False)

        # (log message, MotionEval eval_mode) pairs shared by both passes.
        # BUG FIX: the detection-category pass previously printed
        # 'evaluate EPA motion metrics...' while running eval_mode
        # 'motion_map'; messages now always match the mode being run.
        eval_modes = (
            ('evaluate standard motion metrics...', 'standard'),
            ('evaluate motion mAP-minFDE metrics...', 'motion_map'),
            ('evaluate EPA motion metrics...', 'epa'),
        )

        def run_all_modes():
            # Run every eval mode on the current self.nusc_eval_motion.
            for message, eval_mode in eval_modes:
                print(message)
                self.nusc_eval_motion.main(
                    plot_examples=0,
                    render_curves=False,
                    eval_mode=eval_mode)

        print('-'*50)
        print(
            'Evaluate on motion category, merge class for vehicles and pedestrians...')
        self.nusc_eval_motion = MotionEval(
            self.nusc,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=True,
            data_infos=self.data_infos,
            ann_file=self.ann_file,
            category_convert_type='motion_category'
        )
        run_all_modes()

        print('-'*50)
        print('Evaluate on detection category...')
        # NOTE(review): unlike the first pass, `ann_file` is not passed here;
        # kept as-is -- confirm whether the asymmetry is intentional.
        self.nusc_eval_motion = MotionEval(
            self.nusc,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=True,
            data_infos=self.data_infos,
            category_convert_type='detection_category'
        )
        run_all_modes()
        return {}
            
    def _evaluate_tracking_single(self,
                                  result_path,
                                  logger=None,
                                  metric='bbox',
                                  result_name='pts_bbox'):
        """Evaluate tracking for a single model in nuScenes protocol.

        Args:
            result_path (str | None): Path of the result file. If None, the
                evaluation is skipped and an empty dict is returned.
            logger (logging.Logger | str | None): Unused; kept for signature
                compatibility with `_evaluate_motion_single`.
            metric (str): Unused; kept for signature compatibility.
            result_name (str): Result name in the metric prefix.
                Default: 'pts_bbox'.

        Returns:
            dict: Dictionary of evaluation details, keyed as
                '<result_name>_NuScenes/<metric_key>'.
        """
        if result_path is None:
            return {}
        # Removed an unused `from nuscenes import NuScenes` import here.
        from nuscenes.eval.tracking.evaluate import TrackingEval
        from nuscenes.eval.common.config import config_factory as track_configs

        output_dir = osp.join(*osp.split(result_path)[:-1])
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
        }

        cfg = track_configs("tracking_nips_2019")
        nusc_eval = TrackingEval(
            config=cfg,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=True,
            nusc_version=self.version,
            nusc_dataroot=self.data_root
        )
        # Running the evaluation writes `metrics_summary.json` into
        # output_dir. The original code assigned the return value and then
        # immediately overwrote it with the reloaded summary, so the
        # in-memory result is simply discarded here.
        nusc_eval.main()
        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
        print(metrics)
        metric_prefix = f'{result_name}_NuScenes'
        keys = ['amota', 'amotp', 'recall', 'motar',
                'gt', 'mota', 'motp', 'mt', 'ml', 'faf',
                'tp', 'fp', 'fn', 'ids', 'frag', 'tid', 'lgd']
        return {f'{metric_prefix}/{key}': metrics[key] for key in keys}
