import copy
import pickle
import argparse
import os
from os import path as osp
import torch
from av2.utils.io import read_feather
import numpy as np
import multiprocessing as mp
import pickle as pkl
from pathlib import Path
import pandas as pd
from typing import Dict, Final
from tqdm import tqdm
from skimage import io

from pcdet.datasets.stereo_dataset_template import StereoDatasetTemplate
from pcdet.utils.calibration_argo2 import Calibration
from pcdet.utils import common_utils, box_utils, depth_map_utils
from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils
from pcdet.datasets.argo2.argo2_utils.so3 import yaw_to_quat, quat_to_yaw

# from .argo2_utils.so3 import yaw_to_quat, quat_to_yaw
# from .argo2_utils.constants import LABEL_ATTR
import av2.utils.io as io_utils
from av2.structures.cuboid import CuboidList

from av2.datasets.sensor.sensor_dataloader import LIDAR_PATTERN
from av2.datasets.sensor.utils import convert_path_to_named_record
from av2.utils.io import  read_city_SE3_ego, read_feather
from av2.utils.synchronization_database import SynchronizationDB
from av2.datasets.sensor.constants import AnnotationCategories
from av2.geometry.camera.pinhole_camera import PinholeCamera

from av2.datasets.sensor.constants import RingCameras, StereoCameras
from av2.evaluation.detection.constants import CompetitionCategories

from av2.geometry.se3 import SE3
import sys
sys.path.append("/home/yaohan.lu/unitr_yc/stereo-dsgn2")

# NOTE(review): hard-coded absolute dataset root; _load_annotations() reads this
# module-level global, so it must match the data layout actually on disk.
dataset_dir = Path("/home/yaohan.lu/unitr_yc/stereo-dsgn2/data/argo2") # replace with absolute path
# Stereo camera sensor names from the AV2 devkit enum.
CAMERAS = tuple(x.value for x in StereoCameras)
# Column order of cuboid parameters in AV2 annotation feather files:
# translation (m), box dimensions (m), orientation quaternion (w, x, y, z).
LABEL_ATTR = (
    "tx_m","ty_m","tz_m","length_m","width_m","height_m","qw","qx","qy","qz",
)
# Annotation category name -> integer index, in enum declaration order.
AV2_ANNO_NAMES_TO_INDEX: Final[Dict[str, int]] = {
    x.value: i for i, x in enumerate(AnnotationCategories)
}

# Per-dataset taxonomy lookup (only "av2" for now).
DATASET_TO_TAXONOMY: Final[Dict[str, Dict[str, int]]] = {
    "av2": AV2_ANNO_NAMES_TO_INDEX,
}
# Category names used in the AV2 detection competition.
CompetitionCLASSES = tuple(x.value for x in CompetitionCategories)

def process_single_segment(segment_path, split, info_list, ts2idx, output_dir, save_bin):
    """Convert every lidar frame of one segment into a frame-info dict.

    Appends one info dict per lidar sweep to ``info_list`` (mutated in place).
    Annotations are only loaded for non-test splits.

    NOTE(review): this calls ``process_and_save_frame``, which is commented out
    in this file — confirm it is defined elsewhere before using this code path.
    """
    test_mode = 'test' in split
    if not test_mode:
        # Per-segment annotation table: one row per (object, timestamp).
        segment_anno = read_feather(Path(osp.join(segment_path, 'annotations.feather')))
    segname = segment_path.split('/')[-1]

    frame_path_list = os.listdir(osp.join(segment_path, 'sensors/lidar/'))

    for frame_name in frame_path_list:
        # Lidar files are named <timestamp_ns>.feather.
        ts = int(osp.basename(frame_name).split('.')[0])

        if not test_mode:
            # Select only the annotation rows belonging to this sweep.
            frame_anno = segment_anno[segment_anno['timestamp_ns'] == ts]
        else:
            frame_anno = None

        frame_path = osp.join(segment_path, 'sensors/lidar/', frame_name)
        frame_info = process_and_save_frame(frame_path, frame_anno, ts2idx, segname, output_dir, save_bin)
        info_list.append(frame_info)


# def process_and_save_frame(frame_path, frame_anno, ts2idx, segname, output_dir, save_bin):
#     frame_info = {}
#     frame_info['uuid'] = segname + '/' + frame_path.split('/')[-1].split('.')[0]
#     frame_info['sample_idx'] = ts2idx[frame_info['uuid']]
#     frame_info['image'] = dict()
#     frame_info['point_cloud'] = dict(
#         num_features=4,
#         velodyne_path=None,
#     )
#     ### process calib
    
#     intrinsics_df = io_utils.read_feather(intrinsics_path).set_index("sensor_name")
#     frame_info['calib'] = dict()  # not need for lidar-only
#     frame_info['pose'] = dict()  # not need for single frame
#     frame_info['annos'] = dict(
#         name=None,
#         truncated=None,
#         occluded=None,
#         alpha=None,
#         bbox=None,  # not need for lidar-only
#         dimensions=None,
#         location=None,
#         rotation_y=None,
#         index=None,
#         group_ids=None,
#         camera_id=None,
#         difficulty=None,
#         num_points_in_gt=None,
#     )
#     frame_info['sweeps'] = []  # not need for single frame
#     if frame_anno is not None:
#         frame_anno = frame_anno[frame_anno['num_interior_pts'] > 0]
#         cuboid_params = frame_anno.loc[:, list(LABEL_ATTR)].to_numpy()
#         cuboid_params = torch.from_numpy(cuboid_params)
#         yaw = quat_to_yaw(cuboid_params[:, -4:])
#         xyz = cuboid_params[:, :3]
#         lwh = cuboid_params[:, [3, 4, 5]]

#         cat = frame_anno['category'].to_numpy().tolist()
#         cat = [c.lower().capitalize() for c in cat]
#         cat = np.array(cat)

#         num_obj = len(cat)

#         annos = frame_info['annos']
#         annos['name'] = cat
#         annos['truncated'] = np.zeros(num_obj, dtype=np.float64)
#         annos['occluded'] = np.zeros(num_obj, dtype=np.int64)
#         annos['alpha'] = -10 * np.ones(num_obj, dtype=np.float64)
#         annos['dimensions'] = lwh.numpy().astype(np.float64)
#         annos['location'] = xyz.numpy().astype(np.float64)
#         annos['rotation_y'] = yaw.numpy().astype(np.float64)
#         annos['index'] = np.arange(num_obj, dtype=np.int32)
#         annos['num_points_in_gt'] = frame_anno['num_interior_pts'].to_numpy().astype(np.int32)
#     # frame_info['group_ids'] = np.arange(num_obj, dtype=np.int32)
#     prefix2split = {'0': 'training', '1': 'training', '2': 'testing'}
#     sample_idx = frame_info['sample_idx']
#     split = prefix2split[sample_idx[0]]
#     abs_save_path = osp.join(output_dir, split, 'velodyne', f'{sample_idx}.bin')
#     rel_save_path = osp.join(split, 'velodyne', f'{sample_idx}.bin')
#     frame_info['point_cloud']['velodyne_path'] = rel_save_path
#     if save_bin:
#         save_point_cloud(frame_path, abs_save_path)
#     return frame_info


# def save_point_cloud(frame_path, save_path):
#     lidar = read_feather(Path(frame_path))
#     lidar = lidar.loc[:, ['x', 'y', 'z', 'intensity']].to_numpy().astype(np.float32)
#     lidar.tofile(save_path)


def prepare(root):
    """Scan an AV2 ``sensor`` root and build frame-index bookkeeping.

    Args:
        root: dataset root path whose last component must be ``sensor``.

    Returns:
        Tuple ``(ts2idx, seg_path_list, seg_split_list)`` where ``ts2idx``
        maps ``'<segment_name>/<timestamp>'`` to a 7-digit bin index whose
        leading digit encodes the split (0=train, 1=val, 2=test).
    """
    assert root.split('/')[-1] == 'sensor'
    ts2idx = {}
    ts_list = []
    bin_idx_list = []
    seg_path_list = []
    seg_split_list = []

    # include test if you need it
    splits = ['train', 'val']  # , 'test']
    # 0 for training, 1 for validation and 2 for testing.
    prefixes = [0, 1, ]  # 2]
    sample_counts = {'train': 0, 'val': 0, 'test': 0}

    for split, prefix in zip(splits, prefixes):
        split_root = osp.join(root, split)
        seg_names = os.listdir(split_root)
        print(f'num of {split} segments:', len(seg_names))
        for seg_idx, seg_name in enumerate(seg_names):
            seg_path = osp.join(split_root, seg_name)
            seg_path_list.append(seg_path)
            seg_split_list.append(split)
            # Bin indices reserve exactly three digits per segment/frame.
            assert seg_idx < 1000
            lidar_dir = osp.join(seg_path, 'sensors/lidar/')
            for frame_idx, frame_name in enumerate(os.listdir(lidar_dir)):
                assert frame_idx < 1000
                bin_idx = f'{prefix}{seg_idx:03d}{frame_idx:03d}'
                # Timestamps are only unique within a segment, so prefix the
                # segment name to get a globally unique key.
                stamp = frame_name.split('/')[-1].split('.')[0]
                ts = seg_name + '/' + stamp
                ts2idx[ts] = bin_idx
                ts_list.append(ts)
                bin_idx_list.append(bin_idx)
        # Samples added by this split = total so far minus earlier splits.
        sample_counts[split] = len(ts_list) - sum(sample_counts.values())

    print('num of train samples:', sample_counts['train'])
    print('num of val samples:', sample_counts['val'])
    print('num of test samples:', sample_counts['test'])

    # Every frame key and every bin index must be globally unique.
    assert len(ts_list) == len(set(ts_list))
    assert len(bin_idx_list) == len(set(bin_idx_list))
    return ts2idx, seg_path_list, seg_split_list

def create_argo2_infos(seg_path_list, seg_split_list, info_list, ts2idx, output_dir, save_bin, token, num_process):
    """OpenPCDet Lidar only.

    Processes the subset of segments assigned to this worker — segments whose
    index is congruent to ``token`` modulo ``num_process`` — appending the
    resulting frame infos to ``info_list`` in place.
    """
    total = len(seg_path_list)
    for seg_i, seg_path in enumerate(seg_path_list):
        # Round-robin work split across worker processes.
        if seg_i % num_process == token:
            print(f'processing segment: {seg_i}/{total}')
            process_single_segment(seg_path, seg_split_list[seg_i], info_list, ts2idx, output_dir, save_bin)


def create_av2_infos(dataset_dir, split, out_dir):
    """Build stereo-frame info dicts for one AV2 split and pickle them.

    For every lidar sweep in ``dataset_dir/split``, finds the synchronized
    stereo_front_left image, loads calibration, and transforms the cuboid
    annotations into the pseudo-lidar (left stereo camera) frame. The result
    is saved to ``out_dir/av2_<split>_infos_stereo.pkl``.

    NOTE(review): this parameter shadows the module-level ``dataset_dir``
    global that ``_load_annotations`` reads — they must point at the same
    root for annotations to be found.
    """
    src_dir = dataset_dir / split
    # All lidar sweep paths in this split, sorted by timestamp.
    paths = sorted(src_dir.glob(LIDAR_PATTERN), key=lambda x: int(x.stem))
    # One record per sweep: split, log_id, sensor_name, timestamp_ns.
    records = [convert_path_to_named_record(p) for p in paths]
    sensor_caches = pd.DataFrame(records)
    # Group frames of the same log together, then sort by log id.
    sensor_caches.set_index(["log_id", "sensor_name", "timestamp_ns"], inplace=True)
    sensor_caches.sort_index(inplace=True)
    loader = SynchronizationDB(dataset_dir=src_dir)
    av2_split_infos = []
    for i in tqdm(range(len(sensor_caches))): 
        # if i % 5 != 0: # to create mini pkl for debug
        #     continue
        infos = {}
        record = sensor_caches.iloc[i].name
        log_id, _, lidar_timestamp_ns = record
        log_dir = src_dir / log_id
        infos['scene_id'] = log_id
        infos['lidar_timestamp_ns'] = lidar_timestamp_ns
        cam_infos = {}
        
        # Look up the camera timestamp closest to this lidar timestamp.
        cam_name = 'stereo_front_left'
        cam_timestamp_ns = loader.get_closest_cam_channel_timestamp(
            lidar_timestamp=lidar_timestamp_ns, cam_name=cam_name, log_id=log_id)
        right_image_path = log_dir / "sensors" / "cameras" / "stereo_front_right" / f"{cam_timestamp_ns}.jpg"
        # Skip sweeps without a complete, synchronized stereo pair.
        if cam_timestamp_ns is None :
            print("No corresponding image")
            continue
        if not right_image_path.exists():
            print("No corresponding right image")
            continue
        
        # Left-image path relative to the dataset root.
        fpath = Path(split) / log_id / "sensors" / "cameras" / cam_name / f"{cam_timestamp_ns}.jpg"
        cam_infos[cam_name] = dict(
            fpath=fpath,
            cam_timestamp_ns=cam_timestamp_ns,
        )
        infos['image'] = cam_infos
        calib_info = load_log_calib(log_dir)
        if calib_info:
            infos['calib'] = calib_info
        else:
            print(f'Fail to load calib:{log_dir}')
            continue
        # Pose of the left stereo camera in the ego frame; its inverse maps
        # ego-frame annotations into the camera frame.
        ego_SE3_pseudo_lidar = io_utils.read_ego_SE3_sensor(log_dir)['stereo_front_left']
        # Axis permutation applied after moving into the camera frame —
        # presumably camera (x right, y down, z fwd) -> pseudo-lidar
        # (x fwd, y left, z up); verify against the camera convention.
        to_pseudo_lidar = SE3(rotation=np.array([[0,0,1],[-1,0,0],[0,-1,0]]), translation=np.array([0,0,0]))
        annotations = _load_annotations(split, log_id, lidar_timestamp_ns)
        annotations_psuedo_lidar = annotations.transform(ego_SE3_pseudo_lidar.inverse())
        annotations_psuedo_lidar = annotations_psuedo_lidar.transform(to_pseudo_lidar)
        
        xyz = annotations_psuedo_lidar.xyz_center_m
        lwh = annotations_psuedo_lidar.dims_lwh_m
        num_obj = len(xyz)

        # KITTI-style annotation dict, in pseudo-lidar coordinates.
        annos = {}
        annos['name'] = np.array(annotations.categories)
        annos['truncated'] = np.zeros(num_obj)
        annos['occluded'] = np.zeros(num_obj)
        annos['alpha'] = -10 * np.ones(num_obj, dtype=np.float64)
        # annotations['bbox'] = 
        annos['dimensions'] = lwh
        annos['location'] = xyz
        # Yaw extracted from each cuboid's rotation matrix.
        annos['rotation_y'] = np.array([SE3_rotationz(anno.dst_SE3_object) for anno in annotations.cuboids])
        infos['annos'] = annos
        
        av2_split_infos.append(infos)

    print('{}_sample:{}'.format(split, len(av2_split_infos)))
    info_path = osp.join(out_dir, 'av2_{}_infos_stereo.pkl'.format(split))
    with open(info_path, 'wb') as f:
        pickle.dump(av2_split_infos, f)
    print('Argo2 info file is saved to %s' % info_path)
    
def load_log_calib(log_dir):
    """Build a KITTI-style calibration dict for one AV2 log.

    Returns a dict with ``P2`` (left intrinsics as [K|0], 3x4), ``P3`` (a 3x4
    projection mapping left-camera-frame points into the right image),
    ``R0_rect`` (identity) and ``Tr_velo_to_cam`` (ego -> left-camera
    extrinsics), or ``None`` when the left stereo camera pose is missing.
    """
    intrinsics_path = log_dir / "calibration" / "intrinsics.feather"
    intrinsics_df = io_utils.read_feather(intrinsics_path).set_index("sensor_name")

    def camera_K3x4(sensor_name):
        # 3x4 intrinsic projection [K | 0] for one camera.
        row = intrinsics_df.loc[sensor_name]
        K = intrinsics_matrix(
            fx=row["fx_px"],
            fy=row["fy_px"],
            cx=row["cx_px"],
            cy=row["cy_px"],
        )
        return np.concatenate([K, np.zeros((3, 1))], axis=1)

    P2 = camera_K3x4('stereo_front_left')
    sensor_name_to_pose = io_utils.read_ego_SE3_sensor(log_dir)
    if 'stereo_front_left' not in sensor_name_to_pose:
        return None
    ego_SE3_left = sensor_name_to_pose['stereo_front_left']  # left cam -> ego

    ### Right camera
    K3 = camera_K3x4('stereo_front_right')
    ego_SE3_right = sensor_name_to_pose['stereo_front_right']

    # Rigid transform taking left-camera coordinates to right-camera coordinates.
    right_T_left = ego_SE3_right.inverse().transform_matrix @ ego_SE3_left.transform_matrix
    P3 = K3 @ right_T_left

    return {
        'P2': P2,
        'P3': P3,
        'R0_rect': np.eye(3, dtype=float),
        'Tr_velo_to_cam': ego_SE3_left.inverse().transform_matrix,
    }
    
def _load_annotations(split, log_id, timestamp_ns):
    """Load the cuboid annotations for a single lidar sweep.

    The per-log annotations file covers the whole sequence; this filters it
    down to cuboids whose timestamp equals ``timestamp_ns``. Relies on the
    module-level ``dataset_dir`` global for the dataset root.
    """
    anno_path = dataset_dir / split / log_id / "annotations.feather"
    all_cuboids = CuboidList.from_feather(anno_path).cuboids
    sweep_cuboids = [c for c in all_cuboids if c.timestamp_ns == timestamp_ns]
    return CuboidList(cuboids=sweep_cuboids)

def SE3_rotationz(SE3_obj:SE3):
    """Return the yaw (rotation about +z) of an SE3 pose, in radians."""
    R = SE3_obj.rotation
    return np.arctan2(R[1, 0], R[0, 0])

def intrinsics_matrix(fx, fy, cx, cy):
    """Assemble the 3x3 pinhole intrinsic matrix K from focal lengths
    (fx, fy) and principal point (cx, cy), all in pixels."""
    return np.array(
        [[fx, 0.0, cx],
         [0.0, fy, cy],
         [0.0, 0.0, 1.0]],
        dtype=float,
    )

class Argo2Dataset(StereoDatasetTemplate):
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        """
        Args:
            root_path: dataset root directory; split folders live below it
            dataset_cfg: config node providing DATA_SPLIT, INFO_PATH,
                FOV_POINTS_ONLY, optional GET_ITEM_LIST / EVALUATE_RANGE
            class_names: detection class names used for label encoding
            training: whether this dataset is used for training
            logger: optional logger for progress messages
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        # Split name for the current mode, e.g. 'train' or 'val'.
        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
        # self.root_split_path = self.root_path / ('train' if self.split != 'test' else 'val')
        self.root_split_path = self.root_path / self.split

        # Optional ImageSets/<split>.txt listing sample ids for this split.
        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None

        self.argo2_infos = []
        self.include_argo2_data(self.mode)
        # Maximum evaluation range in meters (default 200.0).
        self.evaluate_range = dataset_cfg.get("EVALUATE_RANGE", 200.0)

    def include_argo2_data(self, mode):
        """Load the pre-generated info pickles for ``mode`` into self.argo2_infos.

        Args:
            mode: key into ``dataset_cfg.INFO_PATH`` selecting the pkl files.
        """
        if self.logger is not None:
            self.logger.info('Loading Argoverse2 dataset')
        argo2_infos = []

        for info_path in self.dataset_cfg.INFO_PATH[mode]:
            info_path = self.root_path / info_path
            if not info_path.exists():
                # BUGFIX: the warning used to be emitted unconditionally and
                # crashed with AttributeError when no logger was supplied.
                if self.logger is not None:
                    self.logger.warning('Info path %s not found', info_path)
                continue
            with open(info_path, 'rb') as f:
                argo2_infos.extend(pickle.load(f))

        self.argo2_infos.extend(argo2_infos)

        if self.logger is not None:
            # Lazy %-args: the message is only formatted if actually emitted.
            self.logger.info('Total samples for Argo2 dataset: %d', len(argo2_infos))

    def set_split(self, split):
        """Re-initialize the dataset for a different split.

        NOTE(review): unlike ``__init__`` (which uses ``root_path / split``),
        this maps the split to a 'training'/'testing' directory name — confirm
        which on-disk layout is intended before relying on this method.
        """
        super().__init__(
            dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
        )
        self.split = split
        self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')

        # Optional ImageSets/<split>.txt listing sample ids for this split.
        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None

    def get_lidar(self, scene_id, lidar_timestamp):
        """Read one lidar sweep and return an (N, 4) float32 array of
        [x, y, z, intensity]."""
        sweep_path = self.root_split_path / scene_id / 'sensors' / 'lidar' / ('%s.feather' % lidar_timestamp)
        assert sweep_path.exists()
        frame = read_feather(sweep_path)
        return frame.loc[:, ['x', 'y', 'z', 'intensity']].to_numpy().astype(np.float32)

    def get_image(self, scene_id, timestamp, cam_name):
        """Load one camera image as a writable array copy.

        Args:
            scene_id: AV2 log id.
            timestamp: camera timestamp (ns) naming the jpg file.
            cam_name: camera folder name, e.g. 'stereo_front_left'.

        Returns:
            Image array, or None when the file does not exist on disk.
        """
        img_file = self.root_split_path / scene_id / 'sensors' / 'cameras' \
            / cam_name / ('%s.jpg' % timestamp)
        # BUGFIX: was `try: assert ... except:` — a bare except abusing assert
        # for control flow (and silently broken under `python -O`, where
        # asserts are stripped). Use an explicit existence check instead.
        if not img_file.exists():
            print(f'img file not found: {img_file}')
            return None
        return io.imread(img_file).copy()
    
    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        Args:
            batch_dict:
                frame_id:
            pred_dicts: list of pred_dicts
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path: optional directory; one KITTI-style txt is written
                per frame when given

        Returns:
            list of per-frame annotation dicts (one per entry in pred_dicts)
        """
        def get_template_prediction(num_samples):
            # Zero-filled annotation template so empty frames still have
            # every expected key.
            ret_dict = {
                'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
                'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
                'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
                'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
                'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
            }
            return ret_dict

        def generate_single_sample_dict(batch_index, box_dict):
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy() # pseudo lidar
            pred_labels = box_dict['pred_labels'].cpu().numpy()
            pred_dict = get_template_prediction(pred_scores.shape[0])
            if pred_scores.shape[0] == 0:
                return pred_dict

            pred_boxes_img = pred_boxes
            # pred_boxes_camera = pred_boxes
            # Convert boxes from the pseudo-lidar frame back to lidar frame.
            pred_boxes_camera = box_utils.boxes3d_pseudo_lidar_to_lidar(pred_boxes,batch_dict['calib'][batch_index])

            # pred_labels are 1-based class ids.
            pred_dict['name'] = np.array(class_names)[pred_labels - 1]
            pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
            # NOTE(review): 'bbox' holds the full 7-dof boxes here (the boxes
            # actually evaluated), not a 2D box as the (N, 4) template implies.
            pred_dict['bbox'] = pred_boxes_img
            pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
            pred_dict['location'] = pred_boxes_camera[:, 0:3]
            pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
            pred_dict['score'] = pred_scores
            # pred_dict['boxes_lidar'] = pred_boxes

            return pred_dict

        annos = []
        for index, box_dict in enumerate(pred_dicts):
            frame_id = batch_dict['frame_id'][index]

            single_pred_dict = generate_single_sample_dict(index, box_dict)
            single_pred_dict['frame_id'] = frame_id
            annos.append(single_pred_dict)

            if output_path is not None:
                # Dump each frame as a KITTI-format label txt.
                cur_det_file = output_path / ('%s.txt' % frame_id)
                with open(cur_det_file, 'w') as f:
                    bbox = single_pred_dict['bbox']
                    loc = single_pred_dict['location']
                    dims = single_pred_dict['dimensions']  # lhw -> hwl

                    for idx in range(len(bbox)):
                        print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
                              % (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
                                 bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
                                 dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0],
                                 loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx],
                                 single_pred_dict['score'][idx]), file=f)

        return annos

    def __len__(self):
        """Sample count; scaled by total_epochs when all iterations are
        merged into one epoch."""
        count = len(self.argo2_infos)
        if self._merge_all_iters_to_one_epoch:
            count *= self.total_epochs
        return count

    def __getitem__(self, index):
        """Load one stereo sample (points, left/right images, calib, GT boxes)
        and run it through ``prepare_data``.

        NOTE(review): may return ``None`` when either stereo image is missing
        on disk — the collate function must tolerate that.
        """
        # index = 4
        if self._merge_all_iters_to_one_epoch:
            # Wrap around when one "epoch" spans all iterations.
            index = index % len(self.argo2_infos)

        # Deep copy so per-sample mutations never leak back into the cache.
        info = copy.deepcopy(self.argo2_infos[index])

        # sample_idx = info['point_cloud']['velodyne_path'].split('/')[-1].rstrip('.bin')
        scene_id = info['scene_id']
        lidar_timestamp_ns = info['lidar_timestamp_ns']


        calib = Calibration(info['calib'])
        get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])

    
        # Lidar points, projected into the rectified camera frame.
        raw_points = self.get_lidar(scene_id, lidar_timestamp_ns)
        pts_rect = calib.lidar_to_rect(raw_points[:, 0:3])
        reflect = raw_points[:, 3:4]

        # Image
        cam_timestamp = info['image']['stereo_front_left']['cam_timestamp_ns']
        left_img = self.get_image(scene_id, cam_timestamp, 'stereo_front_left')
        right_img = self.get_image(scene_id, cam_timestamp, 'stereo_front_right')
        if left_img is None or right_img is None:
            return None
        # img_shape = info['image']['image_shape'] ### TODO
        img_shape = left_img.shape
        if self.dataset_cfg.FOV_POINTS_ONLY:
            # Keep only points that project inside the left image.
            fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
            pts_rect = pts_rect[fov_flag]
            reflect = reflect[fov_flag]


        input_points = calib.rect_to_lidar_pseudo(pts_rect)
        input_dict = {
            'scene_id': scene_id,
            'frame_id':lidar_timestamp_ns,
            'cam_timestamp':cam_timestamp,
            'points': input_points,
            'calib': calib,
            'left_img': left_img,
            'right_img': right_img,
            'image_shape': left_img.shape[:2]
            }

        if 'annos' in info:
            annos = info['annos']
            loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
            gt_names = annos['name']
            # (N, 7) boxes: [x, y, z, l, w, h, yaw].
            gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)

            input_dict.update({
                'gt_names': np.array([x.upper() for x in gt_names]),
                'gt_boxes': gt_bboxes_3d # already in pseudo LiDAR coord
            })

        data_dict = self.prepare_data(data_dict=input_dict)

        return data_dict
    
    def prepare_data(self, data_dict):
        """
        Args:
            data_dict:
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
                gt_names: optional, (N), string
                ...

        Returns:
            data_dict:
                frame_id: string
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
                gt_names: optional, (N), string
                use_lead_xyz: bool
                voxels: optional (num_voxels, max_points_per_voxel, 3 + C)
                voxel_coords: optional (num_voxels, 3)
                voxel_num_points: optional (num_voxels)
                ...
        """
        
        if self.training:
            # import pdb;pdb.set_trace()
            if not 'gt_boxes' in data_dict:
                # TODO: in case using data augmentor, please pay attention to the coordinate
                data_dict = self.data_augmentor.forward(data_dict=data_dict)
            else:
                # Mark which GT boxes belong to the configured classes.
                gt_boxes_mask = np.array(
                    [n.upper() in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)

                # TODO: in case using data augmentor, please pay attention to the coordinate
                data_dict = self.data_augmentor.forward(
                    data_dict={
                        **data_dict,
                        'gt_boxes_mask': gt_boxes_mask
                    }
                )

                # Resample another frame if augmentation left no GT boxes.
                if self.training and len(data_dict['gt_boxes']) == 0:
                    new_index = np.random.randint(self.__len__())
                    return self.__getitem__(new_index)

                # Resample when too few points survive (degenerate frame).
                if self.training and len(data_dict['points']) < 200:
                    print(f'------- frame {data_dict["frame_id"]} got only {len(data_dict["points"])} points')
                    new_index = np.random.randint(self.__len__())
                    return self.__getitem__(new_index)

        elif (not self.training) and self.data_augmentor:
            # only do some basic image scaling and cropping
            data_dict = self.data_augmentor.forward(data_dict)

        if data_dict.get('gt_boxes', None) is not None:
            # Keep a copy of GT boxes before any 3D augmentation is applied.
            if 'gt_boxes_no3daug' not in data_dict:
                data_dict['gt_boxes_no3daug'] = data_dict['gt_boxes'].copy()
            # Filter all GT arrays down to the configured class names.
            selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)
            if len(selected) != len(data_dict['gt_names']):
                for key in ['gt_names', 'gt_boxes', 'gt_truncated', 'gt_occluded', 'gt_difficulty', 'gt_index', 'gt_boxes_no3daug']:
                    if key in data_dict:
                        data_dict[key] = data_dict[key][selected]
            # Append the 1-based class id as an extra column on gt_boxes.
            gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
            data_dict['gt_boxes'] = np.concatenate(
                (data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
            data_dict['gt_boxes_no3daug'] = np.concatenate(
                (data_dict['gt_boxes_no3daug'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)

        # convert to 2d gt boxes
        image_shape = data_dict['left_img'].shape[:2]
        if 'gt_boxes' in data_dict:
            # NOTE(review): gt_classes (used below) is only defined when the
            # branch above ran; a 'gt_boxes' key holding None would raise
            # NameError here — confirm callers never pass that.
            gt_boxes_no3daug = data_dict['gt_boxes_no3daug']
            gt_boxes_no3daug_cam = box_utils.boxes3d_lidar_to_kitti_camera(gt_boxes_no3daug, None, pseudo_lidar=True)
            gt_box_2d, mask_front = box_utils.boxes3d_kitti_camera_to_imageboxes(
                gt_boxes_no3daug_cam, data_dict['calib'], image_shape, fix_neg_z_bug=False, return_neg_z_mask=True)
            data_dict['gt_boxes_2d'] = gt_box_2d
            data_dict['gt_centers_2d'] = box_utils.boxes3d_kitti_camera_to_imagecenters(
                gt_boxes_no3daug_cam, data_dict['calib'], image_shape)
            # Append class ids to the 2D boxes as well.
            data_dict['gt_boxes_2d'] = np.concatenate([data_dict['gt_boxes_2d'], gt_classes.reshape(-1, 1).astype(np.float32)], axis=1)
        
        if self.point_feature_encoder:
            data_dict = self.point_feature_encoder.forward(data_dict)
        if self.data_processor: # masks points outside the configured range
            data_dict = self.data_processor.forward(data_dict=data_dict)

        # generate depth gt image
        # uses no 3d augs
        points_no3daug = data_dict.get('points_no3daug', data_dict['points'])
        completion_points = data_dict.get('completion_points', points_no3daug )
        rect_points = Calibration.lidar_pseudo_to_rect(completion_points[:, :3])
        data_dict['depth_gt_img'] = depth_map_utils.points_to_depth_map(rect_points, image_shape, data_dict['calib'])
        if self.complete_and_predict_depth:
            rect_input_points = Calibration.lidar_pseudo_to_rect(points_no3daug[:, :3])
            data_dict['input_depth_gt_img'] = depth_map_utils.points_to_depth_map(rect_input_points, image_shape, data_dict['calib'])
        if 'gt_boxes_no3daug' in data_dict:
            # Foreground mask: depth pixels falling inside any GT box.
            data_dict['depth_fgmask_img'] = roiaware_pool3d_utils.depth_map_in_boxes_cpu(
                data_dict['depth_gt_img'], data_dict['gt_boxes_no3daug'][:, :7], data_dict['calib'], expand_distance=0., expand_ratio=1.0)

        if 'random_T' in data_dict:
            data_dict['inv_random_T'] = np.linalg.inv(data_dict['random_T'])

        # Drop intermediate keys that are not consumed downstream.
        data_dict.pop('points_no3daug', None)
        data_dict.pop('did_3d_transformation', None)
        data_dict.pop('road_plane', None)
        data_dict.pop('constraints',None)
        data_dict.pop('completion_points', None)

        return data_dict

    @staticmethod
    def get_fov_flag(pts_rect, img_shape, calib):
        """Mask of rectified points that project inside the image with
        non-negative depth.

        Args:
            pts_rect: (N, 3) points in the rectified camera frame.
            img_shape: image shape, (H, W, ...).
            calib: calibration object providing rect_to_img().

        Returns:
            (N,) boolean array, True for points inside the camera FOV.
        """
        pts_img, pts_depth = calib.rect_to_img(pts_rect)
        inside_w = (pts_img[:, 0] > 0) & (pts_img[:, 0] < img_shape[1] - 1)
        inside_h = (pts_img[:, 1] > 0) & (pts_img[:, 1] < img_shape[0] - 1)
        return inside_w & inside_h & (pts_depth >= 0)
    
    def format_results(self,
                       outputs,
                       class_names,
                       pklfile_prefix=None,
                       submission_prefix=None,
                       ):
        """Format the results to .feather file with argo2 format.

        Args:
            outputs (list[dict]): Testing results of the dataset, one dict
                per sample, aligned with self.argo2_infos.
            class_names: detection class names (currently unused; categories
                come from each output's 'name' field).
            pklfile_prefix (str | None): The prefix of pkl files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            submission_prefix (str | None): The prefix of submitted files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Default: None.

        Returns:
            pandas.DataFrame of serialized detections indexed by
            (log_id, timestamp_ns), sorted, ready for AV2 evaluation.
        """
        import pandas as pd

        assert len(self.argo2_infos) == len(outputs)
        num_samples = len(outputs)
        print('\nGot {} samples'.format(num_samples))

        serialized_dts_list = []

        print('\nConvert predictions to Argoverse 2 format')
        for i in range(num_samples):
            out_i = outputs[i]
            # Skip frames with no detections.
            if len(out_i['bbox']) == 0:
                continue
            # log_id, ts = self.argo2_infos[i]['uuid'].split('/')
            log_id = self.argo2_infos[i]['scene_id']
            ts = self.argo2_infos[i]['lidar_timestamp_ns']
            # NOTE(review): track_uuid is assigned but never used.
            track_uuid = None
            #cat_id = out_i['labels_3d'].numpy().tolist()
            #category = [class_names[i].upper() for i in cat_id]
            category = [class_name.upper() for class_name in out_i['name']]
            # One row per detection, columns ordered per LABEL_ATTR.
            serialized_dts = pd.DataFrame(
                self.lidar_box_to_argo2(out_i['bbox']).numpy(), columns=list(LABEL_ATTR)
            )
            serialized_dts["score"] = out_i['score']
            serialized_dts["log_id"] = log_id
            serialized_dts["timestamp_ns"] = int(ts)
            serialized_dts["category"] = category
            serialized_dts_list.append(serialized_dts)

        dts = (
            pd.concat(serialized_dts_list)
            .set_index(["log_id", "timestamp_ns"])
            .sort_index()
        )

        # Order detections by descending confidence before saving.
        dts = dts.sort_values("score", ascending=False).reset_index()

        if pklfile_prefix is not None:
            # Despite the parameter name, the output format is .feather.
            if not pklfile_prefix.endswith(('.feather')):
                pklfile_prefix = f'{pklfile_prefix}.feather'
            dts.to_feather(pklfile_prefix)
            print(f'Result is saved to {pklfile_prefix}.')

        dts = dts.set_index(["log_id", "timestamp_ns"]).sort_index()

        return dts

    def lidar_box_to_argo2(self, boxes):
        """Convert (N, 7) [x, y, z, l, w, h, yaw] boxes into AV2 cuboid
        parameters [tx, ty, tz, l, w, h, qw, qx, qy, qz]."""
        boxes = torch.Tensor(boxes)
        centers = boxes[:, :3]
        dims = boxes[:, 3:6]
        quats = yaw_to_quat(boxes[:, 6])
        return torch.cat([centers, dims, quats], dim=1)

    def evaluation(self,
                 results,
                 class_names,
                 eval_metric='waymo',
                 logger=None,
                 pklfile_prefix=None,
                 submission_prefix=None,
                 show=False,
                 output_path=None,
                 pipeline=None):
        """Evaluation in Argo2 protocol.

        Formats the predictions, loads the validation ground-truth feather
        file, restricts ground truths to the evaluation range and the stereo
        camera field of view, and runs the Argoverse 2 detection API.

        Args:
            results (list[dict]): Testing results of the dataset.
            class_names (list[str]): Class names used to label predictions.
            eval_metric (str): Requested metric flavor. Default: 'waymo'.
                Currently unused; the Argoverse 2 API is always used.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
                (Unused in this implementation.)
            pklfile_prefix (str | None): The prefix of the result file. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". When specified, predictions are also saved as
                a .feather file. Default: None.
            submission_prefix (str | None): The prefix of submission datas.
                If not specified, the submission data will not be generated.
            show (bool): Whether to visualize. Default: False. (Unused.)
            output_path (str): Path to save the visualization results.
                Default: None. (Unused.)
            pipeline (list[dict], optional): raw data loading for showing.
                Default: None. (Unused.)

        Returns:
            tuple: ``(metrics, ap_dict)`` where ``metrics`` is the
            per-category metrics DataFrame restricted to the evaluated
            categories plus "AVERAGE_METRICS", and ``ap_dict`` maps each
            metrics row index to a JSON string of that row.
        """
        from av2.evaluation.detection.constants import CompetitionCategories
        from av2.evaluation.detection.utils import DetectionCfg
        from av2.evaluation.detection.eval import evaluate
        from av2.utils.io import read_feather
        # TODO(review): confirm dts and gts are expressed in the same
        # coordinate frame before evaluating.
        dts = self.format_results(results, class_names, pklfile_prefix, submission_prefix)
        argo2_root = self.root_path
        val_anno_path = osp.join(argo2_root, 'val_anno.feather')
        gts = read_feather(Path(val_anno_path))
        # Keep only ground truths within the configured evaluation range
        # (planar distance from the ego vehicle origin).
        gts['dist'] = np.sqrt(gts['tx_m']**2 + gts['ty_m']**2)
        gts=gts[gts['dist'] <= self.evaluate_range]

        # Restrict ground truths to boxes visible in the stereo camera's
        # field of view, processed one log at a time.
        grouped = gts.groupby('log_id')
        # Fixed axis permutation taking the camera frame to the
        # pseudo-lidar frame (no translation).
        to_pseudo_lidar = SE3(rotation=np.array([[0,0,1],[-1,0,0],[0,-1,0]]), translation=np.array([0,0,0]))
        from pcdet.utils.box_utils import mask_boxes_outside_range_numpy
        mask_fovs = []
        for log_id, group in grouped:
            gts_cublist = CuboidList.from_dataframe(group)
            log_dir = self.root_path / 'val' / log_id
            if not log_dir.exists():
                continue  # TODO(review): temporary skip until the dataset is complete
            sensor_name_to_pose = io_utils.read_ego_SE3_sensor(log_dir)
            if 'stereo_front_left' not in sensor_name_to_pose:
                print(f'SE3 not found, skipping:{log_id}')
                continue
            ego_SE3_cam = sensor_name_to_pose['stereo_front_left'] # camera -> ego extrinsics
            # ego frame -> camera frame -> pseudo-lidar frame.
            annotations_psuedo_lidar = gts_cublist.transform(ego_SE3_cam.inverse())
            annotations_psuedo_lidar = annotations_psuedo_lidar.transform(to_pseudo_lidar)

            xyz = annotations_psuedo_lidar.xyz_center_m
            lwh = annotations_psuedo_lidar.dims_lwh_m
            calib_info = load_log_calib(log_dir)
            calib = Calibration(calib_info)
            # Hard-coded image size (H, W); presumably the AV2 stereo camera
            # resolution -- TODO confirm against the sensor metadata.
            image_shape = (1550,2048)
            center_rect = calib.lidar_pseudo_to_rect(xyz)
            mask_fov = self.get_fov_flag(center_rect, image_shape, calib)
            # Keep the pandas indices of boxes inside the FOV so they can be
            # matched back against the full gts frame below.
            group = group[mask_fov]
            mask_fovs.append(group.index)
        # NOTE(review): np.concatenate raises on an empty list, i.e. when no
        # log directory was found/usable above.
        combined_mask = gts.index.isin(np.concatenate(mask_fovs))
        gts = gts[combined_mask]

        gts = gts.set_index(["log_id", "timestamp_ns"]).sort_values("category")
        # Evaluate only on (log_id, timestamp_ns) pairs present in both the
        # predictions and the ground truth.
        valid_uuids_gts = gts.index.tolist()
        valid_uuids_dts = dts.index.tolist()
        valid_uuids = set(valid_uuids_gts) & set(valid_uuids_dts)
        gts = gts.loc[list(valid_uuids)].sort_index()

        # Intersect the official competition taxonomy with the categories
        # actually present in the ground truth.
        categories = set(x.value for x in CompetitionCategories)
        categories &= set(gts["category"].unique().tolist())

        dataset_dir = Path(argo2_root) / 'val'
        cfg = DetectionCfg(
            dataset_dir=dataset_dir,
            categories=tuple(sorted(categories)),
            max_range_m=self.evaluate_range,
            eval_only_roi_instances=True,
        )

        # Evaluate using Argoverse detection API.
        eval_dts, eval_gts, metrics = evaluate(
            dts.reset_index(), gts.reset_index(), cfg
        )

        valid_categories = sorted(categories) + ["AVERAGE_METRICS"]
        ap_dict = {}
        for index, row in metrics.iterrows():
            ap_dict[index] = row.to_json()
        return metrics.loc[valid_categories], ap_dict

def parse_config(argv=None):
    """Parse command-line arguments for the Argoverse 2 preprocessing script.

    Args:
        argv (list[str] | None): Argument list to parse. When None (the
            default), argparse falls back to ``sys.argv[1:]``, preserving
            the original call-site behavior while allowing tests and other
            callers to inject arguments explicitly.

    Returns:
        argparse.Namespace: Parsed arguments with ``root_path`` and
        ``output_dir`` attributes.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--root_path', type=str, default="/data/argo2/sensor",
                        help='directory containing the raw Argoverse 2 sensor data')
    parser.add_argument('--output_dir', type=str, default="/data/argo2/processed",
                        help='directory where processed outputs are written')
    args = parser.parse_args(argv)
    return args

if __name__ == '__main__':
    # Generate info files for each requested split; add "train" to the
    # tuple below once the training data is in place.
    for data_split in ("val",):
        create_av2_infos(
            dataset_dir=dataset_dir,
            split=data_split,
            out_dir=dataset_dir,
        )

# if __name__ == '__main__':
#     args = parse_config()
#     root = args.root_path
#     output_dir = args.output_dir
#     save_bin = True
#     ts2idx, seg_path_list, seg_split_list = prepare(root)

#     velodyne_dir = Path(output_dir) / 'training' / 'velodyne'
#     if not velodyne_dir.exists():
#         velodyne_dir.mkdir(parents=True, exist_ok=True)

#     info_list = []
#     create_argo2_infos(seg_path_list, seg_split_list, info_list, ts2idx, output_dir, save_bin, 0, 1)

#     assert len(info_list) > 0

#     train_info = [e for e in info_list if e['sample_idx'][0] == '0']
#     val_info = [e for e in info_list if e['sample_idx'][0] == '1']
#     test_info = [e for e in info_list if e['sample_idx'][0] == '2']
#     trainval_info = train_info + val_info
#     assert len(train_info) + len(val_info) + len(test_info) == len(info_list)

#     # save info_list in under the output_dir as pickle file
#     with open(osp.join(output_dir, 'argo2_infos_train.pkl'), 'wb') as f:
#         pkl.dump(train_info, f)

#     with open(osp.join(output_dir, 'argo2_infos_val.pkl'), 'wb') as f:
#         pkl.dump(val_info, f)

#     # save validation anno feather
#     save_feather_path = os.path.join(output_dir, 'val_anno.feather')
#     val_seg_path_list = [seg_path for seg_path in seg_path_list if 'val' in seg_path]
#     assert len(val_seg_path_list) == len([i for i in seg_split_list if i == 'val'])

#     seg_anno_list = []
#     for seg_path in val_seg_path_list:
#         seg_anno = read_feather(osp.join(seg_path, 'annotations.feather'))
#         log_id = seg_path.split('/')[-1]
#         seg_anno["log_id"] = log_id
#         seg_anno_list.append(seg_anno)

#     gts = pd.concat(seg_anno_list).reset_index()
#     gts.to_feather(save_feather_path)

