import os
import numpy as np
from numpy.typing import NDArray
from typing import Tuple, List, Dict, Optional
import struct
from math import cos, sin, pi
from PIL import Image

from nuscenes.nuscenes import NuScenes
from nuscenes.map_expansion.map_api import NuScenesMap

from common_util import read_image, read_json, write_json
from common_util import transform_3d_cloud, quaternion_to_rotation_matrix, quaternion_to_euler
from common_util import line_in_polygon_roi, polyline_in_polygon_roi
from dataset_sdk import SupportedTag, RoadTypeTag, Metadata
from dataset_sdk import CoordinateSystem, Frame, Scene, DatasetDevkit, Obstacle, ObstacleCategory
from dataset_sdk import LaneDivider, LaneDividerLineType, LaneDividerColor
from dataset_sdk import RoadEdge, RoadEdgeCategory
from dataset_sdk import RoadMarking, RoadMarkingCategory
from dataset_sdk import TrafficLight, TrafficLightType, TrafficLightStatus
from dataset_sdk import MapInfo

__all__ = ['NuscenesFrame', 'NuscenesScene', 'NuscenesDevkit']

# Column layout of the raw LIDAR .bin files (flat float32 records, 5 per point).
RAW_LIDAR_CHANNELS = ('x', 'y', 'z', 'intensity', 'ring')

# Column layout of the raw radar .pcd files; see the load_radar_pcd docstring
# for the semantics of the individual fields.
RAW_RADAR_CHANNELS = ('x', 'y', 'z', 'dyn_prop', 'id', 'rcs', 'vx', 'vy', 'vx_comp', 'vy_comp', 'is_quality_valid',
                      'ambig_state', 'x_rms', 'y_rms', 'invalid_state', 'pdh0', 'vx_rms', 'vy_rms')

# Channel subsets (and their order) exposed by get_lidar_cloud / get_radar_cloud.
OUT_LIDAR_CHANNELS = ('x', 'y', 'z', 'intensity', 'ring')

OUT_RADAR_CHANNELS = ('x', 'y', 'z', 'vx', 'vy', 'rcs')

# Sensor channel names as they appear in nuScenes 'sample' records.
LIDARS = ['LIDAR_TOP']
CAMERAS = ['CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_FRONT_LEFT']
RADARS = ['RADAR_FRONT', 'RADAR_FRONT_LEFT', 'RADAR_FRONT_RIGHT', 'RADAR_BACK_LEFT', 'RADAR_BACK_RIGHT']

# Maps every nuScenes category name onto the SDK's ObstacleCategory. Categories
# without an exact SDK counterpart are mapped to the closest class (or to
# ignore/debris for non-obstacle-like objects).
TYPE_DICT = {
    'human.pedestrian.adult':               ObstacleCategory.pedestrian,
    'human.pedestrian.child':               ObstacleCategory.pedestrian,
    'human.pedestrian.construction_worker': ObstacleCategory.pedestrian,
    'human.pedestrian.personal_mobility':   ObstacleCategory.pedestrian,
    'human.pedestrian.police_officer':      ObstacleCategory.pedestrian,
    'movable_object.barrier':               ObstacleCategory.barrier,
    'movable_object.debris':                ObstacleCategory.debris,
    'movable_object.pushable_pullable':     ObstacleCategory.ignore,
    'movable_object.trafficcone':           ObstacleCategory.traffic_cone,
    'static_object.bicycle_rack':           ObstacleCategory.debris,
    'vehicle.bicycle':                      ObstacleCategory.bicycle,
    'vehicle.bus.bendy':                    ObstacleCategory.bus,
    'vehicle.bus.rigid':                    ObstacleCategory.bus,
    'vehicle.car':                          ObstacleCategory.car,
    'vehicle.construction':                 ObstacleCategory.truck,
    'vehicle.emergency.ambulance':          ObstacleCategory.car,
    'vehicle.emergency.police':             ObstacleCategory.car,
    'vehicle.motorcycle':                   ObstacleCategory.motorcycle,
    'vehicle.trailer':                      ObstacleCategory.trailer,
    'vehicle.truck':                        ObstacleCategory.truck,
    'human.pedestrian.stroller':            ObstacleCategory.pedestrian,
    'human.pedestrian.wheelchair':          ObstacleCategory.pedestrian,
    'animal':                               ObstacleCategory.debris
}

# Default radius around the ego vehicle for exported map elements — presumably
# meters (nuScenes map coordinates are metric); used as get_map_info's roi default.
MAX_MAP_OUT_RANGE = 150.

# File name (relative to the dataset root) of the persisted fast-load cache
# produced by NuscenesDevkit._cache_for_fast_load.
CACHE_FILE_NAME = 'nuscenes_fastload.cache'

def rotation_to_matrix(nuscenes_rotation: list) -> NDArray:
    """Convert a nuScenes quaternion, stored (w, x, y, z), to a 3x3 rotation matrix."""
    assert len(nuscenes_rotation) == 4
    # quaternion_to_rotation_matrix expects (x, y, z, w) order.
    w, x, y, z = nuscenes_rotation
    return quaternion_to_rotation_matrix([x, y, z, w])

def rotation_to_euler(nuscenes_rotation: list) -> NDArray:
    """Convert a nuScenes quaternion, stored (w, x, y, z), to Euler angles."""
    assert len(nuscenes_rotation) == 4
    # quaternion_to_euler expects (x, y, z, w) order.
    w, x, y, z = nuscenes_rotation
    return quaternion_to_euler([x, y, z, w])

def load_lidar_bin(bin_path) -> NDArray:
    """
    Load a LIDAR sweep stored as a flat float32 binary file.

    Each record is (x, y, z, intensity, ring index); the result has one row
    per point with columns in RAW_LIDAR_CHANNELS order.
    """
    assert bin_path.endswith('.bin')
    flat = np.fromfile(bin_path, dtype=np.float32)
    return flat.reshape((-1, len(RAW_LIDAR_CHANNELS)))

def to_transform_matrix(nuscenes_translation: list, nuscenes_rotation: list) -> NDArray:
    """Build a 4x4 homogeneous transform from a nuScenes translation and quaternion."""
    assert len(nuscenes_translation) == 3 and len(nuscenes_rotation) == 4
    transform = np.identity(4)
    transform[:3, :3] = rotation_to_matrix(nuscenes_rotation)
    transform[:3, 3] = nuscenes_translation
    return transform

def load_radar_pcd(pcd_path) -> NDArray:
    """
    Load a nuScenes radar point cloud from a binary PCD file and apply the
    recommended validity filters (invalid_state / ambig_state).

    Example of the header fields:
    # .PCD v0.7 - Point Cloud Data file format
    VERSION 0.7
    FIELDS x y z dyn_prop id rcs vx vy vx_comp vy_comp is_quality_valid ambig_state x_rms y_rms invalid_state pdh0 vx_rms vy_rms
    SIZE 4 4 4 1 2 4 4 4 4 4 1 1 1 1 1 1 1 1
    TYPE F F F I I F F F F F I I I I I I I I
    COUNT 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
    WIDTH 125
    HEIGHT 1
    VIEWPOINT 0 0 0 1 0 0 0
    POINTS 125
    DATA binary
    vx, vy are the velocities in m/s.
    vx_comp, vy_comp are the velocities in m/s compensated by the ego motion.
    We recommend using the compensated velocities.
    invalid_state: state of Cluster validity state.
    (Invalid states)
    0x01	invalid due to low RCS
    0x02	invalid due to near-field artefact
    0x03	invalid far range cluster because not confirmed in near range
    0x05	reserved
    0x06	invalid cluster due to high mirror probability
    0x07	Invalid cluster because outside sensor field of view
    0x0d	reserved
    0x0e	invalid cluster because it is a harmonics
    (Valid states)
    0x00	valid
    0x04	valid cluster with low RCS
    0x08	valid cluster with azimuth correction due to elevation
    0x09	valid cluster with high child probability
    0x0a	valid cluster with high probability of being a 50 deg artefact
    0x0b	valid cluster but no local maximum
    0x0c	valid cluster with high artefact probability
    0x0f	valid cluster with above 95m in near range
    0x10	valid cluster with high multi-target probability
    0x11	valid cluster with suspicious angle
    dynProp: Dynamic property of cluster to indicate if is moving or not.
    0: moving
    1: stationary
    2: oncoming
    3: stationary candidate
    4: unknown
    5: crossing stationary
    6: crossing moving
    7: stopped
    ambig_state: State of Doppler (radial velocity) ambiguity solution.
    0: invalid
    1: ambiguous
    2: staggered ramp
    3: unambiguous
    4: stationary candidates
    pdh0: False alarm probability of cluster (i.e. probability of being an artefact caused by multipath or similar).
    0: invalid
    1: <25%
    2: 50%
    3: 75%
    4: 90%
    5: 99%
    6: 99.9%
    7: <=100%
    """
    assert pcd_path.endswith('.pcd')
    # Read the ASCII header up to (and including) the DATA line; everything
    # after it is the binary point payload.
    meta = list()
    with open(pcd_path, 'rb') as f:
        for line in f:
            line = line.strip().decode('utf-8')
            meta.append(line)
            if line.startswith('DATA'):
                break
        data_binary = f.read()
    # Get the header rows and check if they appear as expected.
    assert meta[0].startswith('#'), 'First line must be comment'
    assert meta[1].startswith('VERSION'), 'Second line must be VERSION'
    # Parse the remaining header rows by keyword instead of by fixed row index,
    # so a valid header with extra/reordered rows does not silently misparse.
    header = {row.split(' ')[0]: row.split(' ')[1:] for row in meta[2:]}
    fields = header['FIELDS']
    sizes = header['SIZE']
    types = header['TYPE']
    counts = header['COUNT']
    width = int(header['WIDTH'][0])
    height = int(header['HEIGHT'][0])
    data = header['DATA'][0]
    feature_cnt = len(fields)
    assert width > 0
    assert len([c for c in counts if c != '1']) == 0, 'Error: COUNT not supported!'
    # Bug fix: the failure message used to read "height != 0 not supported".
    assert height == 1, 'Error: height != 1 not supported!'
    assert feature_cnt == len(types) == len(sizes)
    assert data == 'binary'
    # Lookup table for how to decode the binaries (PCD TYPE/SIZE -> struct code).
    unpacking_lut = {'F': {'2': 'e', '4': 'f', '8': 'd'},
                     'I': {'1': 'b', '2': 'h', '4': 'i', '8': 'q'},
                     'U': {'1': 'B', '2': 'H', '4': 'I', '8': 'Q'}}
    # Pre-compile the per-point format; '<' means little-endian, no padding,
    # so point_struct.size equals the sum of the declared SIZE entries.
    point_struct = struct.Struct('<' + ''.join([unpacking_lut[t][s] for t, s in zip(types, sizes)]))
    pt_bin_len = point_struct.size
    # Decode each point; unpack_from avoids slicing a new bytes object per point.
    points = [list(point_struct.unpack_from(data_binary, i * pt_bin_len)) for i in range(width)]
    # A NaN in the first point indicates an empty pointcloud.
    if np.any(np.isnan(np.array(points[0]))):
        return np.zeros((0, feature_cnt))
    # Convert to numpy matrix.
    points = np.array(points)
    # Keep only clusters whose state values are in the allowed sets below
    # (see the state tables in the docstring). Note dyn_prop currently allows
    # all values, i.e. no dynamic-property filtering is applied.
    radar_flt = {'invalid_state': [0, 4, 8, 9],
                 'dyn_prop': [0, 1, 2, 3, 4, 5, 6, 7],
                 'ambig_state': [3, 4]}
    for prop, rules in radar_flt.items():
        flt_ids = [p in rules for p in points[:, fields.index(prop)]]
        points = points[flt_ids, :]
    assert points.shape[1] == len(RAW_RADAR_CHANNELS)
    return points

ROI_SAMPLE = 32
def load_nodes_to_roi(node_tokens: List[str], is_polygon: bool, nusc_map: NuScenesMap,
                      pose: NDArray=np.identity(4), roi=-1.) -> List[NDArray]:
    """
    Look up map nodes by token, express them relative to `pose` (world -> local)
    and optionally clip them to a circular ROI around the local origin.

    The circle is approximated by a ROI_SAMPLE-sided polygon of radius `roi`;
    a negative `roi` disables clipping. Polygons are returned closed (first
    vertex repeated at the end).
    """
    world_pts = [[nusc_map.get('node', token)['x'], nusc_map.get('node', token)['y'], 0]
                 for token in node_tokens]
    local_pts = transform_3d_cloud(np.array(world_pts).reshape(-1, 3),
                                   pose[:3, 3], pose[:3, :3], inverse=True)
    if roi < 0:
        if is_polygon:
            # Close the ring by re-appending the first vertex.
            return [local_pts[list(range(len(local_pts))) + [0]]]
        return [local_pts]
    roi_polygon = np.array([[cos(pi/ROI_SAMPLE*2*i)*roi, sin(pi/ROI_SAMPLE*2*i)*roi]
                            for i in range(ROI_SAMPLE)])
    clip = polyline_in_polygon_roi if is_polygon else line_in_polygon_roi
    return clip(local_pts, roi_polygon)

# nuScenes lane-divider segment type -> SDK line type. 'NIL' (untyped) segments
# are treated as dashed.
LANE_DIVIDER_LINE_TYPE_MAP = {
    'DOUBLE_DASHED_WHITE':  LaneDividerLineType.dashed,
    'NIL':                  LaneDividerLineType.dashed,
    'SINGLE_ZIGZAG_WHITE':  LaneDividerLineType.solid,
    'DOUBLE_SOLID_WHITE':   LaneDividerLineType.solid,
    'SINGLE_SOLID_WHITE':   LaneDividerLineType.solid,
    'SINGLE_SOLID_YELLOW':  LaneDividerLineType.solid,
}

# nuScenes lane-divider segment type -> SDK line color.
LANE_DIVIDER_COLOR_MAP = {
    'DOUBLE_DASHED_WHITE':  LaneDividerColor.white,
    'NIL':                  LaneDividerColor.white,
    'SINGLE_ZIGZAG_WHITE':  LaneDividerColor.white,
    'DOUBLE_SOLID_WHITE':   LaneDividerColor.white,
    'SINGLE_SOLID_WHITE':   LaneDividerColor.white,
    'SINGLE_SOLID_YELLOW':  LaneDividerColor.yellow,
}

def get_lane_dividers(nusc_map: NuScenesMap, pose: NDArray=np.identity(4), roi=-1.) -> List[LaneDivider]:
    """
    Collect lane dividers and road dividers as LaneDivider objects, expressed
    relative to `pose` and clipped to `roi` (negative roi: no clipping).
    """
    assert pose.shape == (4, 4)
    dividers: List[LaneDivider] = []

    def _collect(token, line_type, color, node_tokens):
        # Keep only segments that still have at least two points after clipping.
        for segment in load_nodes_to_roi(node_tokens, False, nusc_map, pose, roi):
            if len(segment) > 1:
                dividers.append(LaneDivider(token, line_type, color, segment))

    for record in nusc_map.lane_divider:
        segment_type = record['lane_divider_segments'][0]['segment_type']
        _collect(record['token'], LANE_DIVIDER_LINE_TYPE_MAP[segment_type],
                 LANE_DIVIDER_COLOR_MAP[segment_type], record['node_tokens'])
    # Road dividers carry no segment type; they are exported as solid white.
    for record in nusc_map.road_divider:
        _collect(record['token'], LaneDividerLineType.solid, LaneDividerColor.white,
                 record['node_tokens'])
    return dividers

def get_road_edges(nusc_map: NuScenesMap, pose: NDArray=np.identity(4), roi=-1.) -> List[RoadEdge]:
    """
    Collect the boundaries of all drivable areas (exterior rings and holes) as
    RoadEdge objects, expressed relative to `pose` and clipped to `roi`.
    """
    assert pose.shape == (4, 4)
    road_edges: List[RoadEdge] = []

    def _collect(edge_id, node_tokens):
        # Keep only boundary pieces that still have at least two points.
        for segment in load_nodes_to_roi(node_tokens, True, nusc_map, pose, roi):
            if len(segment) > 1:
                road_edges.append(RoadEdge(edge_id, RoadEdgeCategory.road_edge, segment))

    for area in nusc_map.drivable_area:
        for poly_token in area['polygon_tokens']:
            if poly_token is None:
                continue
            polygon = nusc_map.get('polygon', poly_token)
            _collect(poly_token, polygon['exterior_node_tokens'])
            # Interior holes get a derived id so they stay distinguishable.
            for hole_id, hole in enumerate(polygon['holes']):
                _collect(f'{poly_token}_{hole_id}', hole['node_tokens'])
    return road_edges

def get_road_markings(nusc_map: NuScenesMap, pose: NDArray=np.identity(4), roi=-1.) -> List[RoadMarking]:
    """
    Collect pedestrian crossings and stop lines as RoadMarking polygons,
    expressed relative to `pose`.

    A marking is exported only if its exterior ring survives ROI clipping as a
    single closed polygon (first point equal to last); the closing point is
    stripped before export.
    """
    assert pose.shape == (4, 4)
    road_markings: List[RoadMarking] = []

    def _closed_polygon(marking) -> Optional[NDArray]:
        # Return the marking's closed exterior ring (without the duplicated
        # closing vertex), or None if it was clipped away / split by the ROI.
        lines = load_nodes_to_roi(marking['exterior_node_tokens'], True, nusc_map, pose, roi)
        # Bug fix: guard against an empty result — previously lines[0] raised
        # IndexError when the whole polygon fell outside the ROI.
        if not lines:
            return None
        if len(lines[0]) <= len(marking['exterior_node_tokens']):
            return None
        if not np.allclose(lines[0][0], lines[0][-1]):
            return None
        return lines[0][:-1]

    for marking in nusc_map.ped_crossing:
        polygon = _closed_polygon(marking)
        if polygon is not None:
            road_markings.append(RoadMarking(marking['token'], [RoadMarkingCategory.crosswalk], polygon))
    for marking in nusc_map.stop_line:
        if marking['stop_line_type'] not in ['PED_CROSSING', 'TRAFFIC_LIGHT', 'STOP_SIGN', 'YIELD']:
            continue
        polygon = _closed_polygon(marking)
        if polygon is not None:
            road_markings.append(RoadMarking(marking['token'], [RoadMarkingCategory.stop_line], polygon))
    return road_markings

# nuScenes traffic-light bulb shape -> SDK light type.
TRAFFIC_LIGHT_TYPE_MAP = {
    'CIRCLE':   TrafficLightType.round_,
    'LEFT':     TrafficLightType.left,
    'RIGHT':    TrafficLightType.right,
    'UP':       TrafficLightType.straight,
}

def get_traffic_lights(nusc_map: NuScenesMap, pose: NDArray=np.identity(4), roi=-1.) -> List[TrafficLight]:
    """
    Build TrafficLight objects from the map, one per (light pole, bulb shape).

    :param nusc_map: map handler to query.
    :param pose: 4x4 world transform of the target frame; outputs are expressed
                 relative to it.
    :param roi: keep only lights whose anchor lies within this radius of the
                frame origin (xy plane); a negative value disables the filter.
    """
    assert pose.shape == (4, 4)
    traffic_lights: List[TrafficLight] = []
    for light in nusc_map.traffic_light:
        anchor = np.array([light['pose']['tx'], light['pose']['ty'], light['pose']['tz']])
        rel_anc = transform_3d_cloud(anchor.reshape(-1, 3), pose[:3, 3], pose[:3, :3], inverse=True)
        # Bug fix: a negative roi used to be squared into a 1 m radius and
        # filtered out virtually every light; now roi < 0 disables clipping,
        # consistent with load_nodes_to_roi and the other map getters.
        if roi >= 0 and rel_anc[0, 0]**2 + rel_anc[0, 1]**2 > roi**2:
            continue
        # nuScenes carries no per-frame bulb state, so the status is unknown.
        status = TrafficLightStatus.none
        rotation = [0., 0., light['pose']['rz']]
        # Group bulb positions (relative to the anchor) by their shape.
        bulbs: Dict[str, list] = {}
        for it in light['items']:
            bulbs.setdefault(it['shape'], []).append(
                [it['rel_pos']['tx'], it['rel_pos']['ty'], it['rel_pos']['tz']])
        for shape, rel_poses in bulbs.items():
            id_ = f'{light["token"]}_{shape}'
            type_ = TRAFFIC_LIGHT_TYPE_MAP[shape]
            rel_poses = np.array(rel_poses)
            # Pad the bulb bounding box in y/z by 0.126 — presumably a typical
            # bulb radius in meters; TODO confirm.
            min_xyz = rel_poses.min(axis=0) - np.array([0, 0.126, 0.126])
            max_xyz = rel_poses.max(axis=0) + np.array([0, 0.126, 0.126])
            center = (max_xyz + min_xyz) / 2 + anchor
            size = max_xyz - min_xyz
            traffic_lights.append(TrafficLight(id_, type_, status, center, size, rotation))
            # Express the light in the requested frame (world -> pose-local).
            traffic_lights[-1].shift_coordinate_system(pose[:3, 3], pose[:3, :3], inverse=True)
    return traffic_lights

def get_scene_fast_load_cache(nusc: NuScenes, scene_dict: dict) -> List[dict]:
    """
    Build the fast-load frame list for one scene: every annotated key frame
    (kept as its original nuScenes 'sample' dict) plus a synthetic frame dict
    for each intermediate sweep in which all sensors could be synchronized.

    :param nusc: nuScenes devkit handle.
    :param scene_dict: nuScenes 'scene' record.
    :return: frame dicts; sweep entries contain only 'timestamp',
             'scene_token' and 'data' (sensor name -> sample_data token),
             i.e. no 'token'/'anns' keys.
    """
    # Walk each sensor's sample_data chain from the first sample to collect all
    # tokens and timestamps of the scene (key frames and sweeps alike).
    frame = nusc.get('sample', scene_dict['first_sample_token'])
    all_sensors = list(frame['data'].keys())
    snrs_tokens = {sensor: [] for sensor in all_sensors}
    snrs_stamps = {sensor: [] for sensor in all_sensors}
    for sensor in all_sensors:
        sensor_token = frame['data'][sensor]
        while len(sensor_token) > 0:  # 'next' is '' at the end of the chain
            sensor_dict = nusc.get('sample_data', sensor_token)
            snrs_tokens[sensor].append(sensor_token)
            snrs_stamps[sensor].append(sensor_dict['timestamp'])
            sensor_token = sensor_dict['next']
    snrs_stamps = {sensor: np.array(stamps) for sensor, stamps in snrs_stamps.items()}
    # Synchronize sweeps on CAM_FRONT; other sensors must have a reading within
    # SYNC_GAP (60000 — timestamps appear to be microseconds, i.e. 60 ms; see
    # the sample examples in NuscenesFrame's docstring).
    SYNC_SNR = 'CAM_FRONT'
    SYNC_GAP = 60000
    sync_dict = {}
    for main_token in snrs_tokens[SYNC_SNR]:
        main_snr_dict = nusc.get('sample_data', main_token)
        if main_snr_dict['is_key_frame']:
            # Key frames already have an official, fully synchronized sample.
            frame_dict = nusc.get('sample', main_snr_dict['sample_token'])
            sync_dict[frame_dict['timestamp']] = frame_dict
            continue
        sync = {SYNC_SNR: main_token}
        main_stamp = main_snr_dict['timestamp']
        for sensor in all_sensors:
            if sensor != SYNC_SNR:
                gaps = abs(main_stamp - snrs_stamps[sensor])
                if gaps.min() < SYNC_GAP:
                    sync[sensor] = snrs_tokens[sensor][gaps.argmin()]
                else:
                    break  # one sensor too far off -> discard this sweep frame
        # Keep the sweep only if every sensor found a close-enough reading.
        if len(sync) == len(all_sensors):
            # Key on the LIDAR_TOP stamp so duplicate matches collapse.
            lidar_stamp = nusc.get('sample_data', sync['LIDAR_TOP'])['timestamp']
            if lidar_stamp not in sync_dict.keys():
                sync_dict[lidar_stamp] = {
                    'timestamp': lidar_stamp,
                    'scene_token': scene_dict['token'],
                    'data': sync,
                }
    return list(sync_dict.values())

class NuscenesFrame(Frame):
    """
    One synchronized multi-sensor frame of a nuScenes scene.

    The backing frame dict is either a genuine nuScenes "sample" record
    (annotated key frame) or a synthetic sweep record produced by
    get_scene_fast_load_cache; sweep records carry only 'timestamp',
    'scene_token' and 'data' (no 'token'/'anns'), so annotation lookups
    return None for them.

    "Sample" means an annotated frame in a scene, which is annotated every 0.5s
    {'token': 'ca9a282c9e77460f8360f564131a8af5',
     'timestamp': 1532402927647951, # based on LIDAR_TOP
     'prev': '',
     'next': '39586f9d59004284a7114a68825e8eec',
     'scene_token': 'cc8c0bf57f984915a77078b10eb33198',
     'data': {'RADAR_FRONT': '37091c75b9704e0daa829ba56dfa0906',
              'RADAR_FRONT_LEFT': '11946c1461d14016a322916157da3c7d',
              'RADAR_FRONT_RIGHT': '491209956ee3435a9ec173dad3aaf58b',
              'RADAR_BACK_LEFT': '312aa38d0e3e4f01b3124c523e6f9776',
              'RADAR_BACK_RIGHT': '07b30d5eb6104e79be58eadf94382bc1',
              'LIDAR_TOP': '9d9bf11fb0e144c8b446d54a8a00184f',
              'CAM_FRONT': 'e3d495d4ac534d54b321f50006683844',
              'CAM_FRONT_RIGHT': 'aac7867ebf4f446395d29fbd60b63b3b',
              'CAM_BACK_RIGHT': '79dbb4460a6b40f49f9c150cb118247e',
              'CAM_BACK': '03bea5763f0f4722933508d5999c5fd8',
              'CAM_BACK_LEFT': '43893a033f9c46d4a51b5e08a67a1eb7',
              'CAM_FRONT_LEFT': 'fe5422747a7d4268a4b07fc396707b23'},
     'anns': ['ef63a697930c4b20a6b9791f423351da', ...]}
    "Sample data" means sensor data, which contains totally 12 sensors
    {'token': 'e3d495d4ac534d54b321f50006683844',
     'sample_token': 'ca9a282c9e77460f8360f564131a8af5',
     'ego_pose_token': 'e3d495d4ac534d54b321f50006683844',
     'calibrated_sensor_token': '1d31c729b073425e8e0202c5c6e66ee1',
     'timestamp': 1532402927612460,
     'fileformat': 'jpg',
     'is_key_frame': True,
     'height': 900,
     'width': 1600,
     'filename': 'samples/CAM_FRONT/n015-2018-07-24-11-22-45+0800__CAM_FRONT__1532402927612460.jpg',
     'prev': '',
     'next': '68e8e98cf7b0487baa139df808641db7',
     'sensor_modality': 'camera',
     'channel': 'CAM_FRONT'}
    """
    def __init__(self, meta: Metadata, calib: Dict[str, NDArray], nusc: NuScenes,
                 nusc_map: NuScenesMap, path, cache: dict) -> None:
        """
        :param meta: scene-level metadata shared by every frame of the scene.
        :param calib: scene-level calibration dict shared by every frame.
        :param nusc: nuScenes devkit handle used for all record lookups.
        :param nusc_map: map handle for the location this scene was recorded in.
        :param path: dataset root; 'filename' entries in sample_data records are
                     relative to it.
        :param cache: frame dict (a nuScenes 'sample' or a fast-load sweep record).
        """
        super().__init__(meta, calib)
        self.__nusc = nusc
        self.__nusc_map = nusc_map
        self.__path = path
        self.__frame_dict = cache
        self.__ego_pose = None  # computed lazily by the ego_pose property
        self.lidars = LIDARS
        self.cameras = CAMERAS
        self.radars = RADARS
        # for sensor, token in frame_dict['data'].items():
        #     sensor_dict = nusc.get('sample_data', token)
        #     if sensor_dict['sensor_modality'] == 'lidar':
        #         self.lidars.append(sensor)
        #     elif sensor_dict['sensor_modality'] == 'camera':
        #         self.cameras.append(sensor)
        #     elif sensor_dict['sensor_modality'] == 'radar':
        #         self.radars.append(sensor)
        #     else:
        #         print(f'ERROR: unknow sensor modality: {sensor} {sensor_dict["sensor_modality"]}')
        #         raise ValueError

    @property
    def ego_pose(self) -> NDArray:
        """4x4 ego-to-world transform of this frame, taken from LIDAR_TOP's ego pose."""
        if self.__ego_pose is None:
            lidar_dict = self.__nusc.get('sample_data', self.__frame_dict['data']['LIDAR_TOP'])
            pose_dict = self.__nusc.get('ego_pose', lidar_dict['ego_pose_token'])
            self.__ego_pose = to_transform_matrix(pose_dict['translation'], pose_dict['rotation'])
        return self.__ego_pose

    def get_sensor_params(self) -> dict:
        """
        Return raw file paths plus, for lidars, the sensor-to-ego extrinsics.

        Lidar entries are [relative bin path, translation, rotation matrix];
        camera entries are [relative jpg path]. 'token' holds the sample token
        for key frames and '' for fast-load sweep frames.
        """
        params = {}
        for lidar in self.lidars:
            lidar_dict = self.__nusc.get('sample_data', self.__frame_dict['data'][lidar])
            bin_path = lidar_dict['filename']
            sensor_pose = self.__nusc.get('calibrated_sensor', lidar_dict['calibrated_sensor_token'])
            params[lidar] = [bin_path, np.array(sensor_pose['translation']),
                             rotation_to_matrix(sensor_pose['rotation'])]
        for camera in self.cameras:
            camera_dict = self.__nusc.get('sample_data', self.__frame_dict['data'][camera])
            jpg_path = camera_dict['filename']
            params[camera] = [jpg_path]
        params['token'] = self.__frame_dict['token'] if 'token' in self.__frame_dict.keys() else ''
        return params

    def get_lidar_cloud(self, lidar: str, coordinate_system=CoordinateSystem.ego) -> Optional[NDArray]:
        """Load a lidar sweep as an (N, len(OUT_LIDAR_CHANNELS)) array, or None."""
        if super().get_lidar_cloud(lidar, coordinate_system) is None:
            return None
        lidar_dict = self.__nusc.get('sample_data', self.__frame_dict['data'][lidar])
        bin_path = os.path.join(self.__path, lidar_dict['filename'])
        cloud = load_lidar_bin(bin_path)
        if coordinate_system == CoordinateSystem.sensor:
            pass  # raw cloud is already in the sensor frame
        elif coordinate_system == CoordinateSystem.ego:
            cloud = self.__sensor_cloud_to_ego(cloud, lidar_dict)
        elif coordinate_system == CoordinateSystem.world:
            cloud = self.__sensor_cloud_to_world(cloud, lidar_dict)
        # Select/reorder columns into the public OUT_LIDAR_CHANNELS layout.
        chs = []
        for ch in OUT_LIDAR_CHANNELS:
            chs.append(RAW_LIDAR_CHANNELS.index(ch))
        return cloud[:, chs]

    def get_camera_image(self, camera: str, undistort=True) -> Optional[Image.Image]:
        """
        Load a camera image, or None if the base class rejects the request.

        NOTE(review): 'undistort' is currently ignored — the image is returned
        exactly as stored on disk.
        """
        if super().get_camera_image(camera) is None:
            return None
        camera_dict = self.__nusc.get('sample_data', self.__frame_dict['data'][camera])
        jpg_path = os.path.join(self.__path, camera_dict['filename'])
        return read_image(jpg_path)

    def get_radar_cloud(self, radar: str, coordinate_system=CoordinateSystem.ego) -> Optional[NDArray]:
        """Load a radar cloud as an (N, len(OUT_RADAR_CHANNELS)) array, or None."""
        if super().get_radar_cloud(radar, coordinate_system) is None:
            return None
        radar_dict = self.__nusc.get('sample_data', self.__frame_dict['data'][radar])
        pcd_path = os.path.join(self.__path, radar_dict['filename'])
        cloud = load_radar_pcd(pcd_path)
        if coordinate_system == CoordinateSystem.sensor:
            pass  # raw cloud is already in the sensor frame
        elif coordinate_system == CoordinateSystem.ego:
            cloud = self.__sensor_cloud_to_ego(cloud, radar_dict)
        elif coordinate_system == CoordinateSystem.world:
            cloud = self.__sensor_cloud_to_world(cloud, radar_dict)
        # Select/reorder columns into the public OUT_RADAR_CHANNELS layout.
        chs = []
        for ch in OUT_RADAR_CHANNELS:
            chs.append(RAW_RADAR_CHANNELS.index(ch))
        return cloud[:, chs]

    def get_obstacles(self, coordinate_system=CoordinateSystem.ego) -> Optional[List[Obstacle]]:
        """
        Return the annotated obstacles of this frame, or None for fast-load
        sweep frames (which carry no 'anns' key).

        "Sample annotation" means annotation of one target
        {'token': '83d881a6b3d94ef3a3bc3b585cc514f8',
         'sample_token': 'ca9a282c9e77460f8360f564131a8af5',
         'instance_token': 'e91afa15647c4c4994f19aeb302c7179',
         'visibility_token': '4',
         'attribute_tokens': ['58aa28b1c2a54dc88e169808c07331e3'],
         'translation': [409.989, 1164.099, 1.623],
         'size': [2.877, 10.201, 3.595],
         'rotation': [-0.5828819500503033, 0.0, 0.0, 0.812556848660791],
         'prev': '',
         'next': 'f3721bdfd7ee4fd2a4f94874286df471',
         'num_lidar_pts': 495,
         'num_radar_pts': 13,
         'category_name': 'vehicle.truck'}
        """
        objects: List[Obstacle] = []
        if super().get_obstacles(coordinate_system) is None:
            return None
        elif coordinate_system == CoordinateSystem.sensor:
            print('INFO: sensor coordinate system for "get_obstacles" is based on LIDAR_TOP')
        if 'anns' not in self.__frame_dict.keys():
            return None
        for annotation_token in self.__frame_dict['anns']:
            anno = self.__nusc.get('sample_annotation', annotation_token)
            id_ = anno['instance_token']
            category = TYPE_DICT[anno['category_name']]
            center = anno['translation']
            # nuScenes stores size as (width, length, height) — see the sample
            # above; reorder to (length, width, height) for the SDK.
            size = [anno['size'][1], anno['size'][0], anno['size'][2]]
            rotation = rotation_to_euler(anno['rotation'])
            num_lidar_pts = anno['num_lidar_pts']
            num_radar_pts = anno['num_radar_pts']
            obj = Obstacle(id_, category, center, size, rotation, num_lidar_pts, num_radar_pts)
            if coordinate_system == CoordinateSystem.world:
                pass  # annotations are stored in world coordinates already
            else:
                # world -> ego, using the ego pose of the LIDAR_TOP reading.
                lidar_dict = self.__nusc.get('sample_data', self.__frame_dict['data']['LIDAR_TOP'])
                ego_pose = self.__nusc.get('ego_pose', lidar_dict['ego_pose_token'])
                obj.shift_coordinate_system(np.array(ego_pose['translation']),
                                            rotation_to_matrix(ego_pose['rotation']), inverse=True)
                if coordinate_system == CoordinateSystem.ego:
                    pass
                elif coordinate_system == CoordinateSystem.sensor:
                    # ego -> LIDAR_TOP sensor frame.
                    sensor_pose = self.__nusc.get('calibrated_sensor', lidar_dict['calibrated_sensor_token'])
                    obj.shift_coordinate_system(np.array(sensor_pose['translation']),
                                                rotation_to_matrix(sensor_pose['rotation']), inverse=True)
            objects.append(obj)
        return objects

    def get_map_info(self, coordinate_system=CoordinateSystem.ego, roi=MAX_MAP_OUT_RANGE) -> Optional[MapInfo]:
        """
        Collect map elements (dividers, edges, markings, traffic lights) within
        `roi` of the ego vehicle, expressed in the requested coordinate system.
        """
        if super().get_map_info(coordinate_system) is None:
            return None
        elif coordinate_system == CoordinateSystem.sensor:
            print(f'INFO: sensor coordinate system for "get_map_info" is based on LIDAR_TOP')
            lidar_dict = self.__nusc.get('sample_data', self.__frame_dict['data']['LIDAR_TOP'])
            sensor_calib = self.__nusc.get('calibrated_sensor', lidar_dict['calibrated_sensor_token'])
            ego_to = np.linalg.inv(to_transform_matrix(sensor_calib['translation'], sensor_calib['rotation']))
        elif coordinate_system == CoordinateSystem.ego:
            ego_to = np.identity(4)
        elif coordinate_system == CoordinateSystem.world:
            ego_to = self.ego_pose
        # NOTE(review): an unrecognized coordinate_system value would leave
        # ego_to unbound and raise NameError below.
        map_info = MapInfo()
        # The getters work in the ego frame (roi is centered on the ego pose)...
        map_info.lane_dividers = get_lane_dividers(self.__nusc_map, self.ego_pose, roi)
        map_info.road_edges = get_road_edges(self.__nusc_map, self.ego_pose, roi)
        map_info.road_markings = get_road_markings(self.__nusc_map, self.ego_pose, roi)
        map_info.traffic_lights = get_traffic_lights(self.__nusc_map, self.ego_pose, roi)
        # ...then everything is shifted from ego into the requested frame.
        map_info.shift_coordinate_system(ego_to[:3, 3], ego_to[:3, :3])
        return map_info

    def get_timestamp(self, sensor=None) -> Optional[int]:
        """Frame timestamp, or a specific sensor's sample_data timestamp."""
        if super().get_timestamp(sensor) is None:
            return None
        if sensor is None:
            return self.__frame_dict['timestamp']
        assert sensor in self.all_sensors()
        sensor_dict = self.__nusc.get('sample_data', self.__frame_dict['data'][sensor])
        return sensor_dict['timestamp']

    def __sensor_cloud_to_ego(self, cloud, sensor_dict) -> NDArray:
        """Transform a cloud from the sensor frame into the ego frame."""
        sensor_pose = self.__nusc.get('calibrated_sensor', sensor_dict['calibrated_sensor_token'])
        return transform_3d_cloud(cloud, np.array(sensor_pose['translation']),
                                  rotation_to_matrix(sensor_pose['rotation']))

    def __sensor_cloud_to_world(self, cloud, sensor_dict) -> NDArray:
        """Transform a cloud from the sensor frame into world coordinates (via ego)."""
        ego_cloud = self.__sensor_cloud_to_ego(cloud, sensor_dict)
        ego_pose = self.__nusc.get('ego_pose', sensor_dict['ego_pose_token'])
        return transform_3d_cloud(ego_cloud, np.array(ego_pose['translation']),
                                  rotation_to_matrix(ego_pose['rotation']))

class NuscenesScene(Scene):
    """
    "Scene" in nuscenes dataset means a sequence data in 20 seconds
    {'token': 'cc8c0bf57f984915a77078b10eb33198',
     'log_token': '7e25a2c8ea1f41c5b0da1e69ecfa71a2',
     'nbr_samples': 39,
     'first_sample_token': 'ca9a282c9e77460f8360f564131a8af5',
     'last_sample_token': 'ed5fc18c31904f96a8f0dbb99ff069c0',
     'name': 'scene-0061',
     'description': 'Parked truck, construction, intersection, turn left, following a van'}
    """
    def __init__(self, scene_dict: dict, nusc: NuScenes, nusc_map: NuScenesMap, path, cache: List[dict]) -> None:
        """
        :param scene_dict: nuScenes 'scene' record (see class docstring).
        :param nusc: nuScenes devkit handle.
        :param nusc_map: map handle for this scene's location.
        :param path: dataset root directory.
        :param cache: fast-load frame dicts for this scene
                      (see get_scene_fast_load_cache).
        """
        super().__init__()
        self.__nusc = nusc
        self.__scene_dict = scene_dict
        self.meta.name = self.__scene_dict['name']
        self.meta.description = self.__scene_dict['description']
        self.meta.tags = [SupportedTag.continuous_anno, SupportedTag.obj_anno, SupportedTag.lane_anno,
                          SupportedTag.edge_anno, SupportedTag.mark_anno, SupportedTag.light_anno]
        self.meta.lidar_channels = OUT_LIDAR_CHANNELS
        self.meta.radar_channels = OUT_RADAR_CHANNELS
        self.meta.main_sensor = 'LIDAR_TOP'
        self.meta.road_type = RoadTypeTag.urban
        # Calibration is read once from the first sample and shared by all
        # frames — presumably constant within a scene; TODO confirm.
        frame_token = scene_dict['first_sample_token']
        frame = nusc.get('sample', frame_token)
        for sensor, token in frame['data'].items():
            to_ego, intrinsic = self.__get_sensor_calib(token)
            self.calib[f'{sensor}_ego'] = to_ego
            if intrinsic.size > 0:
                self.calib[f'{sensor}_intrinsic'] = intrinsic
        self.frames = [NuscenesFrame(self.meta, self.calib, nusc, nusc_map, path, frame_cache)
                       for frame_cache in cache]
        # while len(frame_token) > 0:
        #     frame = nusc.get('sample', frame_token)
        #     self.frames.append(NuscenesFrame(frame, nusc, nusc_map, path, self.calib, self.meta))
        #     frame_token = frame['next']

    @property
    def map_location(self) -> str:
        """City/map name of this scene, taken from its log record."""
        return self.__nusc.get('log', self.__scene_dict['log_token'])['location']

    def __get_sensor_calib(self, sensor_token: str) -> Tuple[NDArray, NDArray]:
        """Return (sensor-to-ego 4x4 transform, camera intrinsics or empty array)."""
        sensor_dict = self.__nusc.get('sample_data', sensor_token)
        sensor_calib = self.__nusc.get('calibrated_sensor', sensor_dict['calibrated_sensor_token'])
        sensor_to_ego = to_transform_matrix(sensor_calib['translation'], sensor_calib['rotation'])
        # Only cameras carry intrinsics; other modalities get an empty array.
        sensor_intrinsic = np.array(sensor_calib['camera_intrinsic']) \
            if sensor_dict['sensor_modality'] == 'camera' else np.array([])
        return sensor_to_ego, sensor_intrinsic

class NuscenesDevkit(DatasetDevkit):
    """Devkit adapter exposing a nuScenes dataroot as a list of SDK scenes."""
    def __init__(self, path) -> None:
        # NuScenes handle; created in load_dataset (presumably invoked through
        # the base-class __init__ — verify against DatasetDevkit).
        self.__nusc = None
        super().__init__(path)

    @staticmethod
    def check_file_structure(path, version: str='v1.0-trainval') -> bool:
        """
        Check that `path` looks like a nuScenes dataroot of the given version:
        the version metadata folder plus 'maps', 'samples' and 'sweeps'.
        """
        if not DatasetDevkit.check_file_structure(path):
            return False
        supported_version = ['v1.0-trainval', 'v1.0-mini']
        if version not in supported_version:
            print(f'ERROR: version {version} is not supported')
            return False
        file_set = set(os.listdir(path))
        check_set = set([version, 'maps', 'samples', 'sweeps'])
        miss_set = check_set - file_set
        if len(miss_set) > 0:
            print(f'ERROR: file(s) missing under {path}: {list(miss_set)}')
            return False
        else:
            print(f'INFO: check passed')
            return True

    def load_dataset(self, path, version: str='v1.0-trainval') -> None:
        """
        Load every scene (and its city map) under `path`, building or reusing
        the fast-load cache file stored next to the data.
        """
        # init nuscenes dataset handler by devkit
        self.__nusc = NuScenes(version=version, dataroot=path, verbose=True)
        nusc_maps = {}
        cache_path = os.path.join(path, CACHE_FILE_NAME)
        if os.path.exists(cache_path):
            cache = read_json(cache_path)
        else:
            # First run: sweep synchronization is slow, so persist the result.
            cache = self._cache_for_fast_load()
            write_json(cache_path, cache)
        for scene in self.__nusc.scene:
            scene_map = self.__nusc.get('log', scene['log_token'])['location']
            # One NuScenesMap instance per city, shared by all scenes there.
            if scene_map not in nusc_maps.keys():
                nusc_map = NuScenesMap(dataroot=path, map_name=scene_map)
                nusc_maps.update({scene_map: nusc_map})
            self.scenes.append(NuscenesScene(scene, self.__nusc, nusc_maps[scene_map],
                                             path, cache[scene['name']]))

    def _cache_for_fast_load(self) -> dict:
        """Build the fast-load cache (scene name -> frame dicts) for all scenes."""
        cache = {}
        for scene in self.__nusc.scene:
            cache[scene['name']] = get_scene_fast_load_cache(self.__nusc, scene)
        return cache
