from abc import ABCMeta, abstractmethod
from enum import Enum, unique
import numpy as np
from numpy.typing import NDArray
from typing import List, Tuple, Union, Dict, Optional
import os
from PIL import Image
import matplotlib.pyplot as plt

from common_util import check_folder_empty, clear_folder, write_json, save_numpy_as_rgb_image, transform_3d_cloud
from common_util import init_axes, MATPLOTLIB_COLOR, matplot_2d_cloud, matplot_image, matplot_cloud_to_image
from common_util import matplot_line_to_image, read_json
from dataset_sdk import Obstacle, MotionObstacle, ObjectTrack, MapInfo
from dataset_sdk import LaneDividerColor, LaneDividerLineType

# Public API of this module.
__all__ = ['SupportedTag', 'WeatherTag', 'LightingTag', 'RoadConditionTag', 'RoadTypeTag', 'Metadata',
           'CoordinateSystem', 'DatasetCache', 'Frame', 'Scene', 'DatasetDevkit']

# File/folder naming convention and numpy dtypes used when caching a scene to
# disk (consumed by Frame.cache and Scene.cache).
CACHE_CONVENTION = {
    'metadata': 'metadata.json',
    'calibration_dir': 'calib',
    'lidar_dir': 'lidars',
    'camera_dir': 'cameras',
    'radar_dir': 'radars',
    'ego_motion_dir': 'ego_motion',
    'object_anno_dir': 'obj_anno',
    'map_anno_dir': 'map_anno',
    'sync_info_dir': 'sync_info',
    'lidar_dtype': 'float32',
    'camera_dtype': 'uint8',
    'radar_dtype': 'float32',
}

# Minimum continuous-annotation interval (seconds) accepted when inferring
# obstacle motion; shorter windows give too little context (see Scene).
MIN_CA_INTERVAL_S = 0.2

def check_create_or_clear_path(path, force: bool=False) -> bool:
    """Ensure ``path`` exists and is an empty directory, ready for caching.

    :param path: target directory (created, with parents, if missing)
    :param force: when True, a non-empty existing directory is cleared
    :return: True when the directory exists and is empty afterwards,
             False when it is non-empty and ``force`` is not set
    """
    if os.path.isdir(path):
        if check_folder_empty(path):
            return True
        print(f'WARN: {path} is not empty')
        if not force:
            print('INFO: path should be empty before cache in')
            return False
        clear_folder(path)
        print('INFO: path is cleared')
        return True
    # exist_ok guards against a race between the isdir() check and creation
    os.makedirs(path, exist_ok=True)
    print(f'INFO: {path} not exist, so is created')
    return True

def check_hint_coordinate_system(coordinate_system) -> bool:
    """Return True when ``coordinate_system`` is a CoordinateSystem member,
    otherwise print an error message and return False."""
    if isinstance(coordinate_system, CoordinateSystem):
        return True
    print(f'ERROR: unrecognized coordinate system {coordinate_system}')
    return False

@unique
class SupportedTag(Enum):
    """Tags describing which annotation / labeling kinds a scene supports.

    The string values are what gets serialized into the scene metadata.
    """
    discrete_anno = 'discrete annotated'
    continuous_anno = 'continuous annotated'
    obj_anno = 'object annotated'
    obj_autolabel = 'object autolabeled'
    lane_anno = 'lane divider annotated'
    edge_anno = 'road edge annotated'
    mark_anno = 'road marking annotated'
    sign_anno = 'traffic sign annotated'
    light_anno = 'traffic light annotated'
    center_line_anno = 'center line annotated'
    center_line_topo_anno = 'center line topology annotated'
    tongfan_map_anno = 'Tongfan map annotation'
    labelme_map_anno = 'labelme map annotation'
    jishu_map_anno = 'Jishu map annotation'

@unique
class WeatherTag(Enum):
    """Weather condition of a scene; serialized by its string value."""
    sunny = 'sunny'
    cloudy = 'cloudy'
    rainy = 'rainy'
    snowy = 'snowy'
    foggy = 'foggy'
    dusty = 'dusty'
    unknown = 'unknown'

@unique
class LightingTag(Enum):
    """Lighting condition of a scene; serialized by its string value."""
    daytime = 'daytime'
    night = 'night'
    morning = 'morning'
    dusk = 'dusk'
    tunnel = 'tunnel'
    indoor = 'indoor'
    unknown = 'unknown'

@unique
class RoadConditionTag(Enum):
    """Road surface condition of a scene; serialized by its string value."""
    normal = 'normal'
    wet = 'wet'
    snow = 'snow'
    icy = 'icy'
    unknown = 'unknown'

@unique
class RoadTypeTag(Enum):
    """Road type of a scene; serialized by its string value."""
    highway = 'highway'
    urban = 'urban'
    ramp = 'ramp'
    intersection = 'intersection'
    parking = 'parking'
    unknown = 'unknown'

class Metadata():
    """Scene-level metadata, serialized to the metadata JSON (see
    CACHE_CONVENTION['metadata']) via :meth:`to_dict`."""
    def __init__(self) -> None:
        self.name = ''          # scene name; also used as the cache folder name
        self.description = ''
        self.tags: List[SupportedTag] = []
        # NOTE: variable-length tuples of channel names (annotation fixed from
        # Tuple[str] to Tuple[str, ...])
        self.lidar_channels: Tuple[str, ...] = ()
        self.radar_channels: Tuple[str, ...] = ()
        self.main_sensor = ''   # sensor whose '<name>_ego' calib is the reference
        self.weather = WeatherTag.unknown
        self.lighting = LightingTag.unknown
        self.road_condition = RoadConditionTag.unknown
        self.road_type = RoadTypeTag.unknown

    def to_dict(self) -> dict:
        """Return a JSON-serializable dict; enums are flattened to their values."""
        return {
            'name': self.name,
            'description': self.description,
            'tags': [i.value for i in self.tags],
            'lidar_channels': self.lidar_channels,
            'radar_channels': self.radar_channels,
            'main_sensor': self.main_sensor,
            'weather': self.weather.value,
            'lighting': self.lighting.value,
            'road_condition': self.road_condition.value,
            'road_type': self.road_type.value,
        }

@unique
class CoordinateSystem(Enum):
    """Coordinate system hint accepted by the data accessors."""
    sensor = 0   # main-sensor frame (via '<main_sensor>_ego' calibration)
    ego = 1      # vehicle/ego frame (the native storage frame)
    world = 2    # world frame (via the frame's ego_pose)

class DatasetCache(metaclass=ABCMeta):
    """Abstract fast-load cache payload; subclasses define what is saved and
    how state is restored from it."""
    @abstractmethod
    def __init__(self) -> None:
        pass

    @abstractmethod
    def from_save(self, data) -> None:
        """Restore state from a previously saved payload ``data``."""
        pass

    @abstractmethod
    def to_save(self):
        """Return a serializable payload representing the current state."""
        pass

class FrameStatistic():
    """Annotation statistics of one frame (built by Frame.get_statistic)."""
    def __init__(self) -> None:
        self.obj_annotated: bool = True   # False when frame lacks object annotation
        self.map_annotated: bool = True   # False when frame lacks map annotation
        self.obj_class_num: Dict[str, int] = {}   # object category -> count
        self.map_class_num: Dict[str, int] = {}   # map element kind -> count

class SceneStatistic():
    """Aggregated annotation statistics of one scene (see Scene.get_statistic)."""
    def __init__(self) -> None:
        self.frame_num: int = 0
        self.obj_anno_frame_num: int = 0   # frames having object annotation
        self.map_anno_frame_num: int = 0   # frames having map annotation
        self.obj_class_num: Dict[str, int] = {}   # object category -> count
        self.map_class_num: Dict[str, int] = {}   # map element kind -> count

class DatasetStatistic():
    """Aggregated statistics of a whole dataset (see DatasetDevkit.get_statistic)."""
    def __init__(self) -> None:
        self.scene_num: int = 0
        self.frame_num: int = 0
        self.obj_anno_frame_num: int = 0   # frames having object annotation
        self.map_anno_frame_num: int = 0   # frames having map annotation
        self.obj_class_num: Dict[str, int] = {}   # object category -> count
        self.map_class_num: Dict[str, int] = {}   # map element kind -> count

class Frame(metaclass=ABCMeta):
    """Abstract single frame of a scene.

    Provides generic accessors for merged sensor data and annotations,
    matplotlib visualization helpers and disk caching, on top of the abstract
    per-sensor readers that concrete subclasses implement.
    """

    @abstractmethod
    def __init__(self, meta: Metadata, calib: Dict[str, NDArray]) -> None:
        # sensor channel name lists; concrete subclasses populate them
        self.lidars = list()
        self.cameras = list()
        self.radars = list()
        self.meta: Metadata = meta
        # calibration matrices keyed '<sensor>_ego' (4x4) / '<camera>_intrinsic' (3x3)
        self.calib: Dict[str, NDArray] = calib

    @property
    @abstractmethod
    def ego_pose(self) -> NDArray:
        """4x4 homogeneous ego-to-world pose of this frame."""
        pass

    def all_sensors(self) -> list:
        """Names of every sensor channel of this frame."""
        return self.lidars + self.cameras + self.radars

    def get_merged_lidar_xyzi_cloud(self, coordinate_system=CoordinateSystem.ego) \
            -> Optional[NDArray]:
        """Merge all lidar channels into one (N, 4) x/y/z/intensity cloud.

        :param coordinate_system: target coordinate system
        :return: merged cloud, an empty (0, 4) array when there is no lidar,
                 or None on an invalid coordinate system hint
        """
        if not check_hint_coordinate_system(coordinate_system):
            return None
        # Fetch every channel in ego coordinates first, then convert once.
        # BUGFIX: was `coordinate_system.ego` — sibling-member access through an
        # enum *member*, which breaks on newer Python versions.
        clouds = [self.get_lidar_cloud(lidar, CoordinateSystem.ego) for lidar in self.lidars]
        if len(clouds) == 0:
            return np.zeros((0, 4))
        merged_cloud = np.vstack(clouds)[:, :4]
        if coordinate_system == CoordinateSystem.sensor:
            sensor_ego = self.calib[f'{self.meta.main_sensor}_ego']
            return transform_3d_cloud(merged_cloud, sensor_ego[:3, -1], sensor_ego[:3, :3], inverse=True)
        elif coordinate_system == CoordinateSystem.ego:
            return merged_cloud
        elif coordinate_system == CoordinateSystem.world:
            # float64 avoids precision loss once large world offsets are added
            merged_cloud = merged_cloud.astype(np.float64)
            return transform_3d_cloud(merged_cloud, self.ego_pose[:3, -1], self.ego_pose[:3, :3])
        else:
            raise NotImplementedError

    @abstractmethod
    def get_lidar_cloud(self, lidar, coordinate_system=CoordinateSystem.ego) \
            -> Optional[NDArray]:
        """Read one lidar channel's cloud; None on unknown channel or system."""
        if lidar not in self.lidars:
            print(f'ERROR: unrecognized lidar: {lidar}')
            return None
        return np.zeros((0, 4)) if check_hint_coordinate_system(coordinate_system) else None

    @abstractmethod
    def get_camera_image(self, camera, undistort=True) -> Optional[Image.Image]:
        """Read one camera image; None on unknown channel."""
        if camera not in self.cameras:
            print(f'ERROR: unrecognized camera: {camera}')
            return None
        return Image.Image()

    def get_merged_radar_cloud(self, coordinate_system=CoordinateSystem.ego) \
            -> Optional[NDArray]:
        """Merge all radar channels into one cloud (empty (0, 3) when no radar).

        :param coordinate_system: target coordinate system
        :return: merged cloud or None on an invalid coordinate system hint
        """
        if not check_hint_coordinate_system(coordinate_system):
            return None
        # BUGFIX: was `coordinate_system.ego` — see get_merged_lidar_xyzi_cloud.
        clouds = [self.get_radar_cloud(radar, CoordinateSystem.ego) for radar in self.radars]
        if len(clouds) == 0:
            return np.zeros((0, 3))
        merged_cloud = np.vstack(clouds)
        if coordinate_system == CoordinateSystem.sensor:
            sensor_ego = self.calib[f'{self.meta.main_sensor}_ego']
            return transform_3d_cloud(merged_cloud, sensor_ego[:3, -1], sensor_ego[:3, :3], inverse=True)
        elif coordinate_system == CoordinateSystem.ego:
            return merged_cloud
        elif coordinate_system == CoordinateSystem.world:
            return transform_3d_cloud(merged_cloud, self.ego_pose[:3, -1], self.ego_pose[:3, :3])
        else:
            raise NotImplementedError

    @abstractmethod
    def get_radar_cloud(self, radar, coordinate_system=CoordinateSystem.ego) \
            -> Optional[NDArray]:
        """Read one radar channel's cloud; None on unknown channel or system."""
        if radar not in self.radars:
            print(f'ERROR: unrecognized radar: {radar}')
            return None
        return np.zeros((0, 5)) if check_hint_coordinate_system(coordinate_system) else None

    @abstractmethod
    def get_obstacles(self, coordinate_system=CoordinateSystem.ego) -> Optional[List[Obstacle]]:
        """Object annotations of this frame; None when unavailable."""
        objects: List[Obstacle] = []
        return objects if check_hint_coordinate_system(coordinate_system) else None

    @abstractmethod
    def get_map_info(self, coordinate_system=CoordinateSystem.ego,
                     roi: Optional[Union[float, tuple]]=None) -> Optional[MapInfo]:
        """Map annotations of this frame; None when unavailable."""
        return MapInfo() if check_hint_coordinate_system(coordinate_system) else None

    @abstractmethod
    def get_timestamp(self, sensor=None) -> Optional[int]:
        """ get timestamp(us) in int, return frame timestamp when sensor is None
        """
        if sensor is None or sensor in self.all_sensors():
            return 1
        print(f'ERROR: unrecognized sensor: {sensor}')
        return None

    def get_sensor_np_data(self, sensor, coordinate_system=CoordinateSystem.ego) \
            -> Optional[NDArray]:
        """Sensor data as a numpy array: cloud for lidar/radar, raw (not
        undistorted) image for camera; None on an unknown sensor name."""
        if sensor in self.lidars:
            return self.get_lidar_cloud(sensor, coordinate_system)
        elif sensor in self.cameras:
            return np.asarray(self.get_camera_image(sensor, undistort=False))
        elif sensor in self.radars:
            return self.get_radar_cloud(sensor, coordinate_system)
        # ROBUSTNESS: previously fell through silently; still returns None but
        # reports the bad channel like the other accessors do
        print(f'ERROR: unrecognized sensor: {sensor}')
        return None

    def matplot_on_bev(self, sensors: Optional[list]=None, plot_obj=True, plot_map=True,
                       coordinate_system=CoordinateSystem.ego, dpi=1000) -> None:
        """Plot selected sensor clouds and annotations on a bird's-eye-view axes.

        :param sensors: lidar/radar channel names to plot (None -> none)
        """
        # BUGFIX: mutable default argument `sensors=[]` replaced by None sentinel
        sensors = [] if sensors is None else sensors
        if not check_hint_coordinate_system(coordinate_system):
            return
        elif coordinate_system == CoordinateSystem.sensor:
            print('ERROR: plot frame elements on sensor coordinate function not ready')
            return
        axes = init_axes(dpi=dpi)
        axes.set_xlabel('x axis')
        axes.set_ylabel('y axis')
        axes.axis('equal')
        for sensor in sensors:
            if sensor in self.lidars:
                cloud = self.get_lidar_cloud(sensor, coordinate_system)
                matplot_2d_cloud(cloud[:, 0], cloud[:, 1], rgb=MATPLOTLIB_COLOR['blue'], label=sensor, axes=axes)
            elif sensor in self.radars:
                cloud = self.get_radar_cloud(sensor, coordinate_system)
                matplot_2d_cloud(cloud[:, 0], cloud[:, 1], rgb=MATPLOTLIB_COLOR['red'], label=sensor, axes=axes)
        if plot_obj:
            obstacles = self.get_obstacles(coordinate_system)
            if obstacles is not None:
                for obj in obstacles:
                    obj.matplot_on_bev(alpha=0.7, axes=axes)
        if plot_map:
            map_info = self.get_map_info(coordinate_system)
            if map_info is not None:
                for it in map_info.lane_dividers:
                    it.matplot(axes=axes)
                for it in map_info.road_edges:
                    it.matplot(axes=axes)
                for it in map_info.road_markings:
                    it.matplot(axes=axes)
                for it in map_info.traffic_lights:
                    it.matplot(axes=axes)
                for it in map_info.center_lines:
                    it.matplot(axes=axes)

    def matplot_on_camera(self, camera, plot_lidar=True, plot_radar=True, plot_obj=True, plot_map=True,
                          dpi=300, axes: Optional[plt.Axes]=None) -> None:
        """Project sensor clouds and annotations onto one camera image and plot.

        :param axes: target axes; a new figure is created when None
        """
        assert camera in self.cameras
        LANE_DIVIDER_COLOR_MAP = {
            LaneDividerColor.white: MATPLOTLIB_COLOR['white'],
            LaneDividerColor.yellow: MATPLOTLIB_COLOR['yellow'],
            LaneDividerColor.blue: MATPLOTLIB_COLOR['blue'],
            LaneDividerColor.green: MATPLOTLIB_COLOR['green'],
            LaneDividerColor.red: MATPLOTLIB_COLOR['red'],
            LaneDividerColor.unknown: MATPLOTLIB_COLOR['purple'],
        }
        if axes is None:
            axes = init_axes(camera, dpi=dpi)
        axes.set_xlabel('width')
        axes.set_ylabel('height')
        axes.axis('equal')
        axes.grid(False)
        image = self.get_camera_image(camera)
        matplot_image(image, axes=axes)
        # ego -> camera extrinsic (inverse of camera -> ego) and camera intrinsic
        ego_camera = np.linalg.inv(self.calib[f'{camera}_ego'])
        camera_intr = self.calib[f'{camera}_intrinsic']
        if plot_lidar:
            cloud = self.get_merged_lidar_xyzi_cloud(CoordinateSystem.ego)
            matplot_cloud_to_image(cloud, ego_camera, camera_intr, (image.width, image.height),
                                   rgb='intensity', alpha=0.7, label='lidars', axes=axes)
        if plot_radar:
            cloud = self.get_merged_radar_cloud(CoordinateSystem.ego)
            matplot_cloud_to_image(cloud, ego_camera, camera_intr, (image.width, image.height),
                                   rgb=MATPLOTLIB_COLOR['red'], alpha=0.7, label='radars', axes=axes)
        if plot_obj:
            obstacles = self.get_obstacles(CoordinateSystem.ego)
            if obstacles is not None:
                for obj in obstacles:
                    obj.matplot_on_camera(ego_camera, camera_intr, (image.width, image.height), alpha=0.5, axes=axes)
        if plot_map:
            map_info = self.get_map_info()
            if map_info is not None:
                for it in map_info.lane_dividers:
                    # solid lines drawn thicker than dashed ones
                    scale = 1.5 if it.line_type == LaneDividerLineType.solid else 0.5
                    matplot_line_to_image(it.points, ego_camera, camera_intr, (image.width, image.height), scale,
                                          LANE_DIVIDER_COLOR_MAP[it.color], 0.5, axes=axes)
                for it in map_info.road_edges:
                    matplot_line_to_image(it.points, ego_camera, camera_intr, (image.width, image.height),
                                          rgb=MATPLOTLIB_COLOR['cyan'], alpha=0.5, axes=axes)
                for it in map_info.center_lines:
                    matplot_line_to_image(it.points, ego_camera, camera_intr, (image.width, image.height),
                                          rgb=MATPLOTLIB_COLOR['red'], alpha=0.5, axes=axes)

    def matplot_on_all_camera(self, camera_list: list, subplot_row: int, subplot_col: int,
                              plot_lidar=True, plot_radar=True, plot_obj=True, plot_map=True, dpi=300) -> None:
        """Plot every camera of ``camera_list`` in a subplot grid of
        ``subplot_row`` x ``subplot_col`` panels."""
        assert subplot_row * subplot_col >= len(camera_list)
        fig = plt.figure('All cameras', dpi=dpi)
        # BUGFIX: squeeze=False keeps axs 2-D even when a grid dimension is 1,
        # so axs[row][col] indexing below always works
        axs = fig.subplots(subplot_row, subplot_col, squeeze=False)
        for i in range(len(camera_list)):
            row = i // subplot_col
            col = i % subplot_col
            axes = axs[row][col]
            axes.set_title(camera_list[i])
            self.matplot_on_camera(camera_list[i], plot_lidar, plot_radar, plot_obj, plot_map, axes=axes)

    def cache(self, scene_path) -> None:
        """Cache this frame's sensor data and annotations under ``scene_path``
        following CACHE_CONVENTION; files are named by timestamp."""
        lidar_dir = os.path.join(scene_path, CACHE_CONVENTION['lidar_dir'])
        camera_dir = os.path.join(scene_path, CACHE_CONVENTION['camera_dir'])
        radar_dir = os.path.join(scene_path, CACHE_CONVENTION['radar_dir'])
        ego_dir = os.path.join(scene_path, CACHE_CONVENTION['ego_motion_dir'])
        obj_dir = os.path.join(scene_path, CACHE_CONVENTION['object_anno_dir'])
        map_dir = os.path.join(scene_path, CACHE_CONVENTION['map_anno_dir'])
        sync_dir = os.path.join(scene_path, CACHE_CONVENTION['sync_info_dir'])
        # per-sensor timestamps, grouped by modality
        sync_dict = {
            CACHE_CONVENTION['lidar_dir']: {},
            CACHE_CONVENTION['camera_dir']: {},
            CACHE_CONVENTION['radar_dir']: {},
        }
        # cache sensors' data
        for sensor in self.all_sensors():
            stamp = self.get_timestamp(sensor)
            np_data = self.get_sensor_np_data(sensor, CoordinateSystem.ego)
            if sensor in self.lidars:
                sync_dict[CACHE_CONVENTION['lidar_dir']][sensor] = stamp
                save_root = os.path.join(lidar_dir, sensor)
                np_data = np_data.astype(CACHE_CONVENTION['lidar_dtype'])
            elif sensor in self.cameras:
                sync_dict[CACHE_CONVENTION['camera_dir']][sensor] = stamp
                save_root = os.path.join(camera_dir, sensor)
                np_data = np_data.astype(CACHE_CONVENTION['camera_dtype'])
            elif sensor in self.radars:
                sync_dict[CACHE_CONVENTION['radar_dir']][sensor] = stamp
                save_root = os.path.join(radar_dir, sensor)
                np_data = np_data.astype(CACHE_CONVENTION['radar_dtype'])
            if not os.path.exists(save_root):
                os.mkdir(save_root)
            # clouds are raw binary dumps; camera data is saved as jpg
            if sensor in self.lidars or sensor in self.radars:
                np_data.tofile(os.path.join(save_root, f'{stamp}.bin'))
            elif sensor in self.cameras:
                save_numpy_as_rgb_image(os.path.join(save_root, f'{stamp}.jpg'), np_data)
        frame_stamp = self.get_timestamp()
        # save ego motion
        assert self.ego_pose.shape == (4, 4)
        np.savetxt(os.path.join(ego_dir, f'{frame_stamp}.txt'), self.ego_pose)
        # save sync info
        write_json(os.path.join(sync_dir, f'{frame_stamp}.json'), sync_dict, force=False)
        # save object annotation
        obstacles = self.get_obstacles(CoordinateSystem.ego)
        if obstacles is not None:
            write_json(os.path.join(obj_dir, f'{frame_stamp}.json'), [obj.to_dict() for obj in obstacles], force=False)
        # save map annotation
        map_info = self.get_map_info(CoordinateSystem.ego)
        if map_info is not None:
            write_json(os.path.join(map_dir, f'{frame_stamp}.json'), map_info.to_dict(), force=False)

    def get_statistic(self) -> FrameStatistic:
        """Count annotated objects and map elements of this frame."""
        res = FrameStatistic()
        obstacles = self.get_obstacles(CoordinateSystem.ego)
        if obstacles is None:
            res.obj_annotated = False
        else:
            for obj in obstacles:
                res.obj_class_num[obj.category.value] = \
                    res.obj_class_num.get(obj.category.value, 0) + 1
        map_info = self.get_map_info(CoordinateSystem.ego)
        if map_info is None:
            res.map_annotated = False
        else:
            res.map_class_num = {
                'lane divider': len(map_info.lane_dividers),
                'road edge': len(map_info.road_edges),
                'road marking': len(map_info.road_markings),
                'traffic sign': len(map_info.traffic_signs),
                'traffic light': len(map_info.traffic_lights),
            }
        return res

    def get_obstacles_from_path(self, path: str, json_folder: str, with_motion=False,
                                coordinate_system=CoordinateSystem.ego) \
            -> Union[List[MotionObstacle], List[Obstacle], None]:
        """Load this frame's obstacles from a cached annotation JSON.

        :param path: dataset cache root
        :param json_folder: annotation sub-folder name (e.g. obj_anno)
        :param with_motion: parse MotionObstacle instead of Obstacle
        :return: obstacle list, or None when no JSON exists for this frame
        """
        json_path = os.path.join(path, self.meta.name, json_folder, f'{self.get_timestamp()}.json')
        if not os.path.exists(json_path):
            return None
        annos = read_json(json_path)
        objects = []
        for anno in annos:
            if with_motion:
                objects.append(MotionObstacle.from_dict(anno))
            else:
                objects.append(Obstacle.from_dict(anno))
        # cached annotations are stored in ego coordinates; shift if needed
        if coordinate_system == CoordinateSystem.ego:
            pass
        elif coordinate_system == CoordinateSystem.sensor:
            sensor_ego = self.calib[f'{self.meta.main_sensor}_ego']
            for obj in objects:
                obj.shift_coordinate_system(sensor_ego[:3, -1], sensor_ego[:3, :3], inverse=True)
        elif coordinate_system == CoordinateSystem.world:
            for obj in objects:
                obj.shift_coordinate_system(self.ego_pose[:3, -1], self.ego_pose[:3, :3])
        return objects

class Scene(metaclass=ABCMeta):
    """Abstract scene: metadata, calibration, an ordered list of frames, and
    lazily inferred per-obstacle motion information over continuous
    annotations."""
    @abstractmethod
    def __init__(self) -> None:
        self.meta = Metadata()
        self.calib: Dict[str, NDArray] = {}
        self.frames: List[Frame] = []
        # interval (s) used by the last motion inference; -1 means not inferred yet
        self.__ca_interval_s: float = -1
        # obstacle id -> ObjectTrack built from per-frame annotations
        self.__obstacle_tracks: Dict[str, ObjectTrack] = {}
        # frame timestamp -> obstacles with inferred velocity/acceleration
        self.__motion_obstacles: Dict[int, List[MotionObstacle]] = {}

    def cache(self, path, force_clear: bool=False) -> bool:
        """Cache the whole scene under path/<scene name> per CACHE_CONVENTION.

        :param path: dataset cache root
        :param force_clear: forwarded to check_create_or_clear_path
        :return: True when cached, False when the target path was not usable
        """
        scene_path = os.path.join(path, self.meta.name)
        if check_create_or_clear_path(scene_path, force_clear):
            # cache meta dict
            meta_path = os.path.join(scene_path, CACHE_CONVENTION['metadata'])
            write_json(meta_path, self.meta.to_dict(), force=False)
            # cache calibration matrices
            calib_dir = os.path.join(scene_path, CACHE_CONVENTION['calibration_dir'])
            os.mkdir(calib_dir)
            for name, matrix in self.calib.items():
                # extrinsics are 4x4, intrinsics 3x3
                assert matrix.shape == (4, 4) or matrix.shape == (3, 3)
                calib_path = os.path.join(calib_dir, f'{name}.txt')
                np.savetxt(calib_path, matrix)
            # cache frames
            os.mkdir(os.path.join(scene_path, CACHE_CONVENTION['lidar_dir']))
            os.mkdir(os.path.join(scene_path, CACHE_CONVENTION['camera_dir']))
            os.mkdir(os.path.join(scene_path, CACHE_CONVENTION['radar_dir']))
            os.mkdir(os.path.join(scene_path, CACHE_CONVENTION['ego_motion_dir']))
            os.mkdir(os.path.join(scene_path, CACHE_CONVENTION['object_anno_dir']))
            os.mkdir(os.path.join(scene_path, CACHE_CONVENTION['map_anno_dir']))
            os.mkdir(os.path.join(scene_path, CACHE_CONVENTION['sync_info_dir']))
            for frame in self.frames:
                frame.cache(scene_path)
            print(f'INFO: scene {self.meta.name} cached')
            return True
        else:
            print(f'INFO: abort scene {self.meta.name} cache')
            return False

    def get_statistic(self) -> SceneStatistic:
        """Aggregate per-frame statistics over all frames of the scene."""
        res = SceneStatistic()
        res.frame_num = len(self.frames)
        for frame in self.frames:
            info = frame.get_statistic()
            if info.obj_annotated:
                res.obj_anno_frame_num += 1
            if info.map_annotated:
                res.map_anno_frame_num += 1
            for key in info.obj_class_num.keys():
                if key in res.obj_class_num.keys():
                    res.obj_class_num[key] += info.obj_class_num[key]
                else:
                    res.obj_class_num[key] = info.obj_class_num[key]
            for key in info.map_class_num.keys():
                if key in res.map_class_num.keys():
                    res.map_class_num[key] += info.map_class_num[key]
                else:
                    res.map_class_num[key] = info.map_class_num[key]
        return res

    def get_obstacles_with_motion(self, ca_interval_s: float=3, frame_id: int=-1,
                                  coordinate_system=CoordinateSystem.ego,
                                  obj_anno_root: Optional[str]=None, json_folder: Optional[str]=None) \
            -> Union[Dict[int, List[MotionObstacle]], List[MotionObstacle], None]:
        """Obstacles with inferred velocity/acceleration.

        :param ca_interval_s: temporal context (s) used for motion inference
        :param frame_id: index into self.frames; negative returns the whole
                         timestamp->obstacles dict
        :param obj_anno_root: when set, annotations are read from this cache
                              root (with json_folder) instead of the frames
        :return: dict for all frames, list for one frame, or None on failure
        """
        if self.__infer_motion_info(ca_interval_s, coordinate_system, obj_anno_root, json_folder):
            # assert len(self.frames) == len(self.__motion_obstacles)
            if frame_id < 0:
                return self.__motion_obstacles
            elif frame_id < len(self.frames):
                frame_stamp = self.frames[frame_id].get_timestamp()
                if frame_stamp not in self.__motion_obstacles.keys():
                    print(f'INFO: no obstacle trace at frame {frame_stamp}')
                    return None
                return self.__motion_obstacles[frame_stamp]
            else:
                print(f'ERROR: frame_id {frame_id} exceeds the number of frames {len(self.frames)}')
                return None
        else:
            return None

    def __infer_motion_info(self, ca_interval_s: float, coordinate_system: CoordinateSystem,
                            obj_anno_root: Optional[str], json_folder: Optional[str]) -> bool:
        """Build tracks and infer motion, re-running only when the requested
        interval differs from the cached one; returns False when the scene
        cannot support motion inference."""
        if SupportedTag.continuous_anno not in self.meta.tags:
            print(f'ERROR: scene is not {SupportedTag.continuous_anno.value}, then motion info cannot be inferred')
            return False
        if ca_interval_s < MIN_CA_INTERVAL_S:
            print(f'ERROR: {ca_interval_s}s of interval is too short to find enough context to infer motion info')
            return False
        # allclose rather than == to tolerate float round-off in the cached value
        if not np.allclose(ca_interval_s, self.__ca_interval_s):
            self.__extract_tracks(coordinate_system, obj_anno_root, json_folder)
            for track in self.__obstacle_tracks.values():
                track.infer_motion(ca_interval_s)
            self.__tracks_to_frames_anno()
            self.__ca_interval_s = ca_interval_s
        return True

    def __extract_tracks(self, coordinate_system: CoordinateSystem,
                         obj_anno_root: Optional[str], json_folder: Optional[str]) -> None:
        """Group per-frame obstacles into per-id tracks keyed by timestamp."""
        self.__obstacle_tracks.clear()
        for frame in self.frames:
            # prefer cached annotations on disk when a root is given
            obstacles = frame.get_obstacles(coordinate_system) if obj_anno_root is None else \
                frame.get_obstacles_from_path(obj_anno_root, json_folder,
                                              coordinate_system=coordinate_system)
            if obstacles is None:
                continue
            for obj in obstacles:
                res = self.__obstacle_tracks.get(obj.id)
                if res is None:
                    self.__obstacle_tracks[obj.id] = ObjectTrack(frame.get_timestamp(), obj)
                else:
                    res.traces.update({frame.get_timestamp(): obj})

    def __tracks_to_frames_anno(self) -> None:
        """Flatten inferred tracks back into the per-frame-timestamp
        MotionObstacle lists (self.__motion_obstacles)."""
        self.__motion_obstacles.clear()
        for track in self.__obstacle_tracks.values():
            # motion inference must have produced one velocity/acceleration per trace
            assert set(track.traces.keys()) == set(track.velocity.keys()) == set(track.acceleration.keys())
            for frame_stamp in track.traces.keys():
                motion_obj = MotionObstacle(
                    id_=track.get_id(),
                    category=track.get_category(),
                    center=track.traces[frame_stamp].box.center.to_list(),
                    size=track.traces[frame_stamp].box.size.to_list(),
                    rotation=track.traces[frame_stamp].box.rotation.to_list(),
                    velocity=track.velocity[frame_stamp].to_list(),
                    acceleration=track.acceleration[frame_stamp].to_list(),
                    num_lidar_pts=track.traces[frame_stamp].num_lidar_pts,
                    num_radar_pts=track.traces[frame_stamp].num_radar_pts,
                )
                if frame_stamp in self.__motion_obstacles.keys():
                    self.__motion_obstacles[frame_stamp].append(motion_obj)
                else:
                    self.__motion_obstacles.update({frame_stamp: [motion_obj]})

class DatasetDevkit(metaclass=ABCMeta):
    """Abstract entry point of a dataset: holds the scene list and offers
    file-structure checking, loading, caching and statistics."""
    @abstractmethod
    def __init__(self, path=None, **kwargs) -> None:
        self.scenes: List[Scene] = []
        if path is not None and self.check_file_structure(path):
            self.load_dataset(path, **kwargs)

    # public methods
    @staticmethod
    @abstractmethod
    def check_file_structure(path) -> bool:
        """check file structure of path and print check result message
        :param path: path of dataset
        :return: check result, True(pass) or False(fail)
        """
        print(f'INFO: checking file structure of: {path}')
        if os.path.isdir(path):
            return True
        print(f'ERROR: {path} is not a directory')
        return False

    @abstractmethod
    def load_dataset(self, path) -> None:
        """Populate self.scenes from the dataset at ``path``."""
        pass

    def cache(self, path, force_clear: bool=False) -> bool:
        """Cache every scene under ``path``; False when the path is unusable."""
        if not check_create_or_clear_path(path, force_clear):
            print('INFO: abort dataset cache')
            return False
        for scene in self.scenes:
            scene.cache(path, force_clear=False)
        print('INFO: dataset cached')
        return True

    def get_statistic(self) -> DatasetStatistic:
        """Aggregate per-scene statistics over the whole dataset."""
        res = DatasetStatistic()
        res.scene_num = len(self.scenes)
        for scene in self.scenes:
            info = scene.get_statistic()
            res.frame_num += info.frame_num
            res.obj_anno_frame_num += info.obj_anno_frame_num
            res.map_anno_frame_num += info.map_anno_frame_num
            for key, num in info.obj_class_num.items():
                res.obj_class_num[key] = res.obj_class_num.get(key, 0) + num
            for key, num in info.map_class_num.items():
                res.map_class_num[key] = res.map_class_num.get(key, 0) + num
        return res

    # private methods
    @abstractmethod
    def _cache_for_fast_load(self) -> DatasetCache:
        """Build the payload used to reload this devkit quickly."""
        pass

    # def __repr__(self) -> str:
    #     msgs = 'Dataset Summary\n'
    #     info = self.get_statistic()
    #     msgs += f'{info.scene_num} scenes\n'
    #     msgs += f'{info.frame_num} frames\n'
    #     msgs += f'{info.obj_anno_frame_num} frames have object annotation\n'
    #     msgs += f'{info.map_anno_frame_num} frames have map annotation\n'
    #     for key, val in info.obj_class_num.items():
    #         msgs += f'- {val} {key}\n'
    #     for key, val in info.map_class_num.items():
    #         msgs += f'- {val} {key}\n'
    #     return msgs
