import os
import numpy as np
from numpy.typing import NDArray
from typing import List, Dict, Optional, Union
from math import cos, sin, pi
from natsort import natsorted
from PIL import Image
from tqdm import tqdm

from common_util import read_json, read_pickle, write_pickle, search_file_in_path, transform_3d_cloud, read_image
from common_util import line_in_polygon_roi
from dataset_sdk import SupportedTag, WeatherTag, LightingTag, RoadConditionTag, RoadTypeTag, Metadata
from dataset_sdk import CoordinateSystem, Frame, Scene, DatasetDevkit, Obstacle, ObstacleCategory
from dataset_sdk import LaneDivider, LaneDividerLineType, LaneDividerColor
from dataset_sdk import LaneDividerJoint
from dataset_sdk import RoadEdge, RoadEdgeCategory
from dataset_sdk import RoadMarking, RoadMarkingCategory
from dataset_sdk import TrafficSign, TrafficSignCategory
from dataset_sdk import TrafficLight, TrafficLightType, TrafficLightStatus
from dataset_sdk import MapInfo

__all__ = ['HongjingFrame', 'HongjingScene', 'HongjingDevkit']

# File-layout convention for a Hongjing scene directory: the per-scene
# metadata file name, the sensor / annotation sub-directory names, the raw
# binary dtypes used by np.fromfile, and the name of the fast-load cache
# file written at the dataset root.
HJ_CONVENTION = {
    'metadata': 'metadata.json',
    'calibration_dir': 'calib',
    'lidar_dir': 'lidars',
    'camera_dir': 'cameras',
    'radar_dir': 'radars',
    'ego_motion_dir': 'ego_motion',
    'object_anno_dir': 'obj_anno',
    'map_anno_dir': 'map_anno',
    'sync_info_dir': 'sync_info',
    'lidar_dtype': 'float32',
    'camera_dtype': 'uint8',
    'radar_dtype': 'float32',
    'cache_file_name': 'hongjing_fastload.cache',
}

def gather_scene(scene_path: str, convention: dict) -> dict:
    """Collect the cacheable layout of one scene directory.

    Returns a dict with the sensor name lists ('lidars'/'cameras'/'radars')
    and 'frame_sync', mapping int frame timestamp -> parsed sync-info JSON.

    Fix: missing sensor directories now yield an empty list instead of
    raising FileNotFoundError, matching HongjingScene.from_path which builds
    the same structure with an `os.path.isdir` guard.
    """
    scene_cache: dict = {'frame_sync': {}}
    for sensor in ('lidar', 'camera', 'radar'):
        sensor_dir = os.path.join(scene_path, convention[f'{sensor}_dir'])
        scene_cache[f'{sensor}s'] = os.listdir(sensor_dir) if os.path.isdir(sensor_dir) else []
    sync_dir = os.path.join(scene_path, convention['sync_info_dir'])
    sync_list = search_file_in_path(sync_dir, suffix='.json', remove_suf=True, sort=True)
    for frame in sync_list:
        # sync file stems are frame timestamps; index the cache by int
        scene_cache['frame_sync'][int(frame)] = read_json(os.path.join(sync_dir, f'{frame}.json'))
    return scene_cache

def get_metadata(meta_path) -> Metadata:
    """Parse a scene's metadata JSON file into a Metadata object.

    Required fields: name, description, tags, lidar_channels, radar_channels,
    main_sensor. Optional tag fields (weather, lighting, road_condition,
    road_type) are set only when present in the JSON.

    :param meta_path: path to the scene's metadata JSON file.
    :return: populated Metadata instance.
    """
    meta = Metadata()
    metadata_dict: dict = read_json(meta_path)
    meta.name = metadata_dict['name']
    meta.description = metadata_dict['description']
    meta.tags = [SupportedTag(i) for i in metadata_dict['tags']]
    meta.lidar_channels = tuple(metadata_dict['lidar_channels'])
    meta.radar_channels = tuple(metadata_dict['radar_channels'])
    meta.main_sensor = metadata_dict['main_sensor']
    # Optional tags: set the attribute only when the field exists.
    # (table-driven; plain `in metadata_dict` membership instead of `.keys()`)
    optional_tags = (('weather', WeatherTag), ('lighting', LightingTag),
                     ('road_condition', RoadConditionTag), ('road_type', RoadTypeTag))
    for field, tag_cls in optional_tags:
        if field in metadata_dict:
            setattr(meta, field, tag_cls(metadata_dict[field]))
    return meta

def get_roi_mask(points: NDArray, roi=-1.) -> NDArray:
    """Return the indices of points within a circular ROI around the origin.

    :param points: (N, >=2) array; only columns 0 and 1 (x, y) are used.
    :param roi: ROI radius; a negative value (default) disables filtering
        and every index is returned.
    :return: 1-D int index array of the rows with x^2 + y^2 < roi^2.
    """
    assert points.shape[1] >= 2
    if roi < 0:
        # np.arange instead of np.array(range(...)): same result, built in C
        return np.arange(points.shape[0])
    # flatnonzero of the boolean mask == full_index_array[mask]
    return np.flatnonzero(points[:, 0]**2 + points[:, 1]**2 < roi**2)

# Number of vertices of the regular polygon used to approximate the circular ROI.
ROI_SAMPLE = 32
def filter_line_pts(points: NDArray, roi=-1.) -> List[NDArray]:
    """Clip a polyline to a circular ROI around the origin.

    :param points: polyline vertices.
    :param roi: ROI radius; negative (default) disables clipping and the
        input is returned unchanged as a single-element list.
    :return: list of polyline segments inside the ROI polygon.
    """
    if roi < 0:
        return [points]
    vertices = []
    for k in range(ROI_SAMPLE):
        # same arithmetic as the polygon-vertex formula: angle = 2*pi*k/ROI_SAMPLE
        vertices.append([cos(pi/ROI_SAMPLE*2*k)*roi, sin(pi/ROI_SAMPLE*2*k)*roi])
    return line_in_polygon_roi(points, np.array(vertices))

class SmartCalibDict:
    """Lazy-loading calibration store.

    Valid keys are derived from the sensor lists: '<sensor>_ego' for every
    sensor, plus '<camera>_intrinsic' and '<camera>_distortion' for cameras.
    The matching '<key>.txt' file is loaded from the calib directory on first
    access, shape-checked, and memoized for later lookups.
    """

    def __init__(self, path: str, calib_dir: str,
                 lidars: List[str], cameras: List[str], radars: List[str]) -> None:
        self.__calib_root = os.path.join(path, calib_dir)
        self.__keys = [f'{name}_ego' for name in lidars + cameras + radars]
        for cam in cameras:
            self.__keys.append(f'{cam}_intrinsic')
            self.__keys.append(f'{cam}_distortion')
        self.__calibs: Dict[str, NDArray] = {}  # memoized loaded arrays

    def keys(self) -> List[str]:
        """Return the list of all valid calibration keys."""
        return self.__keys

    def __getitem__(self, key: str) -> NDArray:
        # fast path: already loaded
        try:
            return self.__calibs[key]
        except KeyError:
            pass
        if key not in self.__keys:
            raise KeyError(f'ERROR: unknown calib key: {key}')
        calib_txt = os.path.join(self.__calib_root, f'{key}.txt')
        if not os.path.exists(calib_txt):
            raise FileNotFoundError(f'ERROR: calib file at {calib_txt} not found')
        arr = np.loadtxt(calib_txt)
        # sanity-check the expected layout per key suffix
        if key.endswith('_ego'):
            assert arr.shape == (4, 4)
        elif key.endswith('_intrinsic'):
            assert arr.shape == (3, 3)
        elif key.endswith('_distortion'):
            assert arr.size in [4, 5, 8, 9, 10, 14]
        else:
            raise NotImplementedError(f'ERROR: somehow {key} is a valid key, but its suffix is un-recognized')
        self.__calibs[key] = arr
        return arr

class HongjingFrame(Frame):
    """One synchronized multi-sensor frame of a Hongjing scene.

    All sensor data is resolved lazily from disk. The per-frame sync table
    (``cache['frame_sync'][stamp]``) maps a sensor-directory name (e.g.
    ``convention['lidar_dir']``) to ``{sensor_name: per-sensor timestamp}``
    and is used to locate each sensor's raw file for this frame.
    """

    def __init__(self, meta: Metadata, calib: Dict[str, NDArray], stamp: int,
                 path, convention: dict, cache: dict) -> None:
        super().__init__(meta, calib)
        self.__stamp = stamp  # frame timestamp; also the stem of ego/annotation files
        self.__path = path  # scene root directory
        self.__convention: dict = convention
        # sensor-dir name -> {sensor: timestamp} for this frame
        self.__sync: Dict[str, Dict[str, int]] = cache['frame_sync'][stamp]
        self.__ego_pose = None  # lazily loaded; see the `ego_pose` property
        self.lidars: List[str] = cache['lidars']
        self.cameras: List[str] = cache['cameras']
        self.radars: List[str] = cache['radars']

    @property
    def ego_pose(self) -> NDArray:
        """Ego pose of this frame, loaded once from ego_motion/<stamp>.txt.

        Indexed elsewhere as ``[:3, -1]`` / ``[:3, :3]``, so the file is
        assumed to hold a 4x4 homogeneous transform (not asserted here).
        """
        if self.__ego_pose is None:
            self.__ego_pose = np.loadtxt(
                os.path.join(self.__path, self.__convention['ego_motion_dir'], f'{self.__stamp}.txt'))
        return self.__ego_pose

    def get_sensor_params(self) -> dict:
        """Return per-sensor file paths and extrinsics for lidars and cameras.

        Lidars map to ``[relative_path, translation, rotation]`` (from the
        '<lidar>_ego' calib); cameras map to ``[relative_path]`` only.
        NOTE(review): the relative path is built by slicing at the last '/',
        which assumes POSIX separators and at least one '/' in the scene
        path -- confirm on Windows / bare relative paths.
        """
        params = {}
        for lidar in self.lidars:
            lidar_path = os.path.join(self.__path[self.__path.rfind('/'):], self.__convention['lidar_dir'], lidar,
                                      f'{self.__sync[self.__convention["lidar_dir"]][lidar]}.bin')
            lidar_path = '.' + lidar_path  # yields './<scene>/<lidar_dir>/...'
            sensor_ego = self.calib[f'{lidar}_ego']
            translation = sensor_ego[:3, 3]
            rotation = sensor_ego[:3, :3]
            params[lidar] = [lidar_path, translation, rotation]

        for camera in self.cameras:
            camera_path = os.path.join(self.__path[self.__path.rfind('/'):], self.__convention['camera_dir'],
                                       camera, f'{self.__sync[self.__convention["camera_dir"]][camera]}.jpg')
            camera_path = '.' + camera_path
            params[camera] = [camera_path]
        return params

    def get_lidar_cloud(self, lidar: str, coordinate_system=CoordinateSystem.ego) -> Optional[NDArray]:
        """Load the point cloud of `lidar` in the requested coordinate system.

        Returns None when the superclass check returns None. Stored clouds
        are in ego coordinates: `sensor` applies the inverse sensor-to-ego
        extrinsic, `world` applies the ego pose.
        """
        if super().get_lidar_cloud(lidar, coordinate_system) is None:
            return None
        cloud = np.fromfile(
            os.path.join(self.__path, self.__convention['lidar_dir'], lidar,
                         f'{self.__sync[self.__convention["lidar_dir"]][lidar]}.bin'),
            dtype=self.__convention['lidar_dtype']).reshape(-1, len(self.meta.lidar_channels))
        if coordinate_system == CoordinateSystem.sensor:
            lidar_ego = self.calib[f'{lidar}_ego']
            return transform_3d_cloud(cloud, lidar_ego[:3, -1], lidar_ego[:3, :3], inverse=True)
        elif coordinate_system == CoordinateSystem.ego:
            return cloud
        elif coordinate_system == CoordinateSystem.world:
            # cast to float64 before the world transform (presumably to keep
            # precision for large world coordinates)
            return transform_3d_cloud(cloud.astype(np.float64), self.ego_pose[:3, -1], self.ego_pose[:3, :3])
        else:
            raise NotImplementedError

    def get_camera_image(self, camera: str, undistort=True) -> Optional[Image.Image]:
        """Load the image of `camera`; fisheye images are undistorted by default.

        NOTE(review): a 9-element distortion vector is returned as-is with no
        cv.undistort call -- presumably a distortion model cv.undistort does
        not support; confirm against the calibration pipeline.
        """
        if super().get_camera_image(camera) is None:
            return None
        image_path = os.path.join(self.__path, self.__convention['camera_dir'], camera,
                                  f'{self.__sync[self.__convention["camera_dir"]][camera]}.jpg')
        if 'fisheye' in camera and undistort:
            if len(self.calib[f'{camera}_distortion']) == 9:
                return read_image(image_path)
            # imported lazily: cv2 is only needed on this undistortion path
            import cv2 as cv
            cv_img = cv.imread(image_path)
            cv_img = cv.undistort(cv_img, self.calib[f'{camera}_intrinsic'], self.calib[f'{camera}_distortion'],
                                  None, self.calib[f'{camera}_intrinsic'])
            return Image.fromarray(cv.cvtColor(cv_img, cv.COLOR_BGR2RGB))
        else:
            return read_image(image_path)

    def get_radar_cloud(self, radar: str, coordinate_system=CoordinateSystem.ego) -> Optional[NDArray]:
        """Load the radar cloud of `radar` in the requested coordinate system.

        Same coordinate handling as get_lidar_cloud.
        NOTE(review): unlike get_lidar_cloud, the world branch does not cast
        to float64 -- confirm whether that asymmetry is intentional.
        """
        if super().get_radar_cloud(radar, coordinate_system) is None:
            return None
        cloud = np.fromfile(
            os.path.join(self.__path, self.__convention['radar_dir'], radar,
                         f'{self.__sync[self.__convention["radar_dir"]][radar]}.bin'),
            dtype=self.__convention['radar_dtype']).reshape(-1, len(self.meta.radar_channels))
        if coordinate_system == CoordinateSystem.sensor:
            radar_ego = self.calib[f'{radar}_ego']
            return transform_3d_cloud(cloud, radar_ego[:3, -1], radar_ego[:3, :3] ,inverse=True)
        elif coordinate_system == CoordinateSystem.ego:
            return cloud
        elif coordinate_system == CoordinateSystem.world:
            return transform_3d_cloud(cloud, self.ego_pose[:3, -1], self.ego_pose[:3, :3])
        else:
            raise NotImplementedError

    def get_obstacles(self, coordinate_system=CoordinateSystem.ego) -> Optional[List[Obstacle]]:
        """Load this frame's object annotations, or None when the annotation
        file is missing (or the superclass check returns None).

        Annotations are stored in ego coordinates; `sensor` shifts them into
        the main sensor's frame, `world` into world coordinates.
        """
        if super().get_obstacles(coordinate_system) is None:
            return None
        elif coordinate_system == CoordinateSystem.sensor:
            print(f'INFO: sensor coordinate system for "get_obstacles" is based on {self.meta.main_sensor}')
        anno_path = os.path.join(self.__path, self.__convention['object_anno_dir'], f'{self.__stamp}.json')
        if not os.path.exists(anno_path):
            # print(f'INFO: frame {self.__stamp} has no object annotation')
            return None
        objects: List[Obstacle] = []
        annos = read_json(anno_path)
        for anno in annos:
            objects.append(Obstacle.from_dict(anno))
        if coordinate_system == CoordinateSystem.ego:
            pass  # annotations are already in ego coordinates
        elif coordinate_system == CoordinateSystem.sensor:
            sensor_ego = self.calib[f'{self.meta.main_sensor}_ego']
            for obj in objects:
                obj.shift_coordinate_system(sensor_ego[:3, -1], sensor_ego[:3, :3], inverse=True)
        elif coordinate_system == CoordinateSystem.world:
            for obj in objects:
                obj.shift_coordinate_system(self.ego_pose[:3, -1], self.ego_pose[:3, :3])
        return objects

    def get_map_info(self, coordinate_system=CoordinateSystem.ego, roi: Union[float, tuple]=None) -> Optional[MapInfo]:
        """Load this frame's map annotation, or None when the annotation file
        is missing (or the superclass check returns None).

        The optional ROI filter is applied in ego coordinates, before any
        coordinate-system shift.
        """
        if super().get_map_info(coordinate_system) is None:
            return None
        elif coordinate_system == CoordinateSystem.sensor:
            print(f'INFO: sensor coordinate system for "get_map_info" is based on {self.meta.main_sensor}')
        anno_path = os.path.join(self.__path, self.__convention['map_anno_dir'], f'{self.__stamp}.json')
        if not os.path.exists(anno_path):
            # print(f'INFO: frame {self.__stamp} has no map annotation')
            return None
        map_info = MapInfo.from_dict(read_json(anno_path))
        if roi is not None:
            map_info = map_info.roi_filter(MapInfo.generate_roi_points(roi))
        if coordinate_system == CoordinateSystem.ego:
            pass  # map annotations are already in ego coordinates
        elif coordinate_system == CoordinateSystem.sensor:
            sensor_ego = self.calib[f'{self.meta.main_sensor}_ego']
            map_info.shift_coordinate_system(sensor_ego[:3, -1], sensor_ego[:3, :3], inverse=True)
        elif coordinate_system == CoordinateSystem.world:
            map_info.shift_coordinate_system(self.ego_pose[:3, -1], self.ego_pose[:3, :3])
        return map_info

    def get_timestamp(self, sensor=None) -> Optional[int]:
        """Return the frame timestamp, or the per-sensor sync timestamp when
        `sensor` is given. The assert guarantees one branch below matches."""
        if super().get_timestamp(sensor) is None:
            return None
        if sensor is None:
            return self.__stamp
        assert sensor in self.all_sensors()
        if sensor in self.lidars:
            return self.__sync[self.__convention["lidar_dir"]][sensor]
        elif sensor in self.cameras:
            return self.__sync[self.__convention["camera_dir"]][sensor]
        elif sensor in self.radars:
            return self.__sync[self.__convention["radar_dir"]][sensor]

class HongjingScene(Scene):
    """A single Hongjing scene: metadata, lazy calibration store, and one
    HongjingFrame per synchronized timestamp."""

    def __init__(self, path, convention: dict, cache: dict) -> None:
        super().__init__()
        self.meta = get_metadata(os.path.join(path, convention['metadata']))
        self.calib = SmartCalibDict(path, convention['calibration_dir'],
                                    cache['lidars'], cache['cameras'], cache['radars'])
        # one frame per entry of the sync table, in cache order
        for stamp in cache['frame_sync']:
            self.frames.append(HongjingFrame(self.meta, self.calib, int(stamp), path, convention, cache))

    @staticmethod
    def check_path(path) -> bool:
        """Return True if `path` is a directory containing the mandatory
        scene entries (metadata, calib, ego motion, sync info)."""
        required = {'metadata.json', 'calib', 'ego_motion', 'sync_info'}
        if not os.path.isdir(path):
            return False
        return required.issubset(os.listdir(path))

    @classmethod
    def from_path(cls, path, convention: dict=HJ_CONVENTION) -> Optional['HongjingScene']:
        """Build a scene from a directory, or return None if the directory
        does not look like a valid scene."""
        if not HongjingScene.check_path(path):
            return None
        scene_cache: dict = {}
        for sensor in ('lidar', 'camera', 'radar'):
            sensor_dir = os.path.join(path, convention[f'{sensor}_dir'])
            # sensor dirs are optional; missing ones become empty lists
            scene_cache[f'{sensor}s'] = os.listdir(sensor_dir) if os.path.isdir(sensor_dir) else []
        scene_cache['frame_sync'] = {}
        sync_dir = os.path.join(path, convention['sync_info_dir'])
        sync_list = search_file_in_path(sync_dir, suffix='.json', remove_suf=True, sort=True)
        for frame in sync_list:
            scene_cache['frame_sync'][int(frame)] = read_json(os.path.join(sync_dir, f'{frame}.json'))
        return cls(path, convention, scene_cache)

class HongjingDevkit(DatasetDevkit):
    """Devkit entry point for the Hongjing dataset: discovers scene
    directories under a dataset root and loads them through a pickled
    fast-load cache.

    Fixes: `check_cache_valid` was annotated `-> None` though it returns
    bool; `_cache_for_fast_load` had a mutable default argument ({}); the
    commented-out multiprocess variant and the `update` callback that only
    it used were dead code and have been removed.
    """

    def __init__(self, path=None, update_cache=True) -> None:
        super().__init__(path, update_cache=update_cache)

    @staticmethod
    def check_file_structure(path) -> bool:
        """Return True if `path` passes the base devkit check and contains
        at least one valid Hongjing scene directory."""
        if not DatasetDevkit.check_file_structure(path):
            return False
        for scene in os.listdir(path):
            if HongjingScene.check_path(os.path.join(path, scene)):
                print('INFO: check passed')
                return True
        print('INFO: check failed, no scene found under path')
        return False

    def load_dataset(self, path, update_cache=True, convention: dict=HJ_CONVENTION) -> None:
        """Populate `self.scenes` from `path`, reusing the pickled fast-load
        cache when it is present and newer than every scene directory.

        :param path: dataset root containing scene sub-directories.
        :param update_cache: when True, rebuild the cache if any scene dir
            is newer than the cache file; when False, trust an existing cache.
        :param convention: file-layout convention, see HJ_CONVENTION.
        """
        def check_cache_valid() -> bool:
            # cache must exist; with update_cache, every scene dir must also
            # be older than the cache file
            if not os.path.isfile(cache_path):
                return False
            if not update_cache:
                return True
            for file in os.listdir(path):
                file_path = os.path.join(path, file)
                if not HongjingScene.check_path(file_path):
                    continue
                if os.path.getmtime(file_path) > os.path.getmtime(cache_path):
                    print(f'INFO: {file} is newer than cache, update cache ...')
                    return False
            return True

        def reformat_frame_sync(syncs: Dict[str, dict]) -> Dict[int, dict]:
            # normalize frame_sync keys to int (cached data may carry str
            # keys -- presumably from older cache formats; verify)
            return {int(key): val for key, val in syncs.items()}

        self.scenes.clear()
        cache_path = os.path.join(path, convention['cache_file_name'])
        if check_cache_valid():
            cache: dict = read_pickle(cache_path)
            for scene_cache in cache.values():
                scene_cache['frame_sync'] = reformat_frame_sync(scene_cache['frame_sync'])
        else:
            # start from the stale cache (if any) so unchanged scenes are kept
            cache = read_pickle(cache_path) if os.path.isfile(cache_path) else {}
            cache = self._cache_for_fast_load(path, convention, cache)
            if os.path.exists(cache_path):
                # keep exactly one backup of the previous cache file
                if os.path.exists(f'{cache_path}.bak'):
                    os.remove(f'{cache_path}.bak')
                os.rename(cache_path, f'{cache_path}.bak')
            write_pickle(cache_path, cache, force=True)
        for scene, scene_cache in cache.items():
            self.scenes.append(HongjingScene(os.path.join(path, scene), convention, scene_cache))

    def _cache_for_fast_load(self, path, convention: dict,
                             cache: Optional[Dict[str, dict]] = None) -> dict:
        """Gather (or refresh) the fast-load cache for every scene under `path`.

        Scenes already present in `cache` are kept as-is; scenes no longer on
        disk are dropped.

        :param cache: existing cache to extend; None (the former mutable `{}`
            default, now fixed) starts from an empty cache.
        :return: the updated cache dict, also mutated in place when given.
        """
        cache = {} if cache is None else cache
        sub_folders = natsorted(os.listdir(path))
        pbar = tqdm(sub_folders)
        for scene in pbar:
            if scene in cache:
                continue  # already cached; mtime freshness is checked by the caller
            scene_path = os.path.join(path, scene)
            if HongjingScene.check_path(scene_path):
                pbar.set_description(f'loading {scene} cache')
                cache[scene] = gather_scene(scene_path, convention)
        # drop cache entries for scenes that disappeared from disk
        for scene in set(cache) - set(sub_folders):
            del cache[scene]
        return cache
