import os
from pathlib import Path
from typing import Dict, List, Tuple, Optional, Union
import numpy as np
from numpy.typing import NDArray
from PIL import Image
from tqdm import tqdm

from av2.datasets.sensor.constants import RingCameras, StereoCameras, AnnotationCategories
import av2.utils.io as io_utils
from av2.datasets.sensor.av2_sensor_dataloader import AV2SensorDataLoader
from av2.map.lane_segment import LaneType, LaneMarkType, LaneSegment
from av2.map.map_api import ArgoverseStaticMap
import av2.geometry.polyline_utils as polyline_utils

from common_util import read_json, write_json
from common_util import transform_3d_cloud, quaternion_to_euler
from common_util import merge_close_lines, get_lane_segments_id_dict, remove_duplicates
from dataset_sdk import SupportedTag, WeatherTag, LightingTag, RoadConditionTag, RoadTypeTag, Metadata
from dataset_sdk import CoordinateSystem, DatasetCache, Frame, Scene, DatasetDevkit, Obstacle, ObstacleCategory
from dataset_sdk import LaneDivider, LaneDividerLineType, LaneDividerColor
from dataset_sdk import LaneDividerJoint
from dataset_sdk import RoadEdge, RoadEdgeCategory
from dataset_sdk import RoadMarking, RoadMarkingCategory
from dataset_sdk import TrafficSign, TrafficSignCategory
# from dataset_sdk import TrafficLight, TrafficLightType, TrafficLightStatus
from dataset_sdk import CenterLine, CenterLineTopoPoint, CenterLineConnect
from dataset_sdk import MapInfo

__all__ = ['Argoverse2Devkit']

# Point-cloud channels exported by get_lidar_cloud, in output column order.
OUT_LIDAR_CHANNELS = ('x', 'y', 'z', 'intensity', 'laser_number', 'offset_ns')

# Reference sensor: frame timestamps are keyed to it and its sensor->ego
# extrinsics are the identity (see Argoverse2SceneCache calibration).
MAIN_SENSOR = 'up_lidar'
LIDARS = [MAIN_SENSOR]
# All ring and stereo camera channel names known to the av2 devkit.
CAMERAS = [cam.value for cam in RingCameras] + [cam.value for cam in StereoCameras]

# Active dataset split; rebound as a module global by Argoverse2Devkit.load_dataset.
SPLIT = 'train'
# Splits that ship object annotations (checked before reading annotations.feather).
ANNOTATED_SPLITS = ['train', 'val']

# Maps every av2 annotation category onto the SDK obstacle taxonomy.
# Categories with no matching obstacle class (riders, signs, rail vehicles, ...)
# map to `ignore`; small static objects (animals, bollards) fold into `debris`.
OBJ_CATEGORY_MAP = {
    AnnotationCategories.ANIMAL:                            ObstacleCategory.debris,
    AnnotationCategories.ARTICULATED_BUS:                   ObstacleCategory.bus,
    AnnotationCategories.BICYCLE:                           ObstacleCategory.bicycle,
    AnnotationCategories.BICYCLIST:                         ObstacleCategory.ignore,
    AnnotationCategories.BOLLARD:                           ObstacleCategory.debris,
    AnnotationCategories.BOX_TRUCK:                         ObstacleCategory.truck,
    AnnotationCategories.BUS:                               ObstacleCategory.bus,
    AnnotationCategories.CONSTRUCTION_BARREL:               ObstacleCategory.traffic_cone,
    AnnotationCategories.CONSTRUCTION_CONE:                 ObstacleCategory.traffic_cone,
    AnnotationCategories.DOG:                               ObstacleCategory.debris,
    AnnotationCategories.LARGE_VEHICLE:                     ObstacleCategory.car,
    AnnotationCategories.MESSAGE_BOARD_TRAILER:             ObstacleCategory.trailer,
    AnnotationCategories.MOBILE_PEDESTRIAN_CROSSING_SIGN:   ObstacleCategory.ignore,
    AnnotationCategories.MOTORCYCLE:                        ObstacleCategory.motorcycle,
    AnnotationCategories.MOTORCYCLIST:                      ObstacleCategory.ignore,
    AnnotationCategories.OFFICIAL_SIGNALER:                 ObstacleCategory.ignore,
    AnnotationCategories.PEDESTRIAN:                        ObstacleCategory.pedestrian,
    AnnotationCategories.RAILED_VEHICLE:                    ObstacleCategory.ignore,
    AnnotationCategories.REGULAR_VEHICLE:                   ObstacleCategory.car,
    AnnotationCategories.SCHOOL_BUS:                        ObstacleCategory.bus,
    AnnotationCategories.SIGN:                              ObstacleCategory.ignore,
    AnnotationCategories.STOP_SIGN:                         ObstacleCategory.ignore,
    AnnotationCategories.STROLLER:                          ObstacleCategory.pedestrian,
    AnnotationCategories.TRAFFIC_LIGHT_TRAILER:             ObstacleCategory.ignore,
    AnnotationCategories.TRUCK:                             ObstacleCategory.truck,
    AnnotationCategories.TRUCK_CAB:                         ObstacleCategory.truck,
    AnnotationCategories.VEHICULAR_TRAILER:                 ObstacleCategory.trailer,
    AnnotationCategories.WHEELCHAIR:                        ObstacleCategory.pedestrian,
    AnnotationCategories.WHEELED_DEVICE:                    ObstacleCategory.pedestrian,
    AnnotationCategories.WHEELED_RIDER:                     ObstacleCategory.pedestrian,
}

# LaneMarkType -> lane divider color. LaneMarkType.NONE is deliberately
# absent: unmarked boundaries are filtered out before this map is consulted
# (see get_unique_lanedivider).
DIVIDER_COLOR_MAP = {
    LaneMarkType.DASH_SOLID_YELLOW:     LaneDividerColor.yellow,
    LaneMarkType.DASH_SOLID_WHITE:      LaneDividerColor.white,
    LaneMarkType.DASHED_WHITE:          LaneDividerColor.white,
    LaneMarkType.DASHED_YELLOW:         LaneDividerColor.yellow,
    LaneMarkType.DOUBLE_SOLID_YELLOW:   LaneDividerColor.yellow,
    LaneMarkType.DOUBLE_SOLID_WHITE:    LaneDividerColor.white,
    LaneMarkType.DOUBLE_DASH_YELLOW:    LaneDividerColor.yellow,
    LaneMarkType.DOUBLE_DASH_WHITE:     LaneDividerColor.white,
    LaneMarkType.SOLID_YELLOW:          LaneDividerColor.yellow,
    LaneMarkType.SOLID_WHITE:           LaneDividerColor.white,
    LaneMarkType.SOLID_DASH_WHITE:      LaneDividerColor.white,
    LaneMarkType.SOLID_DASH_YELLOW:     LaneDividerColor.yellow,
    LaneMarkType.SOLID_BLUE:            LaneDividerColor.blue,
    # LaneMarkType.NONE:                  LaneDividerColor.unknown,
    LaneMarkType.UNKNOWN:               LaneDividerColor.unknown,
}

def get_unique_lanedivider(lane_segments: List[LaneSegment]) -> Tuple[List[LaneDivider], List[LaneDivider]]:
    """Convert lane-segment boundaries into lane dividers, de-duplicating shared ones.

    A boundary shared between two neighboring segments is emitted only once:
    after a boundary is processed, the matching side of the neighbor segment is
    marked visited.

    Args:
        lane_segments: Lane segments whose left/right boundaries are converted.

    Returns:
        Tuple ``(dashed_lines, solid_lines)`` of LaneDivider objects with
        placeholder ids (callers assign final ids).
    """
    dashed_lines: List[LaneDivider] = []
    solid_lines: List[LaneDivider] = []
    num_lane_segments = len(lane_segments)
    id_dict = get_lane_segments_id_dict(lane_segments)
    # visited[i, 0] / visited[i, 1]: left / right boundary of segment i already handled.
    visited = np.zeros((num_lane_segments, 2), dtype=bool)
    for i, ls in enumerate(lane_segments):
        if ls.left_mark_type == LaneMarkType.NONE and ls.right_mark_type == LaneMarkType.NONE:
            continue
        for pos, bound_type, bound_city, neighbor_id in zip([0, 1],
                                                            [ls.left_mark_type, ls.right_mark_type],
                                                            [ls.left_lane_boundary, ls.right_lane_boundary],
                                                            [ls.left_neighbor_id, ls.right_neighbor_id]):
            if (visited[i, pos]) or bound_type == LaneMarkType.NONE or np.isnan(bound_city.xyz).sum() > 0:
                continue
            # Double / mixed markings are rendered as two parallel polylines.
            if "DOUBLE" in bound_type or ('DASH' in bound_type and 'SOLID' in bound_type):
                bound_list = list(
                    polyline_utils.get_double_polylines(polyline=bound_city.xyz[:, :3], width_scaling_factor=0.1))
            else:
                bound_list = [bound_city.xyz]
            if 'DASH_SOLID' in bound_type or 'SOLID_DASH' in bound_type:
                # NOTE: mixed dash/solid pairs are deliberately exported as two
                # solid lines (the dashed variant is kept disabled).
                type_list = [LaneDividerLineType.solid, LaneDividerLineType.solid]
            else:
                num_lines = 2 if "DOUBLE" in bound_type else 1
                if "DASH" in bound_type:
                    # BUG FIX: test "DASH" (not "DASHED") so DOUBLE_DASH_* marks
                    # are matched; the old "DASHED" test left type_list unbound
                    # (or stale from a prior iteration) for them.
                    type_list = [LaneDividerLineType.dashed] * num_lines
                elif "SOLID" in bound_type:
                    type_list = [LaneDividerLineType.solid] * num_lines
                else:
                    # BUG FIX: e.g. UNKNOWN — no line style can be derived, so skip
                    # this boundary explicitly instead of reusing a stale type_list.
                    print(f"WARNING: unsupported lane mark type {bound_type}, skipped")
                    continue
            assert len(bound_list) == len(type_list)
            for points, line_type in zip(bound_list, type_list):
                divider = LaneDivider('placeholder', line_type,
                                      DIVIDER_COLOR_MAP[bound_type],
                                      points)
                if line_type == LaneDividerLineType.dashed:
                    dashed_lines.append(divider)
                elif line_type == LaneDividerLineType.solid:
                    solid_lines.append(divider)
                else:
                    print("Wrong lane divider type!")
                    print(bound_type)
            visited[i, pos] = True
            # Mark the shared boundary as visited on the neighbor, whichever side of
            # the neighbor faces this segment.
            if neighbor_id is not None and neighbor_id in id_dict:
                if lane_segments[id_dict[neighbor_id]].left_neighbor_id == ls.id:
                    visited[id_dict[neighbor_id], 0] = True
                elif lane_segments[id_dict[neighbor_id]].right_neighbor_id == ls.id:
                    visited[id_dict[neighbor_id], 1] = True
    return dashed_lines, solid_lines

def get_lane_dividers(av2map: ArgoverseStaticMap) -> List[LaneDivider]:
    """Extract merged, sequentially-numbered lane dividers from the static map."""
    # todo: add center lines, joints
    raw_dashed, raw_solid = get_unique_lanedivider(av2map.get_scenario_lane_segments())
    dividers = merge_close_lines(raw_dashed) + merge_close_lines(raw_solid)
    for idx, divider in enumerate(dividers):
        divider.id = str(idx)
    return dividers

def get_lane_divider_joints(av2map: ArgoverseStaticMap) -> List[LaneDividerJoint]:
    """Placeholder: no divider joints are produced for Argoverse 2; always empty."""
    joints: List[LaneDividerJoint] = []
    return joints

def get_road_edges(av2map: ArgoverseStaticMap) -> List[RoadEdge]:
    """Build road edges from the map's drivable-area polygons, de-duplicated,
    merged, and sequentially renumbered."""
    edges: List[RoadEdge] = [
        RoadEdge(area.id, RoadEdgeCategory.road_edge, area.xyz)
        for area in av2map.vector_drivable_areas.values()
    ]
    edges = merge_close_lines(remove_duplicates(edges))
    for idx, edge in enumerate(edges):
        edge.id = str(idx)
    return edges

def get_road_markings(av2map: ArgoverseStaticMap) -> List[RoadMarking]:
    """Convert pedestrian crossings into crosswalk road markings.

    The marking outline is edge1 followed by edge2 reversed, forming one
    continuous polygon boundary.
    """
    markings: List[RoadMarking] = []
    for crossing in av2map.vector_pedestrian_crossings.values():
        outline = np.vstack([crossing.edge1.xyz, np.flip(crossing.edge2.xyz, axis=0)])
        markings.append(RoadMarking(crossing.id, [RoadMarkingCategory.crosswalk], outline))
    return markings

def get_center_lines(av2map: ArgoverseStaticMap) -> List[CenterLine]:
    """Collect non-intersection lane centerlines, merged and sequentially renumbered."""
    collected: List[CenterLine] = []
    for ls in av2map.get_scenario_lane_segments():
        if ls.is_intersection:
            continue
        centerline = av2map.get_lane_segment_centerline(ls.id)
        if np.isnan(centerline).sum() > 0:
            continue    # drop polylines containing undefined vertices
        collected.append(CenterLine('placeholder', [], centerline))
    merged = merge_close_lines(collected)
    for idx, line in enumerate(merged):
        line.id = str(idx)
    return merged

# Annotation category string values that are exported as traffic signs,
# mapped to the SDK sign category (keys are .value strings because the
# annotation table stores raw strings).
TRAFFIC_SIGN_CATEGORY_MAP = {
    AnnotationCategories.MOBILE_PEDESTRIAN_CROSSING_SIGN.value: TrafficSignCategory.sign,
    AnnotationCategories.SIGN.value:                            TrafficSignCategory.sign,
    AnnotationCategories.STOP_SIGN.value:                       TrafficSignCategory.stop,
}

def get_traffic_signs(annotation_feather_path: Path, timestamp_ns: int, ego_pose: NDArray) -> List[TrafficSign]:
    """Read traffic-sign annotations for one frame and express them in the ego frame.

    Args:
        annotation_feather_path: Path to the per-log annotation feather file.
        timestamp_ns: Lidar timestamp (ns) selecting the frame of interest.
        ego_pose: 4x4 SE3 matrix used to shift signs into ego coordinates.
    """
    assert ego_pose.shape == (4, 4)
    annotation_df = io_utils.read_feather(annotation_feather_path)
    signs: List[TrafficSign] = []
    for row in annotation_df.itertuples():
        if row.timestamp_ns != timestamp_ns:
            continue
        if row.category not in TRAFFIC_SIGN_CATEGORY_MAP:
            continue
        sign = TrafficSign(row.track_uuid,
                           TRAFFIC_SIGN_CATEGORY_MAP[row.category],
                           [row.tx_m, row.ty_m, row.tz_m],
                           [row.length_m, row.width_m, row.height_m],
                           quaternion_to_euler([row.qx, row.qy, row.qz, row.qw]))
        sign.shift_coordinate_system(ego_pose[:3, 3], ego_pose[:3, :3])
        signs.append(sign)
    return signs

class Argoverse2FrameCache():
    """Per-frame cached data (currently just the ego pose) for fast dataset reload."""

    def __init__(self) -> None:
        # 4x4 city-frame ego pose (SE3 transform matrix); None until populated
        # by one of the factory classmethods. (Fixed annotation: was plain
        # NDArray despite the None initializer.)
        self.ego_pose: Optional[NDArray] = None

    @classmethod
    def from_loader(cls, av2loader: 'AV2SensorDataLoader', scene: str, frame_stamp: int) -> 'Argoverse2FrameCache':
        """Build the cache by querying the av2 loader for the ego pose at `frame_stamp`."""
        cache = cls()
        cache.ego_pose = av2loader.get_city_SE3_ego(scene, frame_stamp).transform_matrix
        return cache

    @classmethod
    def from_json(cls, data: dict) -> 'Argoverse2FrameCache':
        """Restore a cache from its `to_json` representation."""
        cache = cls()
        cache.ego_pose = np.array(data['ego_pose'])
        return cache

    def to_json(self) -> dict:
        """Serialize to a JSON-compatible dict (inverse of `from_json`)."""
        return {
            'ego_pose': self.ego_pose.tolist(),
        }

class Argoverse2SceneCache():
    """Cached per-log data: available sensors, calibration matrices, frame caches."""

    def __init__(self) -> None:
        self.lidars: List[str] = []
        self.cameras: List[str] = []
        self.calib: Dict[str, NDArray] = {}
        self.frames_cache: Dict[int, Argoverse2FrameCache] = {}

    @classmethod
    def from_loader(cls, av2loader: AV2SensorDataLoader, scene: str) -> 'Argoverse2SceneCache':
        """Build the cache for one log by probing the av2 loader on disk."""
        cache = cls()
        sensors_pose = io_utils.read_ego_SE3_sensor(av2loader._data_dir / scene)
        available = sensors_pose.keys()
        cache.lidars = [name for name in LIDARS if name in available]
        cache.cameras = [name for name in CAMERAS if name in available]
        cache.__abstract_calib(av2loader, scene)
        # Keep only frames for which every configured camera has an image.
        for stamp in av2loader.get_ordered_log_lidar_timestamps(scene):
            if cache.__check_sync_integrity(av2loader, scene, stamp):
                cache.frames_cache[stamp] = Argoverse2FrameCache.from_loader(av2loader, scene, stamp)
        return cache

    @classmethod
    def from_json(cls, data: dict) -> 'Argoverse2SceneCache':
        """Restore a cache from its `to_json` representation (stamps back to int)."""
        cache = cls()
        cache.lidars = data['lidars']
        cache.cameras = data['cameras']
        cache.calib = {name: np.array(matrix) for name, matrix in data['calib'].items()}
        cache.frames_cache = {
            int(stamp): Argoverse2FrameCache.from_json(frame_data)
            for stamp, frame_data in data['frames_cache'].items()
        }
        return cache

    def to_json(self) -> dict:
        """Serialize to a JSON-compatible dict (inverse of `from_json`)."""
        return {
            'lidars': self.lidars,
            'cameras': self.cameras,
            'calib': {name: matrix.tolist() for name, matrix in self.calib.items()},
            'frames_cache': {
                stamp: frame.to_json() for stamp, frame in self.frames_cache.items()
            },
        }

    def __abstract_calib(self, av2loader: AV2SensorDataLoader, scene: str) -> None:
        """Populate `calib` with sensor->ego extrinsics and camera intrinsics."""
        # The main lidar defines the ego frame, hence identity extrinsics.
        self.calib[f'{MAIN_SENSOR}_ego'] = np.identity(4)
        for cam_name in self.cameras:
            camera_info = av2loader.get_log_pinhole_camera(scene, cam_name)
            self.calib[f'{cam_name}_ego'] = np.linalg.inv(camera_info.extrinsics)
            self.calib[f'{cam_name}_intrinsic'] = camera_info.intrinsics.K

    def __check_sync_integrity(self, av2loader: AV2SensorDataLoader, scene: str, frame_stamp: int) -> bool:
        """True iff every configured camera has an image close to `frame_stamp`."""
        return all(
            av2loader.get_closest_img_fpath(scene, camera, frame_stamp) is not None
            for camera in self.cameras
        )

class Argoverse2DatasetCache(DatasetCache):
    """Whole-split fast-load cache mapping scene (log) names to scene caches."""

    def __init__(self) -> None:
        self.scenes_cache: Dict[str, Argoverse2SceneCache] = {}

    @classmethod
    def from_loader(cls, av2loader: AV2SensorDataLoader) -> 'Argoverse2DatasetCache':
        """Build the cache by iterating every log known to the av2 loader."""
        cache = cls()
        for scene in tqdm(av2loader.get_log_ids(), 'Iterating scenes cache'):
            cache.scenes_cache[scene] = Argoverse2SceneCache.from_loader(av2loader, scene)
        return cache

    @classmethod
    def from_save(cls, data: dict) -> 'Argoverse2DatasetCache':
        """Restore from the dict produced by `to_save`."""
        cache = cls()
        cache.scenes_cache = {
            name: Argoverse2SceneCache.from_json(scene_data)
            for name, scene_data in data.items()
        }
        return cache

    def to_save(self) -> dict:
        """Serialize every scene cache to a JSON-compatible dict."""
        return {name: scene.to_json() for name, scene in self.scenes_cache.items()}

class Argoverse2Frame(Frame):
    """A single synchronized frame of an Argoverse 2 log, keyed by lidar timestamp.

    Sensor data is read lazily from disk via the av2 loader. Each getter first
    calls the `Frame` base-class method and treats a `None` return as "not
    available", so the base class acts as a capability/validity gate.
    """

    def __init__(self, meta: Metadata, calib: Dict[str, NDArray], av2loader: AV2SensorDataLoader,
                 scene: str, frame_stamp: int, frame_cache: Argoverse2FrameCache,
                 lidars: List[str], cameras: List[str]) -> None:
        super().__init__(meta, calib)
        self.__loader = av2loader
        self.__scene = scene
        self.__stamp_ns = frame_stamp       # main-lidar timestamp, nanoseconds
        self.__ego_pose = frame_cache.ego_pose      # 4x4 SE3 matrix from the cache
        self.lidars = lidars
        self.cameras = cameras
        # self.radars = []

    @property
    def ego_pose(self) -> NDArray:
        """4x4 SE3 transform matrix mapping ego coordinates to city/world coordinates."""
        return self.__ego_pose

    def get_lidar_cloud(self, lidar: str, coordinate_system=CoordinateSystem.ego) -> Optional[NDArray]:
        """Load the lidar sweep as an (N, 6) float64 array (OUT_LIDAR_CHANNELS columns).

        Returns None when the base class rejects the request.
        """
        if super().get_lidar_cloud(lidar, coordinate_system) is None:
            return None
        fpath = self.__loader.get_lidar_fpath(self.__scene, self.__stamp_ns)
        sweep_df = io_utils.read_feather(fpath)
        cloud = sweep_df[list(OUT_LIDAR_CHANNELS)].to_numpy().astype(np.float64)
        if coordinate_system == CoordinateSystem.world:
            # Sweep is stored in the ego frame; shift into city/world coordinates.
            cloud = transform_3d_cloud(cloud, self.ego_pose[:3, 3], self.ego_pose[:3, :3])
        return cloud

    def get_camera_image(self, camera: str, undistort=True) -> Optional[Image.Image]:
        """Return the camera image closest in time to this frame's lidar timestamp.

        NOTE(review): `undistort` is accepted but never used in this
        implementation — confirm whether av2 images are already rectified.
        """
        if super().get_camera_image(camera) is None:
            return None
        fpath = self.__loader.get_closest_img_fpath(self.__scene, camera, self.__stamp_ns)
        np_image = io_utils.read_img(fpath)
        return Image.fromarray(np_image)

    def get_radar_cloud(self, radar: str, coordinate_system=CoordinateSystem.ego) -> Optional[NDArray]:
        """No radar handling here; defers entirely to the base class."""
        return super().get_radar_cloud(radar, coordinate_system)

    def get_obstacles(self, coordinate_system=CoordinateSystem.ego) -> Optional[List[Obstacle]]:
        """Read the log's annotation table and return this frame's obstacles.

        Returns None for unannotated splits or when the base class rejects the
        request; the `sensor` coordinate system falls back to ego coordinates.
        """
        if SPLIT not in ANNOTATED_SPLITS:
            print(f'INFO: for split {SPLIT}, object annotation not available')
            return None
        if super().get_obstacles(coordinate_system) is None:
            return None
        elif coordinate_system == CoordinateSystem.sensor:
            print(f'INFO: sensor coordinate system for "get_obstacles" is based on ego')
        objects: List[Obstacle] = []
        annotation_fpath = self.__loader._data_dir / self.__scene / "annotations.feather"
        annotation_df = io_utils.read_feather(annotation_fpath)
        for obj_anno in annotation_df.itertuples():
            # The table covers the whole log; keep only rows at this timestamp.
            if obj_anno.timestamp_ns != self.__stamp_ns:
                continue
            id_ = obj_anno.track_uuid
            category = OBJ_CATEGORY_MAP[AnnotationCategories(obj_anno.category)]
            center = [obj_anno.tx_m, obj_anno.ty_m, obj_anno.tz_m]
            size = [obj_anno.length_m, obj_anno.width_m, obj_anno.height_m]
            rotation = quaternion_to_euler([obj_anno.qx, obj_anno.qy, obj_anno.qz, obj_anno.qw])
            num_lidar_pts = obj_anno.num_interior_pts
            num_radar_pts = 0       # no radar point counts available here
            obj = Obstacle(id_, category, center, size, rotation, num_lidar_pts, num_radar_pts)
            if coordinate_system == CoordinateSystem.world:
                obj.shift_coordinate_system(self.ego_pose[:3, 3], self.ego_pose[:3, :3])
            objects.append(obj)
        return objects

    def get_map_info(self, coordinate_system=CoordinateSystem.ego, roi: Union[float, tuple]=None) -> Optional[MapInfo]:
        """Assemble map elements (dividers, centerlines, edges, markings) for this frame.

        Map data is loaded in city/world coordinates. For ego/sensor requests it
        is shifted into the ego frame, and only then does the optional `roi`
        filter apply — a world-frame request ignores `roi`.
        """
        if super().get_map_info(coordinate_system) is None:
            return None
        elif coordinate_system == CoordinateSystem.sensor:
            print(f'INFO: sensor coordinate system for "get_map_info" is based on ego')
        av2map = ArgoverseStaticMap.from_map_dir(self.__loader.get_log_map_dirpath(self.__scene))
        map_info = MapInfo()
        map_info.lane_dividers = get_lane_dividers(av2map)
        map_info.lane_divider_joints = get_lane_divider_joints(av2map)
        map_info.center_lines = get_center_lines(av2map)
        map_info.road_edges = get_road_edges(av2map)
        map_info.road_markings = get_road_markings(av2map)
        if coordinate_system != CoordinateSystem.world:
            map_info.shift_coordinate_system(self.ego_pose[:3, 3], self.ego_pose[:3, :3], inverse=True)
            if roi is not None:
                map_info = map_info.roi_filter(MapInfo.generate_roi_points(roi))
        return map_info

    def get_timestamp(self, sensor: str=None) -> Optional[int]:
        """Return the capture timestamp in microseconds for `sensor` (main lidar by default)."""
        if super().get_timestamp(sensor) is None:
            return None
        if sensor is None or sensor == MAIN_SENSOR:
            return int(self.__stamp_ns / 1e3)   # nanoseconds -> microseconds
        else:
            # Cameras are not captured exactly at the lidar stamp; use the closest.
            stamp_ns = self.__loader._sdb.get_closest_cam_channel_timestamp(self.__stamp_ns, sensor, self.__scene)
            return int(stamp_ns / 1e3)

class Argoverse2Scene(Scene):
    """A single Argoverse 2 log wrapped in the SDK `Scene` interface."""

    def __init__(self, av2loader: AV2SensorDataLoader, scene_name: str, scene_cache: Argoverse2SceneCache) -> None:
        super().__init__()
        self.meta.name = scene_name
        # self.meta.description = ''
        tags = [SupportedTag.continuous_anno, SupportedTag.lane_anno,
                SupportedTag.edge_anno, SupportedTag.mark_anno, SupportedTag.center_line_anno]
        if SPLIT in ANNOTATED_SPLITS:
            tags.append(SupportedTag.obj_anno)
        self.meta.tags = tags
        self.meta.lidar_channels = OUT_LIDAR_CHANNELS
        # self.meta.radar_channels = ()
        self.meta.main_sensor = MAIN_SENSOR
        # self.weather = WeatherTag.unknown
        # self.lighting = LightingTag.unknown
        # self.road_condition = RoadConditionTag.unknown
        # self.road_type = RoadTypeTag.unknown
        self.calib = scene_cache.calib
        # One frame per cached (synchronized) lidar timestamp.
        self.frames.extend(
            Argoverse2Frame(self.meta, self.calib, av2loader,
                            scene_name, stamp, frame_cache,
                            scene_cache.lidars, scene_cache.cameras)
            for stamp, frame_cache in scene_cache.frames_cache.items())

class Argoverse2Devkit(DatasetDevkit):
    """Dataset devkit adapter exposing Argoverse 2 sensor logs as SDK scenes."""

    def __init__(self, path=None, split=SPLIT) -> None:
        super().__init__(path, split=split)

    @staticmethod
    def check_file_structure(path) -> bool:
        """Delegate the directory-layout check to the base class."""
        return DatasetDevkit.check_file_structure(path)

    def load_dataset(self, path, split=SPLIT) -> None:
        """Load all scenes of `split` under `path`, using a fast-load cache when present.

        Side effect: rebinds the module global SPLIT so that frame/scene code
        knows whether the active split carries annotations.
        """
        if not isinstance(path, Path):
            path = Path(path)
        path = path / split
        assert path.exists(), f'ERROR: split folder {path} not found'
        global SPLIT
        SPLIT = split
        av2loader = AV2SensorDataLoader(path, path)
        print('INFO: Argoverse2 api initialized')
        # Use pathlib consistently instead of mixing in os.path; str() keeps the
        # argument type passed to read_json/write_json unchanged.
        cache_path = path / f'argoverse2_{SPLIT}_fastload.cache'
        if cache_path.exists():
            cache = Argoverse2DatasetCache.from_save(read_json(str(cache_path)))
            print(f'INFO: Argoverse2 {SPLIT} cache loaded')
        else:
            cache = self._cache_for_fast_load(av2loader)
            write_json(str(cache_path), cache.to_save())
        for scene_name, scene_cache in cache.scenes_cache.items():
            self.scenes.append(Argoverse2Scene(av2loader, scene_name, scene_cache))

    def _cache_for_fast_load(self, av2loader: AV2SensorDataLoader) -> Argoverse2DatasetCache:
        """Generate the fast-reload cache by scanning the whole split (slow path)."""
        print('INFO: generating cache of argoverse2 for fast-reload ...')
        return Argoverse2DatasetCache.from_loader(av2loader)
