# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from os import path as osp

import mmcv
import numpy as np
import pyquaternion
from nuscenes.utils.data_classes import Box as NuScenesBox

from ..core import show_result
from ..core.bbox import Box3DMode, Coord3DMode, LiDARInstance3DBoxes
from .builder import DATASETS
from .custom_3d import Custom3DDataset
from .nuscenes_dataset import NuScenesDataset4d
from .pipelines import Compose
from .pipelines import loading 
from mmcv.parallel import collate
import pickle

@DATASETS.register_module()
class HJDataset(NuScenesDataset4d):
    """HJ dataset built on ``NuScenesDataset4d``.

    Adds vector-map (lane / road-marking) ground-truth assembly and
    key-frame selection on top of the nuScenes-style 4D dataset.
    """

    # Raw annotation category name -> detection class name.
    NameMapping = {
        'movable_object.barrier': 'barrier',
        'vehicle.bicycle': 'bicycle',
        'vehicle.bus.bendy': 'bus',
        'vehicle.bus.rigid': 'bus',
        'vehicle.car': 'car',
        'vehicle.construction': 'construction_vehicle',
        'vehicle.motorcycle': 'motorcycle',
        'human.pedestrian.adult': 'pedestrian',
        'human.pedestrian.child': 'pedestrian',
        'human.pedestrian.construction_worker': 'pedestrian',
        'human.pedestrian.police_officer': 'pedestrian',
        'movable_object.trafficcone': 'traffic_cone',
        'vehicle.trailer': 'trailer',
        'vehicle.truck': 'truck'
    }
    # Fallback attribute per class (empty string = no attribute).
    DefaultAttribute = {
        'car': 'vehicle.parked',
        'pedestrian': 'pedestrian.moving',
        'trailer': 'vehicle.parked',
        'truck': 'vehicle.parked',
        'bus': 'vehicle.moving',
        'motorcycle': 'cycle.without_rider',
        'construction_vehicle': 'vehicle.parked',
        'bicycle': 'cycle.without_rider',
        'barrier': '',
        'traffic_cone': '',
    }
    # Attribute name -> integer id.
    AttrMapping = {
        'cycle.with_rider': 0,
        'cycle.without_rider': 1,
        'pedestrian.moving': 2,
        'pedestrian.standing': 3,
        'pedestrian.sitting_lying_down': 4,
        'vehicle.moving': 5,
        'vehicle.parked': 6,
        'vehicle.stopped': 7,
    }
    # Integer id -> attribute name (inverse of AttrMapping).
    AttrMapping_rev = [
        'cycle.with_rider',
        'cycle.without_rider',
        'pedestrian.moving',
        'pedestrian.standing',
        'pedestrian.sitting_lying_down',
        'vehicle.moving',
        'vehicle.parked',
        'vehicle.stopped',
    ]
    # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
    # Internal error key -> official nuScenes metric abbreviation.
    ErrNameMapping = {
        'trans_err': 'mATE',
        'scale_err': 'mASE',
        'orient_err': 'mAOE',
        'vel_err': 'mAVE',
        'attr_err': 'mAAE'
    }
    # Detection class names.
    CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
               'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
               'barrier')

    def __init__(self, lidar=None, **kwargs):
        """Initialize the dataset.

        Args:
            lidar (str, optional): Key selecting which lidar's paths to use
                from each info dict. Defaults to 'vehicle'.
            **kwargs: Forwarded unchanged to ``NuScenesDataset4d``.
        """
        super().__init__(**kwargs)
        if lidar is None:
            lidar = 'vehicle'
        self.lidar = lidar
    def add_root_path(self, data_infos):
        for data in data_infos:
            for lp in data['lidar_path']:
                data['lidar_path'][lp] = self.data_root + data['lidar_path'][lp]
        # input_dict['pts_filename'] = input_dict['pts_filename'][:23] + input_dict['pts_filename'][34:]
            for cam in data['cams']:
                data['cams'][cam]['data_path'] = self.data_root + data['cams'][cam]['data_path']
            # tmp = tmp[:23] + tmp[34:]
        
    def load_annotations(self, ann_file):
        """Load annotations from ann_file.

        Args:
            ann_file (str): Path of the annotation file (pickle format).

        Returns:
            list[dict]: List of data infos.
        """
        raw = mmcv.load(ann_file, file_format='pkl')
        infos = raw['infos']
        self.metadata = raw['metadata']
        self.version = self.metadata['version']
        self.add_root_path(infos)
        # ``key_frame`` is a list of indices into the infos; it is only
        # populated for training so __len__ can iterate selected frames.
        self.key_frame = None
        if not self.test_mode:
            self.filter_end_frame(infos)
            self.gap_frame(infos)
        return infos

    def filter_end_frame(self, data_infos):
        key_frame = []
        for k, d in enumerate(data_infos):
            if d['lane_gt'] or d['object_gt']:
                key_frame.append(k)
        self.key_frame = key_frame
        return self.key_frame
    def gap_frame(self, data_infos: list):
        """Subsample ``self.key_frame`` in place on ``self``.

        Walks the key frames and keeps one out of every
        ``self.load_interval`` consecutive frames. A frame is kept whenever
        the run counter is (re)set to zero: the scene changed or the frame
        ids are not consecutive, the counter reached ``load_interval``, or
        the frame has both lane and object ground truth.
        """
        # (scene_id, frame_id) of the previously visited key frame.
        id_scenes_frame = [None,None]
        # Count of consecutive key frames seen since the last kept frame.
        flag = 0
        key_frame = []
        for index in self.key_frame:
            # Consecutive iff same scene and frame id increments by one.
            if (data_infos[index]['id_scenes_frame'][0] == id_scenes_frame[0]) and \
                (data_infos[index]['id_scenes_frame'][1] == (id_scenes_frame[1] + 1)):
                flag += 1
            else:
                flag = 0
            id_scenes_frame = data_infos[index]['id_scenes_frame']
            if flag ==self.load_interval:
                flag = 0
            # Frames that carry both kinds of GT are always kept.
            if data_infos[index]['lane_gt'] and data_infos[index]['object_gt']:
                flag = 0
            if flag == 0:
                key_frame.append(index)
        self.key_frame = key_frame
    def __len__(self,):

        if self.key_frame is not None:
            return len(self.key_frame)
        else:
            return super().__len__()
    def get_data_info(self, index):
        """Get data info according to the given index.
        Args:
            index (int): Index of the sample data to get.

        Returns:
            dict: Data information that will be passed to the data
                preprocessing pipelines. It includes the following keys:

                - sample_idx: Scene id of the sample.
                - pts_filename (str): Filename of point clouds for the
                    lidar selected by ``self.lidar``.
                - timestamp (float): Sample timestamp in seconds.
                - lidar2ego / ego2global: Pose transforms from the info dict.
                - gt_maps (optional): Lane GT assembled by ``get_lane_gt``.
                - img_filename (list[str], optional): Image filenames.
                - lidar2img (list[np.ndarray], optional): Transformations
                    from lidar to different cameras.
                - ann_info (dict): Annotation info.
        """
        info = self.data_infos[index]

        # standard protocol modified from SECOND.Pytorch
        input_dict = dict(
            sample_idx=info['id_scenes_frame'][0],
            pts_filename=info['lidar_path'][self.lidar],
            # sweeps=info['sweeps'],
            # Raw timestamps are in microseconds; convert to seconds.
            timestamp=info['timestamp'] / 1e6,
            # lidar2ego=info['lidar2ego'][self.lidar],
            # NOTE(review): always uses the 'vehicle' extrinsics even when
            # ``self.lidar`` selects another sensor — confirm intended.
            lidar2ego=info['lidar2ego']['vehicle'],
            ego2global=info['ego2global'],
            # token=info['id_scenes_frame'][0]
        )
        if 'vec_lines' in info:
            # Vector map lines may be stored inline or as a pickle path.
            vec_lines = self.get_map(info['vec_lines'])
            input_dict["gt_maps"] = self.get_lane_gt(vec_lines)
        if 'ann_infos' in info:
            input_dict['ann_infos'] = info['ann_infos']
        if self.modality['use_camera']:
            if self.img_info_prototype == 'mmcv':
                image_paths = []
                lidar2img_rts = []
                for cam_type, cam_info in info['cams'].items():
                    image_paths.append(cam_info['data_path'])
                    # obtain lidar to image transformation matrix
                    lidar2cam_r = np.linalg.inv(
                        cam_info['sensor2lidar_rotation'])
                    lidar2cam_t = cam_info[
                        'sensor2lidar_translation'] @ lidar2cam_r.T
                    lidar2cam_rt = np.eye(4)
                    lidar2cam_rt[:3, :3] = lidar2cam_r.T
                    lidar2cam_rt[3, :3] = -lidar2cam_t
                    intrinsic = cam_info['cam_intrinsic']
                    viewpad = np.eye(4)
                    viewpad[:intrinsic.shape[0], :intrinsic.
                            shape[1]] = intrinsic
                    # Compose: lidar -> camera extrinsics, then intrinsics.
                    lidar2img_rt = (viewpad @ lidar2cam_rt.T)
                    lidar2img_rts.append(lidar2img_rt)

                input_dict.update(
                    dict(
                        img_filename=image_paths,
                        lidar2img=lidar2img_rts,
                    ))

                if not self.test_mode:
                    annos = self.get_ann_info(index)
                    input_dict['ann_info'] = annos
            else:
                assert 'bevdet' in self.img_info_prototype
                input_dict.update(dict(curr=info))
                if '4d' in self.img_info_prototype:
                    # Attach adjacent (temporal) frame infos for 4D models.
                    info_adj_list = self.get_adj_info(info, index)
                    input_dict.update(dict(adjacent=info_adj_list))

        return input_dict
    


    def comb_lines(self, lines,eps1=3.0, eps2=1.0, reps1=120, reps2=10.0):
        def l1l2error(l1,l2):
            return ((l1[0] - l2[0])**2 + (l1[1] - l2[1])**2)**(0.5)
        lin = []
        len_lines = len(lines)
        if len_lines < 2:
            return lines
        save_lines = [True for i in range(len_lines)]
        for i in range(len_lines-1):
            if not save_lines[i]:
                continue
            for j in range(i+1, len_lines):
                if (not save_lines[j]):
                    continue
                # print(i,j,save_lines[i], save_lines[j])
                flag = 0
                if isinstance(lines[i], dict):
                    l1, l2 = lines[i]['points'], lines[j]['points']
                else:
                    l1, l2 = lines[i], lines[j]
                if l1l2error(l1[0],l2[0]) < eps1:
                    l1 = l1[::-1]
                    flag = 1
                elif l1l2error(l1[0],l2[-1]) < eps1:
                    l1 = l1[::-1]
                    l2 = l2[::-1]
                    flag = 1
                elif l1l2error(l1[-1],l2[0]) < eps1:
                    flag = 1
                elif l1l2error(l1[-1],l2[-1]) < eps1:
                    l2 = l2[::-1]
                    flag = 1
                else:
                    flag = 0
                if flag==1:
                    x11,y11 = l1[-2][:2]
                    x12,y12 = l1[-1][:2]
                    x21,y21 = l2[0][:2]
                    x22,y22 = l2[1][:2]
                    v1 = np.array([x12 - x11, y12 - y11])
                    v2 = np.array([x22 - x21, y22 - y21])
                    # 两个向量
                    Lx=np.linalg.norm(v1,ord=2)
                    Ly=np.linalg.norm(v2,ord=2)
                    cos_angle=min(v1.dot(v2)/(Lx*Ly),1.0)
                    r=np.arccos(cos_angle)
                    if abs(r) >= reps1 * np.pi / 180:
                        continue
                    if (abs(r) >= reps2 * np.pi / 180) and (l1l2error(l1[-1],l2[0]) >= eps2):
                        continue
                    # if (l1l2error(l1[-1],l2[0]) >= eps2):
                    #     continue
                    l2[0][:3] = (l2[0][:3] + l1[-1][:3] ) / 2
                    l1 = np.concatenate([l1, l2], 0)
                    if isinstance(lines[i], dict):
                        lines[i]['points'] = l1
                    else:
                        lines[i] = l1
                    # print(i,j)
                    save_lines[j] = False
            lin.append(lines[i])
        if save_lines[-1]:
            lin.append(lines[-1])
        return lin

    def get_map(self,vec_lines):
        if isinstance(vec_lines, str):
            with open(self.data_root + vec_lines,'rb') as f:
                data = pickle.load(f)
            return data
        else:
            return vec_lines
        
    def get_sequ_idx(self, index):
        Lsort = np.arange(self.sequ_num * self.sequ_arfa)
        if not self.test_mode:
            np.random.shuffle(Lsort)
        Lsort = Lsort[:self.sequ_num-1]
        Lsort += 1
        Lsort = np.sort(Lsort)
        Lsort = Lsort.tolist()
        Lsort.insert(0,0)
        info_idx = []
        f = 0
        for select_id in range(self.sequ_num):
            _select_id = Lsort[select_id]
            select_id = max(index - _select_id, 0)
            if not self.data_infos[select_id]['id_scenes_frame'][0] \
                == self.data_infos[index]['id_scenes_frame'][0]:
                info_idx.append(index-f)
            else:
                info_idx.append(select_id)
                f += 1
        return info_idx

    def get_road_edges(self, road_edge):
        road_edge = self.comb_lines(road_edge)
        pts = []
        if road_edge is None:
            return pts
        for re in road_edge:
            pt=np.insert(re['points'],3, values=-1, axis=1)
            pts.append(pt)
        return pts
    def get_lane_dividers(self, lane_divider):
        lane_divider = self.comb_lines(lane_divider)
        pts = []
        type_dict = {'dashed': 0,
                     "solid": 1,
                     "dashed_dashed": 0, 
                     "solid_solid": 1, 
                     "solid_dashed": 1,
                     "dashed_solid": 1 }
        if lane_divider is None:
            return pts
        for re in lane_divider:
            if len(re['points'][0]) == 3:
                pt=np.insert(re['points'],3, 
                                values=type_dict[re['line_type']],
                                axis=1)
            else:
                pt = re['points']
            pts.append(pt)
        return pts

    def get_center_lines(self, lane_divider):
        lane_divider = self.comb_lines(lane_divider)
        pts = []
        if lane_divider is None:
            return pts
        for re in lane_divider:
            pt=np.insert(re['points'], 3, values=-1, axis=1)
            pts.append(pt)
        return pts
    def get_road_markings(self, road_marking):
        Ltype = []
        pts = []
        for k, func in enumerate(['crosswalk','stop_line','speed_bump','no_stop','turn']):
            pts_ = getattr(self, 'get_road_markings_%s' % func)(road_marking)
            pts.extend(pts_)
            Ltype.extend(np.ones(len(pts_))*k)
        return pts, Ltype
        
    def _get_road_markings(self, road_marking, filter_cls: list):
        pts = []
        if road_marking is None:
            return pts
        for re in road_marking:
            if re['category'][0] in filter_cls:
                if re.get("polygon", None) is not None:
                    pts_ = re['polygon']
                    pts_.append(pts_[0])
                    pt=np.insert(pts_, 3, values=-1, axis=1)
                    pts.append(pt)
                else:
                    pts_ = re['edge1']
                    pts.append(np.insert(pts_, 3, values=-1, axis=1))
                    pts_ = re['edge2']
                    pts.append(np.insert(pts_, 3, values=-1, axis=1))
        return pts

    def get_road_markings_crosswalk(self, road_marking):
        filter_cls = ['crosswalk']
        return self._get_road_markings(road_marking, filter_cls)

    def get_road_markings_stop_line(self, road_marking):
        filter_cls = ['stop_line']
        return self._get_road_markings(road_marking, filter_cls)

    def get_road_markings_speed_bump(self, road_marking):
        filter_cls = ['speed_bump']
        return self._get_road_markings(road_marking, filter_cls)

    def get_road_markings_no_stop(self, road_marking):
        filter_cls = ['no_stop']
        return self._get_road_markings(road_marking, filter_cls)

    def get_road_markings_turn(self, road_marking):
        filter_cls = {'left': 1e5, 
                      "straight": 1e4,
                      "right": 1e3,
                      "u_turn": 1e2,
                      'confluence_left': 1e1,
                      'confluence_right': 1e0}
        pts = []
        if road_marking is None:
            return pts
        for re in road_marking:
            ty = 0
            for r in re['category']:
                if r in filter_cls.keys():
                    ty += filter_cls[r]
            if ty > 0:
                if re.get("polygon", None) is not None:
                    pts_ = re['polygon']
                    pts_.append(pts_[0])
                    pt=np.insert(pts_, 3, values=ty, axis=1)
                    pts.append(pt)
                else:
                    pts_ = re['edge1']
                    pts.append(np.insert(pts_, 3, values=-1, axis=1))
                    pts_ = re['edge2']
                    pts.append(np.insert(pts_, 3, values=-1, axis=1))
        return pts

    def get_func(self, vec_lines, func):
        return getattr(self, 'get_%s' % func)(vec_lines.get(func, None))

    def get_lane_gt(self, vec_lines):
        """
        return :
            lines_gt_pts: line_id x y z type
            road_edges, lane_dividers, double_dividers,
        """
        Ltype = []
        pts = []
        if vec_lines is None:
            vec_lines = {}
        # distance_div = [250,250]
        for k, func in enumerate(['road_edges','lane_dividers','center_lines']):
            pts_ = self.get_func(vec_lines, func)
            pts.extend(pts_)
            Ltype.extend(np.ones(len(pts_))*k)
    
        _pts, _Ltype = self.get_road_markings(vec_lines.get('road_markings', None))
        _Ltype = [i+k+1 for i in _Ltype]

        pts.extend(_pts)
        Ltype.extend(_Ltype)

        for i in range(len(pts)):
            if len(pts[i]) > 0:
                pts[i] = np.insert(pts[i], 0, values = i, axis=1)
                pts[i] = np.insert(pts[i], 5, values = Ltype[i], axis=1)
            # input_dict['lines_gt_pts'] = np.concatenate(gt_pts)
        pts = np.concatenate(pts).astype(np.float32) if len(pts) > 0 else np.array(pts)
        
        return pts







