import random
import math
import os
from os import path as osp
import cv2
import tempfile
import copy
import prettytable

import numpy as np
import torch
from torch.utils.data import Dataset
import pyquaternion
from shapely.geometry import LineString


import mmcv
from mmcv.utils import print_log
from mmdet.datasets import DATASETS
from mmdet.datasets.pipelines import Compose
from .utils import (
    draw_lidar_bbox3d_on_img,
    draw_lidar_bbox3d_on_bev,
)

import tempfile
from os import path as osp

import pickle, os
import copy,json
from datetime import datetime
import pytz
import SharedArray as sa
import osmnx as ox

class HJDataset(object):
    """Raw-annotation reader for the HJ driving dataset.

    Label layouts (translated from the original Chinese notes):
        ego_future: 16 (8s * 2Hz) * 9 (mask, relative stamp, x, y, z, heading, vx, vy, vz)
        obj_future: 16 * 9 (same fields as above; xyz / heading / vxyz are in the
            current frame's ego coordinate system)
        e2e_cmd: ['left', 'straight', 'right'] one-hot
        navi: 6 (['left', 'straight', 'right'] one-hot, relative xy position of the
            instruction point, deflection in radians (left turn positive, right
            turn negative, U-turn fixed at pi))
        navi_route: N * 2 (navigation route waypoints xy in UTM coordinates)
    """
    # Default detection categories; can be overridden via config['classes'].
    CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
               'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
               'barrier')
    def __init__(self,ann_file,data_root=None,**config):
        """
        Args:
            ann_file (str): Path of the annotation pkl file.
            data_root (str, optional): Root directory holding the
                ``fisheyes`` / ``e2e_info`` data trees.
            **config: Optional keys ``classes``, ``load_interval``,
                ``test_mode`` and the required ``data_aug_conf``.
        """
        super().__init__()
        # Build a category-name -> index mapping from self.CLASSES.
        # Ego vehicle size [1.5515, -0.0515, 0.19, 4.637, 1.871, 1.48, 0]
        # Origin to left mirror 1.01 m, to right mirror 0.92 m
        # Origin to left door 0.93 m, to right door 0.84 m
        if config.get("classes",None) is not None:
            self.CLASSES = config['classes'] 
        self.class_dict = {v: index for index, v in enumerate(self.CLASSES)}
        self.data_root = data_root
        self.ann_file = ann_file
        self.load_interval = config.get('load_interval', 1)
        self.test_mode = config.get('test_mode', True)
        self.data_infos = self.load_annotations(ann_file=self.ann_file)
        self.data_config = config.get('data_aug_conf',None)
        assert self.data_config is not None, 'data_aug_conf must be provided'
    def load_annotations(self, ann_file):
        """Load annotations from ann_file.

        Args:
            ann_file (str): Path of the annotation file (mmcv-readable pkl
                with 'infos', 'metadata' and 'datatype' keys).

        Returns:
            list[dict]: List of per-frame annotation records.
        """
        data = mmcv.load(ann_file, file_format='pkl')
        data_infos = data['infos']
        self.metadata = data['metadata']
        self.version = self.metadata['version']
        self.datatype = data['datatype']
        self.key_frame = None
        # The original guarded this with ``if 1 or not self.test_mode`` -- a
        # debug override that made the branch unconditional and left the
        # ``data_infos[::self.load_interval]`` subsampling in the else branch
        # unreachable.  Keep the effective behaviour (always build the
        # key-frame index) without the dead code.
        self.filter_end_frame(data_infos)
        return data_infos
    
    def get_occes(self, data_infos):
        """Attach occupancy-GT file references to each frame record in place.

        A frame receives a real ``occ`` entry only when it carries object
        GT, an occ directory with more than 20 files exists for its lidar
        path, and its frame number is a multiple of 10.
        """
        for record in data_infos:
            record['occ_gt'] = False
            record['occ'] = None
            if not record['object_gt']:
                continue
            found, occ_dir, occ_files = self.get_occ_file(
                record['lidar_path']['inno_frl_0_raw'])
            if not found or len(occ_files) <= 20:
                continue
            frame_no = record['id_scenes_frame'][1]
            if frame_no % 10 != 0:
                continue
            occ_idx = int(frame_no // 10)
            if occ_idx < len(occ_files):
                record['occ'] = [occ_dir, occ_files[occ_idx]]
                record['occ_gt'] = True
        return data_infos
        
    def filter_end_frame(self, data_infos,has_box=True, has_map=True):
        """Build the key-frame index and per-scene frame counters.

        A frame is a key frame when it carries map GT (if ``has_map``) or
        box GT (if ``has_box``).  ``self.frame_idx[i]`` is the 0-based
        position of frame ``i`` inside its scene (reset on scene change).
        """
        keyframes = []
        counters = []
        current_scene = None
        counter = -1
        for idx, record in enumerate(data_infos):
            if (has_map and record['map_gt']) or (has_box and record['box_gt']):
                keyframes.append(idx)
            counter = 0 if record['scene'] != current_scene else counter + 1
            current_scene = record['scene']
            counters.append(counter)
        self.key_frame = np.array(keyframes)
        self.frame_idx = np.array(counters).astype(np.int32)
        return self.key_frame

    def gap_frame(self, data_infos: dict):
        """Thin out ``self.key_frame``, keeping roughly every
        ``load_interval``-th of a run of consecutive key frames.

        A counter grows while key frames are consecutive (same scene,
        adjacent indices) and resets on any break, on reaching
        ``load_interval``, or on frames carrying interp/motion/manual
        annotations (those are always kept).  Frames where the counter is
        0 survive.
        """
        id_scenes_frame = [None,None]
        flag = 0
        key_frame = []
        for index in self.key_frame:
            # Consecutive key frames within one scene advance the counter;
            # any discontinuity resets it (and keeps the frame).
            if ((data_infos[index]['scene'] == id_scenes_frame[0]) \
                and (index == id_scenes_frame[1] + 1)):
                flag += 1
            else:
                flag = 0
            id_scenes_frame[1] = index
            id_scenes_frame[0] = data_infos[index]['scene']
            if flag ==self.load_interval:
                flag = 0
            d = data_infos[index]
            #if data_infos[index]['map_gt'] and data_infos[index]['box_gt']:
            # Frames with any real annotation are always retained.
            if (d['has_interp'] or d['has_motion'] or d['has_anno']):
                flag = 0
            if flag == 0:
                key_frame.append(index)
        self.key_frame = key_frame

    def __len__(self):
        """Number of usable samples: key frames when indexed, else all frames."""
        if self.key_frame is not None:
            return len(self.key_frame)
        # Bug fix: the original fell back to ``super().__len__()``, but the
        # base class is ``object`` which has no ``__len__`` -- that path
        # always raised.  Fall back to the loaded records instead.
        return len(self.data_infos)

    def _load_json(self,_path,_x_file):
        _path = os.path.join(_path, f'{_x_file}.json')
        if not os.path.exists(_path):
            return False
        # print(_path)
        try: # todo
            with open(_path, 'rb') as f:
                info = json.load(f)
        except:
            print(_path)
            return False
        return info

    def R_camera_check(self, cam: str):
        """Split off a leading ``R_`` marker from a camera name.

        Returns ``(True, base)`` for ``R_<...>_<suffix>`` style names,
        where ``base`` is everything between the first and last
        underscore-separated parts; otherwise ``(False, cam)`` unchanged.
        """
        parts = cam.split('_')
        if parts[0] != 'R':
            return False, cam
        return True, '_'.join(parts[1:-1])
    
    def _load_cam_calib(self, info, _cam, calib_type, use_sharedmem=False):
        Is, data = self.get_sharedmem(f"{info['scene']}_{_cam}_{calib_type}")
        if Is:
            return data
        else:
            _path = os.path.join(self.data_root,
                                    'fisheyes',
                                    info['scene'],
                                    "calib",
                                    f"{_cam}_{calib_type}.txt")
            
            data = np.loadtxt(_path, dtype=np.float32)
            if use_sharedmem:
                self.set_sharedmem(data, f"{info['scene']}_{_cam}_{calib_type}")
        return data
    def _load_cam_ego(self,info,_cam,use_sharedmem=False):
        # Extrinsic calibration: sensor -> ego transform (stored by
        # _load_cams under key 'sensor2ego').
        return self._load_cam_calib(info,_cam,'ego',use_sharedmem)

    def _load_cam_intrinsic(self,info,_cam,use_sharedmem=False):
        # Intrinsic calibration; consumed as a 3x3 K matrix in _get_ego2img.
        return self._load_cam_calib(info,_cam,'intrinsic',use_sharedmem)

    def _load_cam_distortion(self,info,_cam,use_sharedmem=False):
        """Load distortion coefficients for one camera.

        Returns a float32 vector of length 10: element 0 is a fisheye flag
        (1 when coefficients were loaded), elements 1..9 hold the first
        nine values of ``<cam>_distortion.txt``.  Non-fisheye cameras get
        all zeros.
        """
        calib_type = "distortion"
        Is, data = self.get_sharedmem(f"{info['scene']}_{_cam}_{calib_type}")
        if Is:
            return data
        else:
            data = np.zeros(10,dtype=np.float32)
            # NOTE(review): ``find('fisheye') > 0`` excludes names that
            # START with 'fisheye' (find() == 0) -- confirm camera names
            # always carry a prefix before 'fisheye'.
            if _cam.find('fisheye') >0:
                _path = os.path.join(self.data_root,
                                        'fisheyes',
                                        info['scene'],
                                        "calib",
                                        f"{_cam}_{calib_type}.txt")
                data[0] = 1
                data[1:] = np.loadtxt(_path, dtype=np.float32)[:9]
            if use_sharedmem:
                self.set_sharedmem(data, f"{info['scene']}_{_cam}_{calib_type}")
        return data

    def _load_cams(self, info, sync_info, use_sharedmem=False):
        """Fill ``info['cams']`` with per-camera paths and calibration.

        For every configured camera, resolves the synced image path
        ``fisheyes/<scene>/cameras/<cam>/<stamp>.jpg`` plus extrinsic,
        intrinsic and distortion calibration.  ``R_``-prefixed camera
        names share the underlying physical camera of their base name.
        """
        cams = self.data_config['cams']
        _sync_info = sync_info['cameras']
        info['cams'] = {}
        for cam in cams:
            cam_r_flag, _cam = self.R_camera_check(cam)
            info['cams'][cam] = {}
            # Image path: the file stamp comes from the per-frame sync table.
            img_path = os.path.join(self.data_root,
                                    'fisheyes',
                                    info['scene'],
                                    "cameras",
                                    _cam,
                                    f"{_sync_info[_cam]}.jpg")
            info['cams'][cam]['data_path'] = img_path
            info['cams'][cam]['type'] = _cam
            info['cams'][cam]['sensor2ego'] = self._load_cam_ego(info,_cam,use_sharedmem)
            info['cams'][cam]['cam_intrinsic'] = self._load_cam_intrinsic(info,_cam,use_sharedmem) 
            info['cams'][cam]['cam_distortion'] = self._load_cam_distortion(info,_cam,use_sharedmem) 
        return info

    def _load_ego(self,info, use_sharedmem=False):
        """Attach the frame's ego->global pose from ``ego_motion/<frame>.txt``.

        Shared memory is consulted first.  When the file is missing the
        path is only printed and ``info`` is returned WITHOUT an
        ``ego2global`` entry -- downstream consumers would then KeyError.
        NOTE(review): presumably the pose files are expected to always
        exist; confirm upstream guarantees.
        """
        Is,data = self.get_sharedmem(f"{info['scene']}_{info['frame']}_egomotion")
        if Is:
            info['ego2global'] = data
            return info
        _path = os.path.join(self.data_root,
                                    'fisheyes',
                                    info['scene'],
                                    'ego_motion',
                                    f"{info['frame']}.txt")
        if os.path.exists(_path):
            info['ego2global'] = np.loadtxt(_path,dtype=np.float32)
            if use_sharedmem:
                self.set_sharedmem(info['ego2global'],f"{info['scene']}_{info['frame']}_egomotion")
        else:
            print(_path)
        return info
    def _load_occ(self,info):
        # Occupancy loading is not implemented yet; see get_occes /
        # get_occ_file for how occ files are located.  # todo
        pass
    def _load_map(self, info):
        """Attach vectorized map GT for frames that carry lane annotations.

        Reads ``fisheyes/<scene>/map_anno/<frame>.json`` into
        ``info['vec_lines']`` and converts it to the packed point array
        ``info['gt_maps']`` via ``get_lane_gt``.
        """
        _path = os.path.join(self.data_root,
                                    'fisheyes', # select  "map_hrb"
                                    info['scene'],
                                    'map_anno')
        if info['lane_gt']:
            info['vec_lines'] = self._load_json(_path, info['frame'])
            info['gt_maps'] = self.get_lane_gt(info['vec_lines'])
        return info

    def decode_scene(self, scenes, timestamp):
        """Decode the stored scene identifier into a scene-name string.

        When the pkl already stores the full scene name (a fixed-width
        numpy string of >= 40 bytes) it is returned directly; otherwise
        the name is rebuilt as ``<Beijing local time>_<suffix>`` from the
        frame timestamp (microseconds).
        """
        frame_type = self.datatype
        if np.issubdtype(frame_type['scene'],np.string_) and  frame_type['scene'].itemsize>=40:
            return scenes.astype(str)
        beijing = pytz.timezone("Asia/Shanghai")
        timeStamp = int(timestamp//1e6)
        utc_date = datetime.utcfromtimestamp(timeStamp)
        # Bug fix: the original referenced the undefined names ``utc`` and
        # ``geijing`` -- this path always raised NameError.
        utc_loc_time = pytz.utc.localize(utc_date)
        beijing_time = utc_loc_time.astimezone(beijing)
        otherStyleTime = beijing_time.strftime("%Y_%m_%d_%H_%M_%S")
        # Bug fix: issubdtype was previously called on the whole datatype
        # dict instead of its 'scene' entry (as done above).
        if np.issubdtype(frame_type['scene'], np.integer):
            # NOTE(review): ``self.data`` is not assigned anywhere in this
            # class -- verify where the integer-scene suffix mapping is
            # supposed to come from.
            return otherStyleTime + '_' + self.data['scene_suffix_num_name'][scenes]
        else:
            return otherStyleTime + '_' + scenes

    def get_box(self, ann_info):
        boxes = []
        ca = []
        inst_id = []
        for ann in ann_info:
            center = ann['center']
            size = ann['size']
            rotation = ann['rotation']
            if 'velocity' in ann:
                velocity = [ann['velocity']['x'],
                ann['velocity']['y']]
            else:
                velocity = [np.nan,np.nan]
            inst_id.append(self.id_str2num(ann['id']))
            box = np.array([center['x'], center['y'], center['z'],
                            size['x'], size['y'], size['z'], 
                            rotation['z'],velocity[0],velocity[1]])
            boxes.append(box.astype(np.float32))
            if ann['category'] not in self.class_dict:
                ca.append(0)
            else:
                ca.append(self.class_dict[ann['category']]+1)
        if len(boxes) >0:
            boxes = np.stack(boxes)
        else:
            boxes = np.array(boxes).reshape(-1,9)
        return boxes,np.array(ca,dtype=np.int16),np.array(inst_id,dtype=np.int64)

    def id_str2num(self, id_str):
        """Collapse a string id into an int from its first 10 digits.

        Raises ValueError when the string contains no digits at all
        (``int('')``), matching the historical behaviour.
        """
        digits = []
        for ch in id_str:
            if ch.isdigit():
                digits.append(ch)
            if len(digits) >= 10:
                break
        return int(''.join(digits))
    def _load_gt_box_X(self, info, gt_type):
        _path = os.path.join(self.data_root,
                                    'fisheyes',
                                    info['scene'],
                                    gt_type)
        info['ann_infos'] = self._load_json(_path, info['frame'])
        return info['ann_infos']
    def _load_object(self, info,use_sharedmem=False):
        """Attach packed box GT ``(boxes, categories, ids)`` for the frame.

        Shared memory is tried first; otherwise the raw json is loaded
        with the fallback chain obj_motion -> obj_anno -> obj_interp and
        packed via ``get_box`` (then optionally published).
        """
        if info['object_gt']:
            Is,box = self.get_sharedmem(f"{info['scene']}_{info['frame']}_box")
            if Is:
                # NOTE: the cat/id reads reuse ``Is``; a miss there would
                # silently store None -- assumes all three segments are
                # always published together (see the writes below).
                Is,box_cat = self.get_sharedmem(f"{info['scene']}_{info['frame']}_cat")
                Is,box_id = self.get_sharedmem(f"{info['scene']}_{info['frame']}_id")
                info['ann_infos'] = (box,box_cat,box_id)
                return info
            info['ann_infos'] = self._load_gt_box_X(info, 'obj_motion')
            if not info['ann_infos']:
                info['ann_infos'] = self._load_gt_box_X(info, 'obj_anno')
            if not info['ann_infos']:
                info['ann_infos'] = self._load_gt_box_X(info, 'obj_interp')

            info['ann_infos'] = self.get_box(info['ann_infos'])
            if use_sharedmem:
                self.set_sharedmem(info['ann_infos'][0], f"{info['scene']}_{info['frame']}_box")
                self.set_sharedmem(info['ann_infos'][1], f"{info['scene']}_{info['frame']}_cat")
                self.set_sharedmem(info['ann_infos'][2], f"{info['scene']}_{info['frame']}_id")
            
        return info
    def set_sharedmem(self,data,name):
        try:
            a = sa.create(f"shm://{name}",data.shape,dtype=data.dtype)
            a[:] = data
        except:
            return False
        return True
    def get_sharedmem(self,name):
        """Attach to ``shm://<name>``; returns (True, array) or (False, None).

        Narrowed from a bare ``except``: any attach failure (missing
        segment, SharedArray unavailable) is treated as a cache miss.
        """
        try:
            a = sa.attach(f"shm://{name}")
            return True, a
        except Exception:
            return False,None
    def _load_sync(self,info,use_sharedmem=False):
        """Load the frame's camera sync table from ``sync_info/<frame>.json``.

        NOTE(review): the shared-memory key omits the scene, nothing in
        this class publishes under ``<frame>_sync``, and ``use_sharedmem``
        is unused -- the cache branch looks vestigial; confirm before
        relying on it.
        """
        _path = os.path.join(self.data_root,'fisheyes',info['scene'],"sync_info")
        Is,data = self.get_sharedmem(f"{info['frame']}_sync")
        if Is:
            return data
        sync_info = self._load_json(_path, info['frame'])
        return sync_info

    def _load_pkl(self,_path,filename):
        with open(os.path.join(_path, filename+'.pkl'), 'rb') as f:
            data = pickle.load(f)
        return data

    def _load_status(self,info,ego_type,use_sharedmem=False):
        Is,data = self.get_sharedmem(f"{info['scene']}_{ego_type}")
        if Is:
            return data
        _path = os.path.join(self.data_root,'e2e_info',info['scene'],ego_type+'.npy')
        if os.path.exists(_path):
            data = np.load(_path)
        else:
            print(f"{_path} not exists")
        # data = np.load(os.path.join(self.data_root,'e2e_info',info['scene'],ego_type+'.npy'))
        if use_sharedmem:
            self.set_sharedmem(data, f"{info['scene']}_{ego_type}")
        return data
    
    def _load_e2e_pkl(self,info,ego_type):
        _path = os.path.join(self.data_root,'e2e_info',info['scene'],ego_type+'.pkl')
        if os.path.exists(_path):
            with open(_path, 'rb') as f:
                data = pickle.load(f)
        else:
            print(f"{_path} not exists")
            data = None
        return data
    def _load_e2e_pkl_navi(self,info,ego_type,use_sharedmem=False):
        frame = info['frame']
        Is,data = self.get_sharedmem(f"{info['scene']}_{ego_type}_{frame}_navi")
        if Is:
            return data
        data = self._load_e2e_pkl(info,ego_type)
        if data is None:
            return None
        if use_sharedmem:
            self.set_sharedmem(data, f"{info['scene']}_{ego_type}_{frame}_navi")
        return data
    def _load_e2e_pkl_box(self,info,ego_type,use_sharedmem=False):
        """Per-frame future-trajectory lookup with a shared-memory cache.

        Returns ``(ids, trajectories)`` for ``info['frame']``: ids as an
        int64 array of numeric track ids and trajectories as a float32
        array with one row per id.  On a cache miss the whole scene pkl is
        loaded and -- only when ``use_sharedmem`` -- every frame's
        key/value arrays are published at once; the final re-read
        therefore returns ``(None, None)`` when shared memory is disabled
        or the frame is absent from the pkl.
        """
        frame = info['frame']
        Is_k,data_k = self.get_sharedmem(f"{info['scene']}_{ego_type}_{frame}_k")
        Is_v,data_v = self.get_sharedmem(f"{info['scene']}_{ego_type}_{frame}_v")
        if Is_k and Is_v:
            return data_k,data_v
        data = self._load_e2e_pkl(info,ego_type)
        if data is None:
            return None,None
        if use_sharedmem:
            # Publish every frame of the scene so subsequent frames hit
            # the cache without re-reading the pkl.
            for _k in data.keys():
                _data = data[_k]
                if not _data:
                    continue
                kv = [[k,v] for k,v in _data.items()]
                k_data = np.stack([self.id_str2num(s[0]) for s in kv], 0)
                v_data = np.stack([s[1] for s in kv], 0)
                k_data = k_data.astype(np.int64)
                v_data = v_data.astype(np.float32)
                self.set_sharedmem(k_data, f"{info['scene']}_{ego_type}_{_k}_k")
                self.set_sharedmem(v_data, f"{info['scene']}_{ego_type}_{_k}_v")
        Is_k,data_k = self.get_sharedmem(f"{info['scene']}_{ego_type}_{frame}_k")
        Is_v,data_v = self.get_sharedmem(f"{info['scene']}_{ego_type}_{frame}_v")
        if not Is_k or not Is_v:
            print(f"{info['scene']}_{ego_type}_{frame}_k or {info['scene']}_{ego_type}_{frame}_v not exists")
        return data_k,data_v
    def _load_motion_status(self,info,use_sharedmem=False):
        """
            e2e_motion: 7(vx, vy, vz, yaw_rate, ax, ay, az)
        """
        frame_idx = info['frame_idx']
        data_motion = self._load_status(info,'e2e_motion',use_sharedmem)[frame_idx]
        data_motion = data_motion.astype(np.float32)
        info['motion_status'] = data_motion
        return info
    def _load_cmd_status(self,info,use_sharedmem=False):
        """
        e2e_cmd: ['left', 'straight', 'right'] one hot
        """
        frame_idx = info['frame_idx']
        cmd_info = self._load_status(info,'e2e_cmd',use_sharedmem)[frame_idx]
        cmd_info = cmd_info.astype(np.float32)
        info['cmd_status'] = cmd_info
        return info
    def _load_box_trajs(self,info,use_sharedmem=False):
        """
        obj_future: 16 * 9(同上，xyz、heading、vxyz均为当前帧本车坐标系)
        """
        frame_idx = info['frame_idx']
        box_info = [None,None]
        if info['object_gt']:
            box_info = self._load_e2e_pkl_box(info,'e2e_obj_future',use_sharedmem)
        if box_info[0] is None:
            box_info = [np.ones(1)*-1000, np.ones(shape=(1,16,9))*-1000]
            box_info[0] = box_info[0].astype(np.int64)
            box_info[1] = box_info[1].astype(np.float32)
        info['gt_agent_ids'] = box_info[0] 
        info['gt_agent_fut_trajs'] = box_info[1]
        return info

    def _load_ego_trajs(self,info,use_sharedmem=False):
        """
        ego_future: 16(8s * 2Hz) * 9(mask, 相对stamp, x, y, z, heading, vx, vy, vz)
        """
        frame_idx = info['frame_idx']
        data = self._load_status(info,'e2e_ego_future',use_sharedmem)[frame_idx]
        data = data.astype(np.float32)
        info['gt_ego_fut_trajs'] = data
        return info
    def _load_navi_status(self,info,use_sharedmem=False):
        """Attach navigation command and route to ``info``.

        navi: 6 values (['left', 'straight', 'right'] one-hot, relative xy
        of the instruction point, deflection in radians -- left positive,
        right negative, U-turn fixed at pi).
        navi_route: N * 2 route waypoints (xy, UTM coordinates).
        """
        data_navi = self._load_e2e_pkl_navi(info,'e2e_navi',use_sharedmem)
        data_navi_route = self._load_status(info,'e2e_navi_route',use_sharedmem)
        info['navi_status'] = data_navi
        info['navi_route'] = data_navi_route
        return info
    def _load_sdmap(self,info,use_sharedmem=False):
        """Attach the scene's SD-map road graph to ``info['sd_graph']``.

        Loads ``e2e_info/<scene>/e2e_sdmap.graphml`` via osmnx; stores
        None (and prints a message) when the file is absent.
        ``use_sharedmem`` is accepted for signature symmetry but unused.
        """
        _path = os.path.join(self.data_root,'e2e_info',info['scene'],'e2e_sdmap.graphml')
        if os.path.exists(_path):
            data = ox.load_graphml(_path)
        else:
            print(f"{_path} not exists")
            data = None
        info['sd_graph'] = data
        return info                     

    def _get_ego2img(self, info):
        """Compose a 4x4 ego->image projection for every configured camera.

        Builds ``[K 0; 0 1] @ sensor2ego.T`` and stores it as
        ``info['cams'][cam]['ego2img']``.

        NOTE(review): a plain transpose of ``sensor2ego`` only inverts a
        pure rotation; if the matrix carries translation this is not a
        true ego->camera inverse -- confirm the storage convention of the
        ``*_ego.txt`` calibration files before changing anything here.
        """
        cams = self.data_config['cams']
        for cam in cams:
            cam2ego = info['cams'][cam]['sensor2ego']
            intrinsic = info['cams'][cam]['cam_intrinsic'] 
            _tmp = np.eye(4)
            # Embed the 3x3 intrinsic K into a 4x4 homogeneous matrix.
            _tmp[: 3, : 3] = intrinsic
            ego2img = _tmp @ cam2ego.T
            info['cams'][cam]['ego2img'] = ego2img
        return info
    def decode_info(self, index: int):
        """Build a minimal per-sample record from ``data_infos[index]``.

        The raw record is deep-copied so nothing here mutates the cached
        annotation list.
        """
        raw = copy.deepcopy(self.data_infos[index])
        return {
            'scene': self.decode_scene(raw['scene'], raw['frame']),
            'frame': raw['frame'],
            'object_gt': raw['box_gt'],
            'lane_gt': raw['map_gt'],
            'frame_idx': self.frame_idx[index],
        }
    def load_input_dict(self, info):
        """Populate ``info`` with every enabled modality for one sample.

        The sync table is loaded first because ``_load_cams`` consumes it;
        the status/trajectory loaders read ``info['frame_idx']`` set by
        ``decode_info``.  Disabled loaders (lidar, occ, navi, sdmap) are
        kept as commented hooks.
        """
        sync_info = self._load_sync(info)
        self._load_cams(info, sync_info, True)
        # info = self.load_lidar(info) 
        self._load_ego(info,True)
        # info = self._load_occ(info)
        self._load_map(info)
        self._load_object(info,True)
        self._load_cmd_status(info,True)
        self._load_motion_status(info,True)
        self._load_ego_trajs(info,True)
        self._load_box_trajs(info,True)
        # self._load_navi_status(info,True)
        # self._load_sdmap(info)
        return info

    def get_data_info(self, index):
        """Fully decoded and loaded sample for ``index``.

        Adds ``timestamp`` in seconds (frame ids are in microseconds).
        """
        sample = self.decode_info(index)
        sample = self.load_input_dict(sample)
        sample['timestamp'] = sample['frame'] / 1e6
        return sample

    def comb_lines(self, lines,eps1=3.0, eps2=1.0, reps1=120, reps2=10.0):
        """Merge polylines whose endpoints nearly touch.

        NOTE(review): the unconditional ``return lines`` below disables
        the merging logic entirely -- every caller currently gets its
        input back unchanged.  The dead code is kept for reference;
        confirm whether merging should be re-enabled before cleanup.

        Args (as used by the disabled code):
            eps1: endpoint distance below which two lines may be joined.
            eps2: tighter distance gate paired with ``reps2``.
            reps1: hard angular limit between joined segments (degrees).
            reps2: soft angular limit paired with ``eps2`` (degrees).
        """
        return lines
        if lines is None:
            return lines
        def l1l2error(l1,l2):
            # Euclidean distance between two 2D endpoints.
            return ((l1[0] - l2[0])**2 + (l1[1] - l2[1])**2)**(0.5)
        lin = []
        len_lines = len(lines)
        if len_lines < 2:
            return lines
        save_lines = [True for i in range(len_lines)]
        for i in range(len_lines-1):
            if not save_lines[i]:
                continue
            for j in range(i+1, len_lines):
                if (not save_lines[j]):
                    continue
                # print(i,j,save_lines[i], save_lines[j])
                flag = 0
                if isinstance(lines[i], dict):
                    l1, l2 = lines[i]['points'], lines[j]['points']
                else:
                    l1, l2 = lines[i], lines[j]
                # Try all four endpoint pairings; orient both lines so
                # that l1 ends where l2 begins.
                if l1l2error(l1[0],l2[0]) < eps1:
                    l1 = l1[::-1]
                    flag = 1
                elif l1l2error(l1[0],l2[-1]) < eps1:
                    l1 = l1[::-1]
                    l2 = l2[::-1]
                    flag = 1
                elif l1l2error(l1[-1],l2[0]) < eps1:
                    flag = 1
                elif l1l2error(l1[-1],l2[-1]) < eps1:
                    l2 = l2[::-1]
                    flag = 1
                else:
                    flag = 0
                if flag==1:
                    x11,y11 = l1[-2][:2]
                    x12,y12 = l1[-1][:2]
                    x21,y21 = l2[0][:2]
                    x22,y22 = l2[1][:2]
                    v1 = np.array([x12 - x11, y12 - y11])
                    v2 = np.array([x22 - x21, y22 - y21])
                    # Angle between the two joining direction vectors.
                    Lx=np.linalg.norm(v1,ord=2)
                    Ly=np.linalg.norm(v2,ord=2)
                    cos_angle = v1.dot(v2)/(Lx*Ly)
                    cos_angle = min(cos_angle,1.0)
                    cos_angle = max(cos_angle, -1.0)
                    r=np.arccos(cos_angle)
                    if abs(r) >= reps1 * np.pi / 180:
                        continue
                    if (abs(r) >= reps2 * np.pi / 180) and (l1l2error(l1[-1],l2[0]) >= eps2):
                        continue
                    # if (l1l2error(l1[-1],l2[0]) >= eps2):
                    #     continue
                    # Average the shared endpoint, then splice the lines.
                    l2[0][:3] = (l2[0][:3] + l1[-1][:3] ) / 2
                    l1 = np.concatenate([l1, l2], 0)
                    if isinstance(lines[i], dict):
                        lines[i]['points'] = l1
                    else:
                        lines[i] = l1
                    # print(i,j)
                    save_lines[j] = False
            lin.append(lines[i])
        if save_lines[-1]:
            lin.append(lines[-1])
        return lin

    def get_map(self, vec_lines):
        if isinstance(vec_lines, str):
            with open(self.data_root + vec_lines,'rb') as f:
                data = pickle.load(f)
            return data
        else:
            return vec_lines
        
    def get_sequ_idx(self, index):
        """Pick ``self.sequ_num`` history frame indices ending at ``index``.

        Offsets are drawn from ``range(sequ_num * sequ_arfa)`` (shuffled
        during training, deterministic in test mode) and always include
        offset 0 (the current frame).  Offsets that land in a different
        scene are replaced by frames immediately before ``index``.

        NOTE(review): ``self.sequ_num`` / ``self.sequ_arfa`` are never
        assigned in this class -- they must be provided by a subclass or
        external configuration; confirm before calling.
        """
        Lsort = np.arange(self.sequ_num * self.sequ_arfa)
        if not self.test_mode:
            np.random.shuffle(Lsort)
        Lsort = Lsort[:self.sequ_num-1]
        Lsort += 1
        Lsort = np.sort(Lsort)
        Lsort = Lsort.tolist()
        Lsort.insert(0,0)
        info_idx = []
        f = 0
        for select_id in range(self.sequ_num):
            _select_id = Lsort[select_id]
            select_id = max(index - _select_id, 0)
            if not self.data_infos[select_id]['scene'] \
                == self.data_infos[index]['scene']:
                # Crossed into another scene: fall back to the frames
                # immediately preceding ``index`` (f counts in-scene picks).
                info_idx.append(index-f)
            else:
                info_idx.append(select_id)
                f += 1
        return info_idx

    def get_road_edges(self, road_edge):
        road_edge = self.comb_lines(road_edge)
        pts = []
        if road_edge is None:
            return pts
        for re in road_edge:
            pt=np.insert(re['points'],3, values=-1, axis=1)
            pts.append(pt)
        return pts
    
    def get_lane_dividers(self, lane_divider):
        lane_divider = self.comb_lines(lane_divider)
        pts = []
        type_dict = {'dashed': 0,
                     "solid": 1,
                     "dashed_dashed": 0, 
                     "solid_solid": 1, 
                     "solid_dashed": 1,
                     "dashed_solid": 1 }
        if lane_divider is None:
            return pts
        for re in lane_divider:
            if len(re['points'][0]) == 3:
                pt=np.insert(re['points'],3, 
                                values=type_dict[re['line_type']],
                                axis=1)
            else:
                pt = re['points']
            pts.append(pt)
        return pts

    def get_center_lines(self, lane_divider):
        lane_divider = self.comb_lines(lane_divider)
        pts = []
        if lane_divider is None:
            return pts
        for re in lane_divider:
            pt=np.insert(re['points'], 3, values=-1, axis=1)
            pts.append(pt)
        return pts
    def get_road_markings(self, road_marking):
        """Collect all road-marking polylines plus a per-line class index.

        Classes are numbered by their position in the fixed order below:
        0=crosswalk, 1=stop_line, 2=speed_bump, 3=no_stop, 4=turn.
        """
        pts = []
        labels = []
        kinds = ['crosswalk', 'stop_line', 'speed_bump', 'no_stop', 'turn']
        for class_idx, kind in enumerate(kinds):
            extracted = getattr(self, 'get_road_markings_%s' % kind)(road_marking)
            pts.extend(extracted)
            labels.extend(np.ones(len(extracted)) * class_idx)
        return pts, labels
        
    def _get_road_markings(self, road_marking, filter_cls: list):
        """Polylines for markings whose primary category is in ``filter_cls``.

        Polygon markings are closed by repeating the first vertex (this
        appends to the caller's polygon list in place); markings without a
        polygon contribute both edge polylines.  A constant -1 type column
        is appended to every output array.
        """
        if road_marking is None:
            return []
        out = []
        for marking in road_marking:
            if marking['category'][0] not in filter_cls:
                continue
            polygon = marking.get("polygon", None)
            if polygon is not None:
                polygon.append(polygon[0])
                out.append(np.insert(polygon, 3, values=-1, axis=1))
            else:
                out.append(np.insert(marking['edge1'], 3, values=-1, axis=1))
                out.append(np.insert(marking['edge2'], 3, values=-1, axis=1))
        return out

    def get_road_markings_crosswalk(self, road_marking):
        # Crosswalk outlines (type column fixed at -1 by _get_road_markings).
        filter_cls = ['crosswalk']
        return self._get_road_markings(road_marking, filter_cls)

    def get_road_markings_stop_line(self, road_marking):
        # Stop-line polylines.
        filter_cls = ['stop_line']
        return self._get_road_markings(road_marking, filter_cls)

    def get_road_markings_speed_bump(self, road_marking):
        # Speed-bump outlines.
        filter_cls = ['speed_bump']
        return self._get_road_markings(road_marking, filter_cls)

    def get_road_markings_no_stop(self, road_marking):
        # No-stopping-zone outlines.
        filter_cls = ['no_stop']
        return self._get_road_markings(road_marking, filter_cls)

    def get_road_markings_turn(self, road_marking):
        """Polylines for turn-arrow markings.

        Each matched category contributes one decimal digit to the type
        value ``ty`` (left=1e5, straight=1e4, right=1e3, u_turn=1e2,
        confluence_left=1e1, confluence_right=1e0), so combined arrows are
        encoded in a single number.
        """
        filter_cls = {'left': 1e5, 
                      "straight": 1e4,
                      "right": 1e3,
                      "u_turn": 1e2,
                      'confluence_left': 1e1,
                      'confluence_right': 1e0}
        pts = []
        if road_marking is None:
            return pts
        for re in road_marking:
            ty = 0
            for r in re['category']:
                if r in filter_cls.keys():
                    ty += filter_cls[r]
            if ty > 0:
                if re.get("polygon", None) is not None:
                    pts_ = re['polygon']
                    # Close the polygon (mutates the annotation in place).
                    pts_.append(pts_[0])
                    pt=np.insert(pts_, 3, values=ty, axis=1)
                    pts.append(pt)
                else:
                    # NOTE(review): the edge branch tags with -1 rather than
                    # ``ty``, unlike the polygon branch -- confirm this
                    # asymmetry is intentional.
                    pts_ = re['edge1']
                    pts.append(np.insert(pts_, 3, values=-1, axis=1))
                    pts_ = re['edge2']
                    pts.append(np.insert(pts_, 3, values=-1, axis=1))
        return pts

    def get_func(self, vec_lines, func):
        """Dispatch to ``get_<func>`` with the matching vec_lines entry."""
        handler = getattr(self, 'get_%s' % func)
        return handler(vec_lines.get(func, None))

    def get_lane_gt(self, vec_lines):
        """
        return :
            lines_gt_pts: line_id x y z type
            road_edges, lane_dividers, double_dividers,
        """
        Ltype = []
        pts = []
        if vec_lines is None:
            vec_lines = {}
        # distance_div = [250,250]
        for k, func in enumerate(['road_edges','lane_dividers','center_lines']):
            pts_ = self.get_func(vec_lines, func)
            pts.extend(pts_)
            Ltype.extend(np.ones(len(pts_))*k)
    
        _pts, _Ltype = self.get_road_markings(vec_lines.get('road_markings', None))
        _Ltype = [i+k+1 for i in _Ltype]

        pts.extend(_pts)
        Ltype.extend(_Ltype)

        for i in range(len(pts)):
            if len(pts[i]) > 0:
                pts[i] = np.insert(pts[i], 0, values = i, axis=1)
                pts[i] = np.insert(pts[i], 5, values = Ltype[i], axis=1)
            # input_dict['lines_gt_pts'] = np.concatenate(gt_pts)
        pts = np.concatenate(pts).astype(np.float32) if len(pts) > 0 else np.array(pts)
        
        return pts
    
    def get_pakage_occ(self, file):
        # a = 'data/hj_dataset/' + file
        a = file.split('/')
        a.pop(2)
        a[1] += '_occ'
        a = a[:4]
        return  osp.join(*a)

    def get_occ_file(self, file, occ_version='occ_gt_v4'):
        """Locate the occ-GT directory and file list for a lidar frame.

        Returns ``(True, occ_dir, sorted_file_names)`` when the expected
        occ version directory exists, otherwise ``(False, 0, 0)``.
        """
        occ_dir = self.get_pakage_occ(file)
        if not osp.exists(occ_dir):
            return False, 0, 0
        version_dir = osp.join(occ_dir, occ_version)
        if not osp.exists(version_dir):
            return False, 0, 0
        if occ_version == 'occ_gt_v1':
            # v1 nests its files one directory deeper.
            version_dir = osp.join(version_dir, 'dynamic_object_occ')
            if not osp.exists(version_dir):
                return False, 0, 0
        names = os.listdir(version_dir)
        names.sort()
        return True, occ_dir, names
    


@DATASETS.register_module()
class Hj_dataset(Dataset):
    """mmdet-registered dataset wrapping :class:`HJDataset`.

    Samples per-frame 2D/3D augmentation parameters, pulls the raw info
    dict from ``HJDataset`` and runs it through the configured pipeline.
    """

    # Detection categories (nuScenes-style taxonomy); overridable via
    # ``config['classes']``.
    CLASSES = (
        "car",
        "truck",
        "trailer",
        "bus",
        "construction_vehicle",
        "bicycle",
        "motorcycle",
        "pedestrian",
        "traffic_cone",
        "barrier",
    )
    # Vectorized-map element categories; overridable via
    # ``config['map_classes']``.
    MAP_CLASSES = (
        'road_edge',
        'lane',
        'center_lines',
        'crosswalk',
        'stop_line',
        'speed_bump',
        'no_stop',
        'turn',
    )
    # Per-class colors used when visualizing predictions
    # (presumably BGR for OpenCV drawing — TODO confirm against the
    # visualization helpers).
    ID_COLOR_MAP = [
        (59, 59, 238),
        (0, 255, 0),
        (0, 0, 255),
        (255, 255, 0),
        (0, 255, 255),
        (255, 0, 255),
        (255, 255, 255),
        (0, 127, 255),
        (71, 130, 255),
        (127, 127, 0),
    ]

    def __init__(
        self,
        ann_file,
        pipeline=None,
        data_root=None,
        **config,
    ):
        """Initialize the dataset wrapper.

        Args:
            ann_file (str): annotation file consumed by ``HJDataset``.
            pipeline (list | None): mmdet pipeline config; compiled with
                :class:`Compose` when given.
            data_root (str | None): dataset root directory.
            **config: free-form options. ``version`` and ``modality`` are
                required; everything else has a default.
        """
        self.version = config["version"]
        self.load_interval = config.get("load_interval", 1)
        self.use_valid_flag = config.get("use_valid_flag", False)
        super().__init__()
        self.data_root = data_root
        self.ann_file = ann_file
        self.test_mode = config.get("test_mode", False)
        self.modality = config["modality"]
        self.box_mode_3d = 0

        self.CLASSES = config.get("classes", self.CLASSES)
        self.map_classes = config.get("map_classes", self.MAP_CLASSES)
        self.cat2id = {name: i for i, name in enumerate(self.CLASSES)}
        # The underlying dataset does all annotation loading/parsing.
        self.hjdataset = HJDataset(ann_file,
                                 data_root,
                                 **config,)

        if pipeline is not None:
            self.pipeline = Compose(pipeline)

        self.with_velocity = config.get("with_velocity", False)

        if self.modality is None:
            self.modality = dict(
                use_camera=False,
                use_lidar=True,
                use_radar=False,
                use_map=False,
                use_external=False,
            )
        self.vis_score_threshold = config.get("vis_score_threshold", 0.3)

        self.data_aug_conf = config.get("data_aug_conf", None)
        self.sequences_split_num = config.get("sequences_split_num", 1)
        self.keep_consistent_seq_aug = config.get("keep_consistent_seq_aug", False)
        if config.get('with_seq_flag'):
            self._set_sequence_group_flag()

    def _set_sequence_group_flag(self):
        """Assign every frame a sequence-group id in ``self.flag``.

        Frames of the same scene share a group; ``sequences_split_num``
        then subdivides each scene into that many sub-sequences
        (``-1`` = one group per frame, ``"all"`` = one group per sample).
        """
        data_infos = self.hjdataset.data_infos
        key_frame = self.hjdataset.key_frame
        if self.sequences_split_num == -1:
            self.flag = np.arange(len(data_infos))
            return

        res = []
        pre_scene = None
        curr_sequence = -1
        for _idx in range(len(key_frame)):
            idx = key_frame[_idx]
            if data_infos[idx]["scene"] != pre_scene:
                # Scene change -> start a new sequence group.
                curr_sequence += 1
                pre_scene = data_infos[idx]["scene"]
            res.append(curr_sequence)

        self.flag = np.array(res, dtype=np.int64)

        if self.sequences_split_num != 1:
            if self.sequences_split_num == "all":
                self.flag = np.array(
                    range(len(self)), dtype=np.int64
                )
            else:
                # Evenly split each scene into `sequences_split_num` chunks.
                bin_counts = np.bincount(self.flag)
                new_flags = []
                curr_new_flag = 0
                for curr_flag in range(len(bin_counts)):
                    curr_sequence_length = np.array(
                        list(
                            range(
                                0,
                                bin_counts[curr_flag],
                                math.ceil(
                                    bin_counts[curr_flag]
                                    / self.sequences_split_num
                                ),
                            )
                        )
                        + [bin_counts[curr_flag]]
                    )

                    for sub_seq_idx in (
                        curr_sequence_length[1:] - curr_sequence_length[:-1]
                    ):
                        for _ in range(sub_seq_idx):
                            new_flags.append(curr_new_flag)
                        curr_new_flag += 1

                assert len(new_flags) == len(self.flag)
                assert (
                    len(np.bincount(new_flags))
                    == len(np.bincount(self.flag)) * self.sequences_split_num
                )
                self.flag = np.array(new_flags, dtype=np.int64)

    def sample_augmentation(self, cam=None):
        """Sample 2D image-augmentation parameters for one camera.

        Random resize/crop/flip/rotate in train mode; deterministic
        (mean crop, no flip/rotate) in test mode.

        Returns:
            tuple: ``(resize, resize_dims, crop, flip, rotate)``.
        """
        data_config = self.data_aug_conf
        H, W = data_config['src_size'][cam]
        fH, fW = data_config['input_size']
        if not self.test_mode:
            resize = float(fW) / float(W)
            resize += np.random.uniform(*data_config['resize'][cam])
            resize_dims = (int(W * resize), int(H * resize))
            newW, newH = resize_dims
            crop_h = int((1 - np.random.uniform(*data_config['crop_h'][cam])) *
                         newH) - fH
            # Horizontal crop is centered rather than randomly placed.
            crop_w = int(max(0, newW - fW) / 2)
            crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)
            flip = data_config['flip'] and np.random.choice([0, 1])
            rotate = np.random.uniform(*data_config['rot'])
        else:
            resize = float(fW) / float(W)
            resize += data_config['resize_test'][cam]
            resize_dims = (int(W * resize), int(H * resize))
            newW, newH = resize_dims
            crop_h = int((1 - np.mean(data_config['crop_h'][cam])) * newH) - fH
            crop_w = int(max(0, newW - fW) / 2)
            crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)
            flip = False
            rotate = 0
        return resize, resize_dims, crop, flip, rotate

    def get_augmentation(self):
        """Sample the full augmentation config (per-camera 2D + global 3D).

        Returns:
            dict | None: augmentation parameters, or ``None`` when no
            ``data_aug_conf`` was configured.
        """
        if self.data_aug_conf is None:
            return None
        resize, resize_dims, crop = {}, {}, {}
        flip, rotate = {}, {}
        for cam in self.data_aug_conf['cams']:
            resize[cam], resize_dims[cam], crop[cam], flip[cam], rotate[cam] = \
                self.sample_augmentation(cam)
        # Global 3D augmentations shared by all cameras of the sample.
        rotate_3d = np.random.uniform(*self.data_aug_conf['rot_3d'])
        flip_dx = np.random.rand() < self.data_aug_conf['flip_dx_3d']
        flip_dy = np.random.rand() < self.data_aug_conf['flip_dy_3d']
        scale_3d = np.random.uniform(*self.data_aug_conf['scale_3d'])
        aug_config = {
            "resize": resize,
            "resize_dims": resize_dims,
            "crop": crop,
            "flip": flip,
            "rotate": rotate,
            "rotate_3d": rotate_3d,
            "scale_3d": scale_3d,
            'flip_dx_3d': flip_dx,
            'flip_dy_3d': flip_dy
        }
        return aug_config

    def __len__(self,):
        return len(self.hjdataset)

    def __getitem__(self, idx):
        """Return one pipeline-processed sample.

        ``idx`` may be a plain index, or a dict carrying a pre-sampled
        ``aug_config`` (used to keep augmentation consistent across a
        sequence of frames).
        """
        if isinstance(idx, dict):
            aug_config = idx["aug_config"]
            idx = idx["idx"]
        else:
            aug_config = self.get_augmentation()

        data = self.get_data_info(idx)
        data["aug_config"] = aug_config
        data = self.pipeline(data)
        return data

    def get_img_metas(self, idx):
        return self.get_data_info(idx)

    def get_ego2img(self, info):
        """Delegate ego->image projection computation to ``HJDataset``."""
        return self.hjdataset._get_ego2img(info)

    def get_data_info(self, index):
        """Assemble the raw info dict for frame ``index``.

        Stacks per-camera calibration matrices in the order given by
        ``data_aug_conf['cams']`` and unpacks box annotations when present.
        """
        info = self.hjdataset.get_data_info(index)

        info = self.get_ego2img(info)

        info['cams_list'] = self.data_aug_conf['cams']
        # NOTE: 'lidar2img' actually holds ego2img matrices; the nuScenes
        # key name is kept for compatibility with downstream transforms.
        info['lidar2img'] = np.stack([info['cams'][cam]['ego2img'] for cam in info['cams_list']])
        info['cam_intrinsic'] = np.stack([info['cams'][cam]['cam_intrinsic'] for cam in info['cams_list']])
        info['cam_distortion'] = np.stack([info['cams'][cam]['cam_distortion'] for cam in info['cams_list']])
        info['cam2ego'] = np.stack([info['cams'][cam]['sensor2ego'] for cam in info['cams_list']])
        if info.get('ann_infos') is not None:
            anno = info['ann_infos']
            info["gt_bboxes_3d"] = anno[0]
            info["gt_labels_3d"] = anno[1]
            info["instance_ids"] = anno[2]
        info['lidar2global'] = info['ego2global']
        info['scene_id'] = info['scene']
        return info

    def get_sample(self, index):
        """Alias of :meth:`get_data_info`.

        The previous implementation returned an undefined name ``info``,
        which always raised ``NameError``.
        """
        return self.get_data_info(index)
if __name__ == "__main__":
    for key in sa.list():
        sa.delete(str(key.name,encoding='utf-8'))