# projects/tools/data_convert/meg_converter.py
import os
from os import path as osp
import mmcv
import numpy as np
import json
from pyquaternion import Quaternion
# from projects.Meg_dataset.mmdet3d.datasets.meg_dataset import MegDataset
import math


# Detection class names (nuScenes-style ten-class set). The position of a
# name in this list is used as its integer label id via class_names.index(),
# so the order must not change once data has been generated.
class_names = ['car', 'truck', 'trailer', 'bus', 'construction',
                  'bicycle', 'motorcycle', 'pedestrian', 'trafficcone', 'barrier']


def get_train_val_scenes(root_path):
    """Split the scenes under ``<root_path>/points`` into train/val lists.

    Scene names are the file stems of ``points/*`` (text before the first
    '.'). The listing is sorted before splitting: ``os.listdir`` returns
    entries in arbitrary, filesystem-dependent order, so the previous code
    produced a different train/val split on every machine/run (bug fix).
    The last ``len // 10`` scenes of the sorted list form the validation
    set; the rest are training scenes.

    Args:
        root_path (str): dataset root directory containing ``points/``.

    Returns:
        tuple[list[str], list[str]]: ``(train_scenes, val_scenes)``,
        e.g. ``(['0', '1', ...], ['9'])``.
    """
    points_dir = osp.join(root_path, 'points')
    # Sort for a reproducible split; listdir order is not guaranteed.
    all_scenes = sorted(name.split('.')[0] for name in os.listdir(points_dir))

    # Floor division: 1/10 of the scenes go to validation.
    val_num = len(all_scenes) // 10
    train_num = len(all_scenes) - val_num

    train_scenes = all_scenes[:train_num]
    val_scenes = all_scenes[train_num:]

    return train_scenes, val_scenes  # e.g. (['0', '1', ...], ['9'])


def create_custom_infos(
    root_path, info_prefix
):
    """Generate train/val info pkl files for the custom dataset.

    Splits the scenes, builds nuScenes-style info dicts for both splits,
    and dumps them to ``<root_path>/<info_prefix>_infos_train.pkl`` and
    ``<root_path>/<info_prefix>_infos_val.pkl``.

    Args:
        root_path (str): dataset root directory.
        info_prefix (str): prefix of the output pkl file names.
    """
    # Split the dataset into scene-name lists.
    train_scenes, val_scenes = get_train_val_scenes(root_path)

    # Build one info dict per scene for each split.
    train_nusc_infos, val_nusc_infos = _fill_trainval_infos(root_path, train_scenes, val_scenes)

    metadata = dict(version="v1.0-trainval")

    print(
        "train sample: {}, val sample: {}".format(
            len(train_nusc_infos), len(val_nusc_infos)
        )
    )

    # Dump each split with its own dict so neither output aliases the other.
    train_path = osp.join(root_path, "{}_infos_train.pkl".format(info_prefix))
    mmcv.dump(dict(infos=train_nusc_infos, metadata=metadata), train_path)

    val_path = osp.join(root_path, "{}_infos_val.pkl".format(info_prefix))
    mmcv.dump(dict(infos=val_nusc_infos, metadata=metadata), val_path)

# 读取文件 创建info
def _read_calib(calib_path):
    """Parse a KITTI-style calib file: each line is ``<key>: v0 v1 ...``.

    Returns a dict mapping key (trailing ':' stripped) to a flat list of
    floats. Blank/valueless lines are skipped.
    """
    calib_info = {}
    with open(calib_path, "r") as f:
        for line in f:
            tokens = line.strip().split(' ')
            if len(tokens) < 2:
                continue  # skip blank or malformed lines
            # tokens[0] is e.g. 'P0:' -> drop the trailing colon.
            calib_info[tokens[0][:-1]] = [float(x) for x in tokens[1:]]
    return calib_info


def _make_cam_info(root_path, cam, scene_name, timestamp, calib_info, cam_index):
    """Build the per-camera info dict (image path, intrinsics, placeholders).

    NOTE(review): the lidar->camera extrinsics (``T_lidar2cam<j>``) present
    in the calib file are NOT stored here; sensor2lidar / sensor2ego are
    hard-coded identity transforms (matching the original output). Confirm
    this is intended before relying on multi-view geometry downstream.
    """
    image_path = osp.join(root_path, 'images', cam, scene_name + '.png')
    # Intrinsics: 'P<j>' is a flattened 3x4 projection matrix; keep the 3x3 K.
    cam_intrinsics = np.array(calib_info['P' + str(cam_index)]).reshape([-1, 4])[:3, :3]
    return dict(
        data_path=image_path,                      # absolute image path
        type=cam,
        sensor2ego_translation=[0, 0, 0],          # identity placeholders
        sensor2ego_rotation=[1, 0, 0, 0],
        ego2global_translation=[0, 0, 0],
        ego2global_rotation=[1, 0, 0, 0],
        timestamp=timestamp,
        sample_data_token=[],
        sensor2lidar_rotation=[1, 0, 0, 0],
        sensor2lidar_translation=[0, 0, 0],
        cam_intrinsic=cam_intrinsics,
    )


def _read_labels(label_path):
    """Parse a KITTI-format label file into annotation arrays.

    KITTI field layout per line: name at 0, dims (h, w, l) at 8-10,
    location (x, y, z) at 11-13, rotation_y at 14. Boxes are stored as
    ``[x, y, z, l, w, h, yaw, vx, vy]`` with zero velocity (this dataset
    has no velocity annotations).

    Returns a dict whose keys are inserted in the same order the original
    code wrote them into the info dict. Raises ValueError for a class name
    not in ``class_names`` (unknown classes are a data error).
    """
    gt_boxes, gt_names, gt_labels, gt_velocity = [], [], [], []
    num_lidar_pts, num_radar_pts, is_2d_visible = [], [], []

    with open(label_path, 'r') as f:
        for line in f:
            fields = line.strip().split(' ')
            name = fields[0]
            # Reorder KITTI (h, w, l) dims to (l, w, h); append zero velocity.
            box = [float(fields[11]), float(fields[12]), float(fields[13]),
                   float(fields[10]), float(fields[9]), float(fields[8]),
                   float(fields[14]), 0, 0]

            gt_names.append(name)
            gt_boxes.append(box)
            gt_labels.append(class_names.index(name))
            gt_velocity.append([0, 0])

            # Point counts / 2D visibility are not available in this dataset.
            num_lidar_pts.append(None)
            is_2d_visible.append(None)
            num_radar_pts.append(None)

    return dict(
        # reshape keeps (0, 9) / (0, 2) shapes when the label file is empty.
        gt_boxes=np.array(gt_boxes, dtype=np.float32).reshape(-1, 9),
        # fix: np.str was removed in NumPy 1.24; plain str is the equivalent.
        gt_names=np.array(gt_names, dtype=str),
        gt_labels=np.array(gt_labels, dtype=np.int8),
        # fix: velocities are floats; int8 would truncate real values.
        gt_velocity=np.array(gt_velocity, dtype=np.float32).reshape(-1, 2),
        num_lidar_pts=np.array(num_lidar_pts),
        is_2d_visible=np.array(is_2d_visible),
        # fix: original copy-paste built this from num_lidar_pts.
        num_radar_pts=np.array(num_radar_pts),
    )


def _fill_trainval_infos(root_path, train_scenes, val_scenes, test=False):
    """Build a nuScenes-style info dict for every scene.

    Args:
        root_path (str): dataset root containing ``points/``, ``calib/``,
            ``labels/`` and ``images/<CAM>/`` sub-directories.
        train_scenes (list[str]): scene names of the training split.
        val_scenes (list[str]): scene names of the validation split.
        test (bool): unused; kept for interface compatibility.

    Returns:
        tuple[list[dict], list[dict]]: ``(train_infos, val_infos)``.
    """
    camera_types = [
        "CAM_FRONT_LEFT",
        "CAM_FRONT",
        "CAM_FRONT_RIGHT",
        "CAM_BACK_LEFT",
        "CAM_BACK",
        "CAM_BACK_RIGHT",
    ]

    train_nusc_infos = []
    val_nusc_infos = []

    # frame_id / timestamp are just indices over the concatenated list.
    all_scene_names = train_scenes + val_scenes
    train_set, val_set = set(train_scenes), set(val_scenes)

    for i, scene_name in enumerate(all_scene_names):
        info = {
            "frame_id": i,
            "lidar_path": os.path.join(root_path, "points", scene_name + '.bin'),
            "token": [],
            "sweeps": [],                       # this dataset has no sweeps
            "cams": dict(),
            # Lidar frame == ego frame: zero translation, identity rotation
            # (np.eye(3) equals the identity quaternion's rotation matrix).
            "lidar2ego_translation": np.array([0, 0, 0]),
            "lidar2ego_rotation": np.eye(3),
            "timestamp": str(i).zfill(10),
            "valid_flag": [],
            "scene_token": [],
            "occ_path": [],
        }

        # Sensor calibration (intrinsics + extrinsics, KITTI-style txt).
        calib_info = _read_calib(os.path.join(root_path, "calib", scene_name + '.txt'))

        # Per-camera info: image path, intrinsics, placeholder transforms.
        for j, cam in enumerate(camera_types):
            info["cams"][cam] = _make_cam_info(
                root_path, cam, scene_name, info["timestamp"], calib_info, j)

        # Annotations; key assignment order matches the original info layout.
        ann = _read_labels(os.path.join(root_path, "labels", scene_name + '.txt'))
        info["gt_boxes"] = ann["gt_boxes"]
        info["gt_names"] = ann["gt_names"]
        info["gt_labels"] = ann["gt_labels"]
        info["gt_velocity"] = ann["gt_velocity"]
        info["num_lidar_pts"] = ann["num_lidar_pts"]
        info["is_2d_visible"] = ann["is_2d_visible"]
        info["num_radar_pts"] = ann["num_radar_pts"]
        info["ann_infos"] = [ann["gt_boxes"], ann["gt_labels"]]

        if scene_name in train_set:
            train_nusc_infos.append(info)
        if scene_name in val_set:
            val_nusc_infos.append(info)

    return train_nusc_infos, val_nusc_infos