import argparse
import pdb
import open3d as o3d
from my_io import mkdir
import cv2
import numpy as np
import os
from tqdm import tqdm
import sys
from vis_utils import get_draw_box
from my_io import write_pickle, write_points, read_points
from utils import bbox3d2corners, group_rectangle_vertexs, group_plane_equation, points_in_bboxes
from dataset_util import check_dataset_is_splited

CUR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(CUR)

def read_label(file_path):
    """Parse a KITTI-format label txt file into a dict of numpy arrays.

    Each line holds: name truncated occluded alpha bbox(4) dimensions(3, hwl)
    location(3) rotation_y.

    Returns:
        dict with keys name/truncated/occluded/alpha/bbox/dimensions/
        location/rotation_y. `dimensions` is reordered hwl -> lwh and
        `location` y is shifted from the box bottom-center to the geometric
        center (camera y axis points down, hence y - 0.5 * h).
    """
    with open(file_path, 'r') as f:
        lines = f.readlines()
    lines = [line.strip().split(' ') for line in lines]
    annotation = {}
    annotation['name'] = np.array([line[0:1] for line in lines])
    # NOTE: np.float / np.int were removed in NumPy 1.24; use builtin types.
    annotation['truncated'] = np.array([line[1:2] for line in lines], dtype=float)  # truncation ratio
    annotation['occluded'] = np.array([line[2:3] for line in lines], dtype=int)  # occlusion level
    annotation['alpha'] = np.array([line[3:4] for line in lines], dtype=float)  # observation angle
    annotation['bbox'] = np.array([line[4:8] for line in lines], dtype=float)  # 2d image bbox, left-top right-bottom
    annotation['dimensions'] = np.array([line[8:11] for line in lines], dtype=float)[:,
                               [2, 1, 0]]  # 3d dimensions hwl -> lwh
    annotation['location'] = np.array([line[11:14] for line in lines],
                                      dtype=float)  # camera-frame (x, y, z) of the box bottom-center
    # KITTI stores the bottom-center; camera y points down, so the geometric
    # center is y - 0.5 * h (later converted to the lidar frame).
    annotation['location'][:, 1] = annotation['location'][:, 1] - 0.5 * annotation['dimensions'][:, 2]
    annotation['rotation_y'] = np.array([line[14:15] for line in lines], dtype=float)  # rotation around camera y axis (rad)

    return annotation

def read_calib(file_path, extend_matrix=True):
    """Parse a KITTI calib txt file.

    Expects 7 lines in fixed order: P0..P3 (3x4 camera projections),
    R0_rect (3x3 rectification), Tr_velo_to_cam and Tr_imu_to_velo (3x4).

    Args:
        file_path: path to the calib file.
        extend_matrix: if True, pad every matrix to homogeneous 4x4.

    Returns:
        dict with keys P0, P1, P2, P3, R0_rect, Tr_velo_to_cam, Tr_imu_to_velo.
    """
    with open(file_path, 'r') as f:
        lines = f.readlines()
    lines = [line.strip() for line in lines]

    def _mat(idx, rows, cols):
        # values follow the "KEY:" token on each line
        # NOTE: np.float was removed in NumPy 1.24; use the builtin float.
        return np.array(lines[idx].split(' ')[1:], dtype=float).reshape(rows, cols)

    P0 = _mat(0, 3, 4)
    P1 = _mat(1, 3, 4)
    P2 = _mat(2, 3, 4)
    P3 = _mat(3, 3, 4)
    R0_rect = _mat(4, 3, 3)
    Tr_velo_to_cam = _mat(5, 3, 4)
    Tr_imu_to_velo = _mat(6, 3, 4)

    if extend_matrix:
        # append the homogeneous bottom row [0, 0, 0, 1]
        bottom = np.array([[0, 0, 0, 1]])
        P0 = np.concatenate([P0, bottom], axis=0)
        P1 = np.concatenate([P1, bottom], axis=0)
        P2 = np.concatenate([P2, bottom], axis=0)
        P3 = np.concatenate([P3, bottom], axis=0)

        R0_rect_extend = np.eye(4, dtype=R0_rect.dtype)
        R0_rect_extend[:3, :3] = R0_rect
        R0_rect = R0_rect_extend

        Tr_velo_to_cam = np.concatenate([Tr_velo_to_cam, bottom], axis=0)
        Tr_imu_to_velo = np.concatenate([Tr_imu_to_velo, bottom], axis=0)

    calib_dict = dict(
        P0=P0,
        P1=P1,
        P2=P2,
        P3=P3,
        R0_rect=R0_rect,
        Tr_velo_to_cam=Tr_velo_to_cam,
        Tr_imu_to_velo=Tr_imu_to_velo
    )
    return calib_dict

# Copied from https://github.com/open-mmlab/mmdetection3d/blob/f45977008a52baaf97640a0e9b2bbe5ea1c4be34/mmdet3d/core/bbox/box_np_ops.py#L609
def projection_matrix_to_CRT_kitti(proj):
    """Split a KITTI camera projection matrix into intrinsics and extrinsics.

    P = C @ [R|T] with C upper triangular. Since inv(C @ R) = inv(R) @ inv(C)
    and QR decomposes a matrix into (orthogonal, upper-triangular) factors,
    applying QR to inv(CR) recovers inv(R) and inv(C) in a numerically
    stable way for all KITTI projection matrices.

    Args:
        proj (np.ndarray, shape=[4, 4]): camera projection matrix.

    Returns:
        tuple[np.ndarray]: C (3x3 intrinsics), R (3x3 rotation), T (3,).
    """
    cr = proj[:3, :3]
    ct = proj[:3, 3]
    # QR of inv(CR): orthogonal factor is inv(R), upper factor is inv(C)
    r_inv, c_inv = np.linalg.qr(np.linalg.inv(cr))
    intrinsics = np.linalg.inv(c_inv)
    rotation = np.linalg.inv(r_inv)
    translation = c_inv @ ct
    return intrinsics, rotation, translation

def points_camera2lidar(points, tr_velo_to_cam, r0_rect):
    '''
    Transform points from the rectified camera frame back to the lidar frame.

    points: shape=(N, 8, 3)
    tr_velo_to_cam: shape=(4, 4)
    r0_rect: shape=(4, 4)
    return: shape=(N, 8, 3)
    '''
    # append w=1 so the points can be multiplied with 4x4 transforms
    ones = np.ones((*points.shape[:-1], 1), dtype=points.dtype)
    homogeneous = np.concatenate([points, ones], axis=-1)
    # invert the lidar -> rectified-camera chain to get cam2 -> lidar
    cam_to_lidar = np.linalg.inv(r0_rect @ tr_velo_to_cam)
    transformed = homogeneous @ cam_to_lidar.T  # coordinates in the lidar frame
    return transformed[..., :3]

def label_camera2lidar(label_dict, calib_dict):
    """Convert camera-frame annotations into lidar-frame annotations.

    Decomposes P2 into C @ [R|T], undoes the extrinsic part to obtain
    camera-frame 3d centers, then maps them into the lidar frame.
    """
    r0_rect = calib_dict['R0_rect']
    tr_velo_to_cam = calib_dict['Tr_velo_to_cam']

    C, R, T = projection_matrix_to_CRT_kitti(calib_dict['P2'])  # split intrinsics / extrinsics
    # undo [R|T]: 3d centers in the camera frame, shape (3, N)
    cam_center = np.linalg.inv(R) @ (label_dict['location'] - T).T
    # camera frame -> lidar frame, result shape (1, N, 3)
    lidar_center = points_camera2lidar(cam_center.T[None, ...], tr_velo_to_cam, r0_rect)

    return {
        'name': label_dict['name'],
        'location': lidar_center[0],  # box centers in the lidar frame
        'dimensions': label_dict['dimensions'],
        # camera-frame yaw -> lidar-frame yaw
        'rotation_y': np.array(-0.5 * np.pi - label_dict['rotation_y']),
    }

def judge_difficulty(annotation_dict):
    """Assign a KITTI difficulty level to every annotated object.

    Levels: 0 easy, 1 moderate, 2 hard, -1 when no level's thresholds on 2d
    box height, occlusion and truncation are met.

    Args:
        annotation_dict: dict with 'truncated', 'occluded', 'bbox' arrays
            (as produced by read_label).

    Returns:
        np.ndarray of int difficulty per object.
    """
    truncated = annotation_dict['truncated']
    occluded = annotation_dict['occluded']
    bbox = annotation_dict['bbox']
    height = bbox[:, 3] - bbox[:, 1]  # 2d box pixel height

    MIN_HEIGHTS = [40, 25, 25]
    MAX_OCCLUSION = [0, 1, 2]
    MAX_TRUNCATION = [0.15, 0.30, 0.50]
    difficultys = []
    for h, o, t in zip(height, occluded, truncated):
        difficulty = -1
        # iterate hard -> easy so the final value is the easiest level satisfied
        for i in range(2, -1, -1):
            if h > MIN_HEIGHTS[i] and o <= MAX_OCCLUSION[i] and t <= MAX_TRUNCATION[i]:
                difficulty = i
        difficultys.append(difficulty)
    # NOTE: np.int was removed in NumPy 1.24; use the builtin int
    return np.array(difficultys, dtype=int)


def create_data_info_pkl(args, data_root, data_type, label=True, db=False, offset_z=1.7):
    '''
    Build the info pkl for one split, rewriting point clouds with a z offset,
    and optionally building the ground-truth database used for augmentation.

    Args:
        args: CLI args (uses data_base, gt_database, resume, is_view).
        data_root: dataset root directory.
        data_type: split name; frame ids come from split_sets/<data_type>.txt.
        label: if True, read label_txt/ + calib/ and store lidar-frame labels.
        db: if True, additionally crop per-object point clusters into the
            gt database and write dbinfos_train.pkl.
        offset_z: z offset used to move the data onto the z=0 plane.
            NOTE: the stored extrinsics are NOT updated to match.

    Returns:
        dict mapping int frame id -> info dict for that frame.
    '''
    print(f"Processing {data_type} data..")
    ids_file = os.path.join(data_root, 'split_sets', f'{data_type}.txt')  # split_sets must live inside the dataset folder
    data_base = os.path.join(data_root, args.data_base)  # pkl output dir
    dataset_dir = str.split(data_root, '/')[-1]
    with open(ids_file, 'r') as f:
        ids = [id.strip() for id in f.readlines()]

    mkdir(os.path.join(data_root, 'lidar_bin'))

    dataset_infos_dict = {}
    if db:
        mydata_dbinfos_train = {}
        db_points_saved_path = os.path.join(data_base, args.gt_database)
        os.makedirs(db_points_saved_path, exist_ok=True)

    # hoisted out of the loop (was re-imported every image)
    from PIL import Image

    frame_id = -1  # running frame counter
    for id in tqdm(ids[args.resume:]):
        frame_id = frame_id + 1
        cur_info_dict = {}
        cur_info_dict["data_info"] = {
            "type": 'kitti'
        }
        # --- lidar ---
        lidar_path = os.path.join(data_root, 'lidar', f'{id}.bin')
        assert os.path.exists(lidar_path)
        lidar_pc = read_points(lidar_path, dim=4, datatype=np.float32)[:, :3]

        # lift all points so the ground sits at z=0 (labels get the same shift below)
        lidar_pc = lidar_pc + np.array([0, 0, offset_z])
        write_points(lidar_pc, os.path.join(data_root, 'lidar_bin', f'{id}.bin'), datatype=np.float32)

        cur_info_dict["lidar"] = {
            "path": os.path.join(dataset_dir, 'lidar_bin', f'{id}.bin'),
            "points_num": len(lidar_pc),
        }
        # --- cameras ---
        camera_info_list = []
        for dir in os.listdir(os.path.join(data_root, 'camera')):
            if dir[:5] == "image":
                camera_dict = dict()
                img_path = os.path.join(data_root, 'camera', dir, f'{id}.png')
                assert os.path.exists(img_path)
                image = Image.open(img_path)

                if args.is_view:
                    img = cv2.imread(img_path)  # h w c
                    cv2.imshow(dir, img)
                    cv2.waitKey(1)

                camera_dict[dir] = {
                    'path': os.path.join(dataset_dir, 'camera', dir, f'{id}.png'),
                    'shape': image.size[::-1],  # PIL size is (w, h) -> store (h, w)
                }
                camera_info_list.append(camera_dict)
        cur_info_dict["camera"] = camera_info_list

        if label:

            if args.is_view:
                vis = o3d.visualization.Visualizer()
                vis.create_window(window_name="show_pred_pcd")
                render_option = vis.get_render_option()
                render_option.point_size = 2
                coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
                vis.add_geometry(coordinate_frame)
                datas = o3d.geometry.PointCloud()
                datas.points = o3d.utility.Vector3dVector(lidar_pc[:, :3])
                datas.paint_uniform_color([0.3, 0.5, 0])
                vis.add_geometry(datas)

            # --- labels ---
            # BUG FIX: was `label = os.path.join(...)`, shadowing the boolean
            # `label` parameter with a path string
            label_path = os.path.join(data_root, 'label_txt', f'{id}.txt')
            label_dict = read_label(label_path)
            calib_path = os.path.join(data_root, 'calib', f'{id}.txt')
            calib_dict = read_calib(calib_path)

            label_dict = label_camera2lidar(label_dict, calib_dict)
            # apply the same z shift as the points
            label_dict['location'] = label_dict['location'] + np.array([[0, 0, offset_z]])
            cur_info_dict['label'] = label_dict

            # (x, y, z, l, w, h, yaw) per box. Computed outside the `db`
            # branch because the visualization below also needs it (was a
            # NameError when is_view=True and db=False).
            boxes = np.concatenate((label_dict['location'], label_dict['dimensions'], label_dict['rotation_y']),
                                   axis=1)

            if db:
                cornors = bbox3d2corners(boxes)
                group_rectangle_vertexs_v = group_rectangle_vertexs(cornors)
                frustum_surfaces = group_plane_equation(group_rectangle_vertexs_v)
                indices = points_in_bboxes(lidar_pc[:, :3],
                                           frustum_surfaces)  # (N, n), N is points num, n is bboxes number

                for j in range(len(boxes)):
                    db_points = lidar_pc[indices[:, j]]  # points inside box j (fancy-index copy)

                    if args.is_view:
                        datas2 = o3d.geometry.PointCloud()
                        datas2.points = o3d.utility.Vector3dVector(db_points[:, :3])
                        render_option.point_size = 3.0  # render cluster points larger
                        datas2.paint_uniform_color([0.7, 0.2, 0.3])
                        vis.add_geometry(datas2)

                    db_points[:, :3] -= boxes[j, :3]  # cluster coordinates relative to the box center
                    name = label_dict["name"][j][0]
                    db_points_saved_name = os.path.join(db_points_saved_path,
                                                        f'{frame_id}_{name}_{j}.bin')  # j-th same-class object of frame
                    # BUG FIX: astype returns a copy; the result was discarded before
                    db_points = db_points.astype(np.float32)
                    write_points(db_points, db_points_saved_name)

                    db_info = {
                        # class name / cluster bin path / box center+size / cluster point count
                        'name': name,
                        'path': os.path.join(os.path.basename(data_root), args.data_base, args.gt_database, f'{frame_id}_{name}_{j}.bin'),
                        'box3d_lidar': boxes[j],
                        'num_points_in_gt': len(db_points),
                    }
                    if name not in mydata_dbinfos_train:
                        mydata_dbinfos_train[name] = [db_info]
                    else:
                        mydata_dbinfos_train[name].append(db_info)

            if args.is_view:
                exp_draw_boxes = get_draw_box(boxes)
                for box in exp_draw_boxes:
                    vis.add_geometry(box)
                vis.run()
                vis.destroy_window()

        dataset_infos_dict[int(id)] = cur_info_dict

    saved_path = os.path.join(data_base, f'infos_{data_type}.pkl')
    write_pickle(dataset_infos_dict, saved_path)
    print("<{}> info dict 写完".format(data_type))
    if db:
        saved_db_path = os.path.join(data_base, 'dbinfos_train.pkl')
        write_pickle(mydata_dbinfos_train, saved_db_path)
        print("mydata_dbinfos_train 写完")
    return dataset_infos_dict


def main(args):
    """Run main_worker on every dataset folder under args.data_path,
    processing them in sorted order."""
    assert os.path.exists(args.data_path)
    for dataset in sorted(os.listdir(args.data_path)):
        if dataset.startswith("__"):  # dataset directories are prefixed with "__"
            main_worker(args, os.path.join(args.data_path, dataset))

def main_worker(args, dataset_path):
    """Create the info pkls (train/val/trainval/test) and the train
    ground-truth database for a single dataset directory."""
    assert os.path.exists(dataset_path)
    data_root = dataset_path

    ## 1. train: create data information pkl file && create reduced point clouds
    ##           && create database (points in gt bbox) for data augmentation
    kitti_train_infos_dict = create_data_info_pkl(args, data_root, 'train', db=True)

    ## 2. val: create data information pkl file && create reduced point clouds
    kitti_val_infos_dict = create_data_info_pkl(args, data_root, 'val')

    ## 3. trainval: merged data information pkl file
    kitti_trainval_infos_dict = {**kitti_train_infos_dict, **kitti_val_infos_dict}
    # BUG FIX: honor --data-base instead of the hard-coded 'data_base'
    # (create_data_info_pkl already writes its pkls to args.data_base)
    data_base = os.path.join(data_root, args.data_base)  # pkl output dir
    saved_path = os.path.join(data_base, 'infos_trainval.pkl')
    write_pickle(kitti_trainval_infos_dict, saved_path)

    ## 4. test: create data information pkl file without labels
    create_data_info_pkl(args, data_root, 'test', label=False)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Dataset information')
    parser.add_argument('--data-path', default='/home/zwh/tmp/test',
                        help='your data root path')
    # BUG FIX: type=bool treats any non-empty string (even "False") as True;
    # parse the common truthy spellings explicitly instead. `--is-view True`
    # keeps working as before.
    parser.add_argument('--is-view', type=lambda s: str(s).lower() in ('1', 'true', 'yes'),
                        default=False)  # visualize while processing
    parser.add_argument('--resume', type=int, default=0)  # start index into the id list
    parser.add_argument('--data-base', type=str, default="data_base")  # pkl folder name
    parser.add_argument('--gt-database', type=str, default="gt_database")  # gt-sample folder name
    args = parser.parse_args()

    main(args)
