import argparse
import open3d as o3d
import cv2
import numpy as np
import os
from tqdm import tqdm
import sys
from vis_utils import get_draw_box
from my_io import mkdir, read_label, write_pickle, write_points, read_points
from utils import bbox3d2corners, group_rectangle_vertexs, group_plane_equation, points_in_bboxes
from dataset_util import check_dataset_is_splited

# Make sibling modules importable regardless of the current working directory.
# NOTE(review): this append runs *after* the project imports above, so those
# imports only resolve when the script is launched from this directory —
# consider moving this before them.
CUR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(CUR)

def create_data_info_pkl(args, data_root, data_type, label=True, db=False, semseg=False):
    """Collect per-frame lidar/camera/label metadata for one split and dump it to pkl.

    Args:
        args: parsed CLI args (uses sensor_id, data_base, gt_database, is_view).
        data_root: path of one dataset sequence directory.
        data_type: split name ('train' / 'val' / 'test'), matching
            split_sets/<data_type>.txt inside the dataset folder.
        label: whether to read cuboid annotations for each frame.
        db: whether to also crop per-object point clusters (GT database for
            copy-paste augmentation) and write dbinfos_train.pkl.
        semseg: whether to record semantic-segmentation file info; silently
            disabled when the semseg annotation directory does not exist.

    Returns:
        dict mapping int(frame id) -> per-frame info dict.
    """
    sensor_name = "pandarGT" if args.sensor_id == 1 else "pandar64"
    print(f"Processing {data_type} data..")
    # split_sets/<split>.txt lives inside the dataset folder
    ids_file = os.path.join(data_root, 'split_sets', f'{data_type}.txt')
    data_base = os.path.join(data_root, args.data_base)  # pkl output directory
    mkdir(data_base)
    dataset_dir = str.split(data_root, '/')[-1]
    with open(ids_file, 'r') as f:
        ids = [line.strip() for line in f.readlines()]

    dataset_infos_dict = {}
    if db:
        mydata_dbinfos_train = {}
        db_points_saved_path = os.path.join(data_base, args.gt_database)
        os.makedirs(db_points_saved_path, exist_ok=True)

    for frame_id, sample_id in enumerate(tqdm(ids)):
        cur_info_dict = {"data_info": {"type": 'pandaset'}}

        # ---- lidar info ----
        lidar_path = os.path.join(data_root, 'lidar_bin', sensor_name, f'{sample_id}.bin')
        assert os.path.exists(lidar_path)
        lidar_pc = read_points(lidar_path, dim=3, datatype=np.float32)
        cur_info_dict["lidar"] = {
            "path": os.path.join(dataset_dir, 'lidar_bin', sensor_name, f'{sample_id}.bin'),
            "points_num": len(lidar_pc),
        }

        # ---- camera info (one entry per '*camera' subdirectory) ----
        camera_info_list = []
        for cam_dir in os.listdir(os.path.join(data_root, 'camera')):
            if not cam_dir.endswith("camera"):
                continue
            img_path = os.path.join(data_root, 'camera', cam_dir, f'{sample_id}.jpg')
            assert os.path.exists(img_path)
            img = cv2.imread(img_path)
            if args.is_view:
                cv2.imshow(cam_dir, img)
                cv2.waitKey(1)
            camera_info_list.append({
                'idx': cam_dir,
                'path': os.path.join(dataset_dir, 'camera', cam_dir, f'{sample_id}.jpg'),
                'shape': img.shape[:2],
            })
        cur_info_dict["camera"] = camera_info_list

        # Some sequences ship without semseg annotations; disable for the rest
        # of this call once the directory is found missing.
        if not os.path.exists(os.path.join(data_root, 'annotations_txt', 'semseg')):
            semseg = False

        if semseg:
            semseg_path = os.path.join(data_root, 'annotations_txt', 'semseg', sensor_name, f'{sample_id}.bin')
            assert os.path.exists(semseg_path)
            lidar_semseg = read_points(semseg_path, dim=1, datatype=np.float32)
            cur_info_dict["semseg"] = {
                "path": os.path.join(dataset_dir, 'annotations_txt', 'semseg', sensor_name, f'{sample_id}.bin'),
                "semseg_num": len(lidar_semseg),
            }
            cur_info_dict["semcls"] = {
                "path": os.path.join(dataset_dir, 'annotations_txt', 'semseg', 'classes.json'),
            }

        if label:
            vis = None
            if args.is_view:
                vis = o3d.visualization.Visualizer()
                vis.create_window(window_name="show_pred_pcd")
                render_option = vis.get_render_option()
                render_option.point_size = 2
                coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
                vis.add_geometry(coordinate_frame)
                datas = o3d.geometry.PointCloud()
                datas.points = o3d.utility.Vector3dVector(lidar_pc[:, :3])
                datas.paint_uniform_color([0.3, 0.5, 0])
                vis.add_geometry(datas)

            # BUG FIX: original rebound the `label` parameter to this path,
            # shadowing the flag; use a dedicated name.
            label_path = os.path.join(data_root, 'annotations_txt', 'cuboids', sensor_name, f'{sample_id}.txt')
            label_dict = read_label(label_path)
            cur_info_dict['label'] = label_dict

            if len(label_dict['location']) == 0:
                # No objects in this frame: skip it entirely. Also close any
                # open viewer window (the original leaked it here).
                if vis is not None:
                    vis.destroy_window()
                continue

            # (N, 7) boxes: x, y, z + dimensions + yaw. Computed here — not
            # only under `db` — so the is_view drawing below also works with
            # db=False (the original raised NameError on `boxes` there).
            boxes = np.concatenate((label_dict['location'], label_dict['dimensions'], label_dict['rotation_y']),
                                   axis=1)

            if db:
                corners = bbox3d2corners(boxes)
                rect_vertexs = group_rectangle_vertexs(corners)
                frustum_surfaces = group_plane_equation(rect_vertexs)
                # (N, n): N is points num, n is bboxes number
                indices = points_in_bboxes(lidar_pc[:, :3], frustum_surfaces)

                for j in range(len(boxes)):
                    db_points = lidar_pc[indices[:, j]]

                    if args.is_view:
                        datas2 = o3d.geometry.PointCloud()
                        datas2.points = o3d.utility.Vector3dVector(db_points[:, :3])
                        datas2.paint_uniform_color([0.7, 0.2, 0.3])
                        vis.add_geometry(datas2)

                    db_points[:, :3] -= boxes[j, :3]  # recenter cluster on its box center
                    name = label_dict["name"][j].lower()
                    # j-th object of this class in frame `frame_id`
                    db_points_saved_name = os.path.join(db_points_saved_path,
                                                        f'{frame_id}_{name}_{j}.bin')
                    # BUG FIX: astype returns a new array; the original
                    # discarded the result and wrote the unconverted points.
                    db_points = db_points.astype(np.float32)
                    write_points(db_points, db_points_saved_name)

                    db_info = {
                        # class name / cluster bin path / box center+size / difficulty / point count
                        'name': name,
                        'path': os.path.join(os.path.basename(data_root),
                                             args.data_base, args.gt_database,
                                             f'{frame_id}_{name}_{j}.bin'),
                        'image_idx': '',
                        'gt_idx': j,
                        'box3d_lidar': boxes[j],
                        'num_points_in_gt': len(db_points),
                        'difficulty': 0,
                        'group_id': j
                    }
                    mydata_dbinfos_train.setdefault(name, []).append(db_info)

            if args.is_view:
                for draw_box in get_draw_box(boxes):
                    vis.add_geometry(draw_box)
                vis.run()
                vis.destroy_window()

        dataset_infos_dict[int(sample_id)] = cur_info_dict

    saved_path = os.path.join(data_base, f'infos_{data_type}.pkl')
    write_pickle(dataset_infos_dict, saved_path)
    print("<{}> info dict 写完".format(data_type))
    if db:
        saved_db_path = os.path.join(data_base, 'dbinfos_train.pkl')
        write_pickle(mydata_dbinfos_train, saved_db_path)
        print("mydata_dbinfos_train 写完")
    return dataset_infos_dict


def main(args):
    """Process every sequence directory under args.data_path in sorted order."""
    assert os.path.exists(args.data_path)
    datasets = sorted(os.listdir(args.data_path))
    # ROBUSTNESS: start_idx may arrive as a str from the CLI (the flag lacks
    # type=int); normalize once so the comparison below cannot TypeError.
    start_idx = int(args.start_idx)
    for dataset in tqdm(datasets):
        # assumes sequence directory names end in a 3-digit index — TODO confirm
        if start_idx != -1 and start_idx > int(dataset[-3:]):
            continue
        dataset_path = os.path.join(args.data_path, dataset)
        main_worker(args, dataset_path)

def main_worker(args, dataset_path):
    """Create train/val/trainval/test info pkls for one sequence directory.

    Args:
        args: parsed CLI args (forwarded to create_data_info_pkl).
        dataset_path: path of the sequence directory to process.
    """
    data_root = dataset_path

    ## 1. train: info pkl + GT database (points in gt bboxes) for data augmentation
    train_infos_dict = create_data_info_pkl(args, data_root, 'train', db=True, semseg=True)

    ## 2. val: info pkl
    val_infos_dict = create_data_info_pkl(args, data_root, 'val', semseg=True)

    ## 3. trainval: merged info pkl
    trainval_infos_dict = {**train_infos_dict, **val_infos_dict}
    # BUG FIX: use args.data_base (as create_data_info_pkl does) instead of a
    # hard-coded 'data_base', so infos_trainval.pkl lands in the same output
    # directory as the other pkls when --data-base is non-default.
    data_base = os.path.join(data_root, args.data_base)  # pkl output directory
    saved_path = os.path.join(data_base, 'infos_trainval.pkl')
    write_pickle(trainval_infos_dict, saved_path)

    ## 4. test: info pkl without labels
    create_data_info_pkl(args, data_root, 'test', label=False)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Dataset infomation')
    parser.add_argument('--data-path', default='/media/zwh/ZWH4T/ZWH/Dataset3d/final/pandarset_final',
                        help='your data root path')
    # BUG FIX: was missing type=int, so CLI-supplied values arrived as str and
    # broke the integer comparison in main().
    parser.add_argument('--start-idx', type=int, default=-1, help='start process id')
    # BUG FIX: type=bool is broken in argparse (bool('False') is True);
    # store_true keeps the False default and makes --is-view a plain flag.
    parser.add_argument('--is-view', action='store_true')  # enable visualization
    parser.add_argument('--data-base', type=str, default="data_base")  # pkl folder name
    parser.add_argument('--gt-database', type=str, default="gt_database")  # samples folder name
    parser.add_argument('--sensor-id', type=int, default=0, help='0:pandar64, 1:pandarGT')
    args = parser.parse_args()

    main(args)