import argparse
import open3d as o3d
import cv2
import numpy as np
import os
from tqdm import tqdm
import sys
# from vis_utils import get_draw_box
from my_io import read_delft_label, write_pickle, write_points, read_points, readcsv
from utils import bbox3d2corners, group_rectangle_vertexs, group_plane_equation, points_in_bboxes
from dataset_util import check_dataset_is_splited

CUR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(CUR)


def judge_difficulty(annotation_dict):
    """Assign a KITTI-style difficulty level to each annotated object.

    An object gets the easiest level (0=easy, 1=moderate, 2=hard) whose
    thresholds on 2D bbox height, occlusion and truncation it satisfies;
    objects matching no level get -1.

    Args:
        annotation_dict: dict with keys 'truncated' (float per object),
            'occluded' (int per object) and 'bbox' ((N, 4) array of
            [x1, y1, x2, y2] image-plane boxes).

    Returns:
        np.ndarray of int64 difficulty labels, one per object.
    """
    truncated = annotation_dict['truncated']
    occluded = annotation_dict['occluded']
    bbox = annotation_dict['bbox']
    height = bbox[:, 3] - bbox[:, 1]  # 2D box height in pixels

    # Per-level thresholds, indexed by difficulty (0=easy, 1=moderate, 2=hard).
    MIN_HEIGHTS = [40, 25, 25]
    MAX_OCCLUSION = [0, 1, 2]
    MAX_TRUNCATION = [0.15, 0.30, 0.50]
    difficultys = []
    for h, o, t in zip(height, occluded, truncated):
        difficulty = -1
        # Iterate hard -> easy so the last (easiest) matching level wins.
        for i in range(2, -1, -1):
            if h > MIN_HEIGHTS[i] and o <= MAX_OCCLUSION[i] and t <= MAX_TRUNCATION[i]:
                difficulty = i
        difficultys.append(difficulty)
    # np.int was removed in NumPy 1.24; use an explicit dtype instead.
    return np.array(difficultys, dtype=np.int64)


def create_data_info_pkl(args, data_root, data_type, label=True, db=False):
    """Build the info pickle for one split of a delft-style dataset.

    For every frame id listed in ``samples/split_sets/{data_type}.txt`` this
    collects lidar / radar / camera (and optionally label) metadata and writes
    ``{args.data_base}/infos_{data_type}.pkl``.  With ``db=True`` it also
    crops the lidar points inside each GT box into per-object .bin files
    under ``args.gt_database`` and writes ``dbinfos_train.pkl`` (used for
    GT-sampling augmentation).

    Args:
        args: parsed CLI namespace; uses ``data_base``, ``gt_database`` and
            ``is_view``.
        data_root: absolute path to one dataset directory.
        data_type: split name ('train' / 'val' / 'test' / ...).
        label: whether this split has label files (False for the test split).
        db: whether to additionally build the GT database.

    Returns:
        dict mapping int frame id -> per-frame info dict.
    """
    fieldnames = ['Item', 'Details']
    csv_datas = readcsv(os.path.join(data_root, 'README.csv'), fieldnames)
    lidar_dim = 4
    With_Intensity = False
    if 'With_Intensity' in csv_datas:
        With_Intensity = csv_datas['With_Intensity']
        # lidar_dim = 4 if With_Intensity.lower()=='true' else 3
    print(f"Processing {data_type} data..")
    # split_sets is expected to live inside the dataset folder
    ids_file = os.path.join(data_root, 'samples/split_sets', f'{data_type}.txt')
    data_base = os.path.join(data_root, args.data_base)  # output dir for pkl files
    dataset_dir = data_root.split('/')[-1]
    with open(ids_file, 'r') as f:
        ids = [line.strip() for line in f]

    data_all = os.listdir(os.path.join(data_root, 'lidar_bin'))
    data_all.sort()

    if data_type == 'train':
        # Record prev/now/next frame ids so temporal neighbours can be looked up.
        data_name = [int(name.split('.')[0]) for name in data_all]
        data_times = {}
        for idx, data in enumerate(data_name):
            data_times[data] = {
                'now': data,
                'prev': data_name[idx - 1] if idx > 0 else None,
                'next': data_name[idx + 1] if idx < len(data_name) - 1 else None,
            }
        write_pickle(data_times, os.path.join(data_base, 'sequence.pkl'))

    dataset_infos_dict = {}
    if db:
        mydata_dbinfos_train = {}
        db_points_saved_path = os.path.join(data_base, args.gt_database)
        os.makedirs(db_points_saved_path, exist_ok=True)
    for frame_id, id in tqdm(enumerate(ids), desc='idx', total=len(ids)):
        cur_info_dict = {}
        cur_info_dict["data_info"] = {
            "type": 'delft',
            "With_Intensity": With_Intensity,
            "now": id,
        }
        # lidar metadata
        lidar_path = os.path.join(data_root, 'lidar_bin', f'{id}.bin')
        assert os.path.exists(lidar_path)
        lidar_pc = read_points(lidar_path, dim=lidar_dim, datatype=np.float32)
        cur_info_dict["lidar"] = {
            "path": os.path.join(dataset_dir, 'lidar_bin', f'{id}.bin'),
            "points_num": len(lidar_pc),
        }
        # radar metadata
        radar_path = os.path.join(data_root, 'radar_bin', f'{id}.bin')
        assert os.path.exists(radar_path)
        radar_pc = read_points(radar_path, dim=7, datatype=np.float32)  # x,y,z,r_speed,rcs,...
        cur_info_dict["radar"] = {
            "path": os.path.join(dataset_dir, 'radar_bin', f'{id}.bin'),
            "points_num": len(radar_pc),
        }

        # sequence info (only present once the train pass has written it)
        sequence_path = os.path.join(data_root, 'data_base/sequence.pkl')
        if os.path.exists(sequence_path):
            cur_info_dict["sequence"] = {
                "path": os.path.join(dataset_dir, 'data_base/sequence.pkl')
            }

        # camera metadata: one entry per "camera*" sub-folder
        camera_info_list = []
        for cam_dir in os.listdir(os.path.join(data_root, 'camera')):
            if cam_dir.startswith("camera"):
                img_path = os.path.join(data_root, 'camera', cam_dir, f'{id}.jpg')
                assert os.path.exists(img_path)
                img = cv2.imread(img_path)
                # if args.is_view:
                #     cv2.imshow(cam_dir, img)
                #     cv2.waitKey(1)
                camera_info_list.append({
                    'idx': cam_dir,
                    'path': os.path.join(dataset_dir, 'camera', cam_dir, f'{id}.jpg'),
                    'shape': img.shape[:2],
                })
        cur_info_dict["camera"] = camera_info_list

        if label:
            if args.is_view:
                vis = o3d.visualization.Visualizer()
                vis.create_window(window_name="show_pred_pcd")
                render_option = vis.get_render_option()
                render_option.point_size = 2
                coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
                vis.add_geometry(coordinate_frame)
                datas = o3d.geometry.PointCloud()
                datas.points = o3d.utility.Vector3dVector(lidar_pc[:, :3])
                datas.paint_uniform_color([0.3, 0.5, 0])
                vis.add_geometry(datas)

            # label info; some frames may legitimately have no label file
            label_dict = None
            label_path = os.path.join(data_root, 'samples/label_txt', f'{id}.txt')
            if os.path.exists(label_path):
                label_dict = read_delft_label(label_path)
                cur_info_dict['label'] = label_dict
            # segmentation mask path, stored relative to the dataset root
            mask_path = os.path.join(data_root, 'samples/mask', f'{id}.label')
            if os.path.exists(mask_path):
                cur_info_dict['mask_path'] = os.path.join(dataset_dir, 'samples/mask', f'{id}.label')

            boxes = None  # set only when the GT database branch runs for this frame
            # Guard on label_dict: the original dereferenced it unconditionally
            # and raised NameError for frames without a label file.
            if db and label_dict is not None and len(label_dict['location']) != 0:
                boxes = np.concatenate((label_dict['location'], label_dict['dimensions'],
                                        label_dict['rotation_y']), axis=1)
                cornors = bbox3d2corners(boxes)
                group_rectangle_vertexs_v = group_rectangle_vertexs(cornors)
                frustum_surfaces = group_plane_equation(group_rectangle_vertexs_v)
                # (N, n): N lidar points tested against n bboxes
                indices = points_in_bboxes(lidar_pc[:, :3], frustum_surfaces)

                for j in range(len(boxes)):
                    db_points = lidar_pc[indices[:, j]]

                    if args.is_view:
                        datas2 = o3d.geometry.PointCloud()
                        datas2.points = o3d.utility.Vector3dVector(db_points[:, :3])
                        datas2.paint_uniform_color([0.7, 0.2, 0.3])
                        vis.add_geometry(datas2)

                    db_points[:, :3] -= boxes[j, :3]  # re-center the cluster on its box center
                    name = label_dict["name"][j].lower()
                    # j-th object of this class in the frame_id-th frame
                    db_points_saved_name = os.path.join(db_points_saved_path,
                                                        f'{frame_id}_{name}_{j}.bin')
                    if db_points.shape[1] == 3:  # pad so every sample is 4-dim
                        # explicit dtype avoids float64 upcast from the default zeros
                        db_points = np.concatenate(
                            (db_points, np.zeros((len(db_points), 1), dtype=db_points.dtype)),
                            axis=1)
                    # astype returns a copy; the original discarded it and could
                    # write float64 clusters to disk.
                    db_points = db_points.astype(np.float32)
                    write_points(db_points, db_points_saved_name)

                    db_info = {
                        # class name / cluster bin path / box center+size / difficulty / point count
                        'name': name,
                        'path': os.path.join(os.path.basename(data_root),
                                             args.data_base, args.gt_database,
                                             f'{frame_id}_{name}_{j}.bin'),
                        'image_idx': '',
                        'gt_idx': j,
                        'box3d_lidar': boxes[j],
                        'num_points_in_gt': len(db_points),
                        'difficulty': 0,
                        'group_id': j
                    }
                    mydata_dbinfos_train.setdefault(name, []).append(db_info)

            if args.is_view:
                # Draw GT boxes only when they were computed; the original
                # referenced `boxes` unconditionally (NameError without db).
                if boxes is not None:
                    # NOTE(review): get_draw_box comes from vis_utils, whose
                    # import is commented out at the top of the file —
                    # re-enable it before running with --is-view.
                    exp_draw_boxes = get_draw_box(boxes)
                    for box in exp_draw_boxes:
                        vis.add_geometry(box)
                vis.run()
                vis.destroy_window()

        dataset_infos_dict[int(id)] = cur_info_dict

    saved_path = os.path.join(data_base, f'infos_{data_type}.pkl')
    write_pickle(dataset_infos_dict, saved_path)
    print("<{}> info dict 写完".format(data_type))
    if db:
        saved_db_path = os.path.join(data_base, 'dbinfos_train.pkl')
        write_pickle(mydata_dbinfos_train, saved_db_path)
        print("mydata_dbinfos_train 写完")
    return dataset_infos_dict


def main(args):
    """Process every dataset folder under ``args.data_path``.

    Only directories whose name starts with "__" are treated as datasets;
    each must already have been split (previous pipeline step) before
    its info pickles are generated by ``main_worker``.
    """
    assert os.path.exists(args.data_path)
    # sorted() replaces the original's no-op sort(key=lambda x: x)
    for dataset in sorted(os.listdir(args.data_path)):
        if dataset.startswith("__"):  # dataset folders are prefixed with "__"
            dataset_path = os.path.join(args.data_path, dataset)
            # Splitting was done in the previous step; fail fast otherwise.
            assert check_dataset_is_splited(dataset_path)
            main_worker(args, dataset_path)


def main_worker(args, dataset_path):
    """Generate all info pickles (train / val / trainval / test) for one dataset."""
    data_root = dataset_path

    ## 1. train: create data information pkl file && create reduced point clouds
    ##           && create database (points in gt bbox) for data augmentation
    kitti_train_infos_dict = create_data_info_pkl(args, data_root, 'train', db=True)

    ## 2. val: create data information pkl file && create reduced point clouds
    kitti_val_infos_dict = create_data_info_pkl(args, data_root, 'val')

    ## 3. trainval: merge of the train and val info dicts
    kitti_trainval_infos_dict = {**kitti_train_infos_dict, **kitti_val_infos_dict}
    # Use args.data_base (the original hard-coded 'data_base') so the trainval
    # pkl lands in the same directory as the train/val infos written above.
    data_base = os.path.join(data_root, args.data_base)
    saved_path = os.path.join(data_base, 'infos_trainval.pkl')
    write_pickle(kitti_trainval_infos_dict, saved_path)

    ## 4. test: create data information pkl file (no labels for this split)
    create_data_info_pkl(args, data_root, 'test', label=False)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Dataset information')
    parser.add_argument('--data-path', default='/media/adt/ZWH4T/ZWH/Dataset3d/vod',
                        help='your data root path')
    # type=bool is an argparse trap: any non-empty string (even "False") parses
    # truthy, so expose the switch as a store_true flag instead.
    parser.add_argument('--is-view', action='store_true',
                        help='visualize point clouds and GT boxes while processing')
    parser.add_argument('--data-base', type=str, default="data_base",
                        help='output folder name for the pkl files')
    parser.add_argument('--gt-database', type=str, default="gt_database",
                        help='folder name for the cropped GT point clusters')
    args = parser.parse_args()

    main(args)
