import argparse
import open3d as o3d
import cv2
import numpy as np
import os
from tqdm import tqdm
import sys
import json
# from vis_utils import get_draw_box
from my_io import read_label, write_pickle, write_points, read_points, readcsv
from utils import bbox3d2corners, group_rectangle_vertexs, group_plane_equation, points_in_bboxes
from dataset_util import check_dataset_is_splited
from cyw_devkit import CywDataset, get_stamp
from tictoc import TicToc
import multiprocessing

CUR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(CUR)


def judge_difficulty(annotation_dict):
    """Assign a KITTI-style difficulty level to each annotated object.

    Args:
        annotation_dict: dict with keys
            'truncated' — (N,) float truncation ratios,
            'occluded'  — (N,) int occlusion levels,
            'bbox'      — (N, 4) 2D boxes [x1, y1, x2, y2] in pixels.

    Returns:
        np.ndarray of shape (N,), dtype int64: 0 = easy, 1 = moderate,
        2 = hard, -1 = does not meet even the 'hard' thresholds.
    """
    truncated = annotation_dict['truncated']
    occluded = annotation_dict['occluded']
    bbox = annotation_dict['bbox']
    height = bbox[:, 3] - bbox[:, 1]  # 2D box pixel height

    # KITTI thresholds, indexed easy(0) / moderate(1) / hard(2).
    MIN_HEIGHTS = [40, 25, 25]
    MAX_OCCLUSION = [0, 1, 2]
    MAX_TRUNCATION = [0.15, 0.30, 0.50]
    difficultys = []
    for h, o, t in zip(height, occluded, truncated):
        difficulty = -1
        # Iterate hard -> easy so the final value is the easiest tier passed.
        for i in range(2, -1, -1):
            if h > MIN_HEIGHTS[i] and o <= MAX_OCCLUSION[i] and t <= MAX_TRUNCATION[i]:
                difficulty = i
        difficultys.append(difficulty)
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use an explicit fixed-width dtype instead.
    return np.array(difficultys, dtype=np.int64)


def get_data_in_cyw(my_dataset, id, With_Intensity=True, Version=2.0):
    """Assemble the per-frame info dict for one sample id.

    Collects lidar/radar file paths plus, for every camera, its file path,
    intrinsic matrix, and the camera->base_link transform at the label stamp.
    """
    paths = my_dataset.get_frame_path(id)
    stamp = my_dataset.get_frame_stamp(id)

    info = {
        "data_info": {
            "type": 'hw',
            "stamp": stamp,
            "With_Intensity": With_Intensity,
            "version": Version,
        },
        "lidar": {"path": paths['lidar']},
        "radar": {"path": paths['radar']},
    }

    cameras = {}
    for cam_id, cam_path in paths['camera'].items():
        cam_stamp = get_stamp(cam_path)
        # Extrinsic: camera pose at its own stamp -> base_link at label stamp.
        cam2base = my_dataset.data_dict['super_transform'].get_tf(
            source_point=cam_id,
            source_time=cam_stamp,
            target_point='base_link',
            target_time=stamp)
        cameras[cam_id] = {
            'path': cam_path,
            'cam_intrinsic': my_dataset.get_camera_mat(cam_id),
            'sensor2base_link': cam2base,
        }
    info["camera"] = cameras
    return info


def create_data_info_pkl(args, my_dataset, data_type, label=True, db=False):
    """Build and save the info pkl for one data split.

    Reads the split ids from samples/samples.json, assembles a per-frame info
    dict (sensor paths, calib, optional history sweeps and labels) and writes
    it to <data_root>/<args.data_base>/infos_<data_type>_e2e.pkl.

    Args:
        args: CLI namespace (uses data_base, gt_database, sweeps, is_view).
        my_dataset: CywDataset instance for one dataset directory.
        data_type: split name ('train' / 'val' / 'test'); selects the
            '<data_type>_idx' id list from samples.json.
        label: when True, read per-frame annotation json into each entry.
        db: when True, additionally crop per-object point clusters into
            <data_base>/<args.gt_database> and write dbinfos_train.pkl
            (ground-truth database for copy-paste augmentation).

    Returns:
        dict mapping int(label_stamp) -> per-frame info dict.
    """
    data_root = my_dataset.dataset_path
    fieldnames = ['Item', 'Details']
    csv_datas = readcsv(os.path.join(data_root, 'README.csv'), fieldnames)
    With_Intensity = False
    # BUGFIX: give Version a default so the get_data_in_cyw() call below does
    # not raise NameError when README.csv has no 'Version' row.
    Version = 2.0
    if 'With_Intensity' in csv_datas:
        With_Intensity = csv_datas['With_Intensity']
    if 'Version' in csv_datas:
        Version = csv_datas['Version']
    print(f"Processing {data_type} data..")
    ids_file = os.path.join(data_root, 'samples/samples.json')  # split sets live inside the dataset folder
    data_base = os.path.join(data_root, args.data_base)  # output dir for pkl files
    os.makedirs(data_base, exist_ok=True)
    with open(ids_file, 'r') as f:
        ids = json.load(f)[data_type + '_idx']

    dataset_infos_dict = {}
    if db:
        mydata_dbinfos_train = {}
        db_points_saved_path = os.path.join(data_base, args.gt_database)
        os.makedirs(db_points_saved_path, exist_ok=True)
    frame_id = -1
    for id in tqdm(ids):
        frame_id += 1
        label_stamp = my_dataset.get_frame_stamp(id)

        cur_info_dict = get_data_in_cyw(my_dataset, id, With_Intensity, Version)

        # Collect up to args.sweeps previous frames (no labels), each with the
        # transform mapping its points into the current frame.
        cur_info_dict['sweeps'] = []
        cur_info_dict['sweeps_size'] = args.sweeps
        if args.sweeps > 0:
            cur_stamp = cur_info_dict['data_info']['stamp']
            id_pre = id
            for _ in range(args.sweeps):
                id_pre -= 1
                if id_pre >= 0:
                    # CONSISTENCY FIX: also pass Version here (was previously
                    # falling back to the 2.0 default for sweep frames only).
                    pre_info_dict = get_data_in_cyw(my_dataset, id_pre, With_Intensity, Version)
                    pre_stamp = pre_info_dict['data_info']['stamp']
                    pre_info_dict['pre2cur'] = my_dataset.data_dict['super_transform'].get_tf_from_time(
                        source_time=pre_stamp, target_time=cur_stamp)
                    cur_info_dict['sweeps'].append(pre_info_dict)

        if label:
            lidar = my_dataset.get_lidar(id)
            lidar_pc = lidar.data

            if args.is_view:
                vis = o3d.visualization.Visualizer()
                vis.create_window(window_name="show_pred_pcd")
                render_option = vis.get_render_option()
                render_option.point_size = 2
                coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
                vis.add_geometry(coordinate_frame)
                datas = o3d.geometry.PointCloud()
                datas.points = o3d.utility.Vector3dVector(lidar_pc[:, :3])
                datas.paint_uniform_color([0.3, 0.5, 0])
                vis.add_geometry(datas)

            # Load the per-frame object annotation json keyed by label stamp.
            label_path = os.path.join(data_root, 'samples/label_e2e', f'{label_stamp}.json')
            assert os.path.exists(label_path), "{} does not exist".format(label_path)
            with open(label_path, 'r') as f:
                data = json.load(f)

            annotation = {}
            annotation['name'] = np.array([obj['obj_type'].lower() for obj in data])
            annotation['location'] = np.array([[obj['psr']['position']['x'], obj['psr']['position']['y'], obj['psr']['position']['z']] for obj in data])
            annotation['dimensions'] = np.array([[obj['psr']['scale']['x'], obj['psr']['scale']['y'], obj['psr']['scale']['z']] for obj in data])
            # Keep the (N, 1) shape: it is concatenated with location/dimensions below.
            annotation['rotation_y'] = np.array([[obj['psr']['rotation']['z']] for obj in data])
            annotation['velocity'] = np.array([[obj['psr']['velocity']['x'], obj['psr']['velocity']['y']] for obj in data])

            # Per-agent future trajectories (x, y) and validity masks.
            annotation['gt_agent_fut_trajs'] = []
            annotation['gt_agent_fut_masks'] = []
            for obj in data:
                traj_len = len(obj['psr']['gt_agent_fut_trajs']['x'])
                gt_agent_fut_traj = np.stack([np.array(obj['psr']['gt_agent_fut_trajs']['x']), np.array(obj['psr']['gt_agent_fut_trajs']['y'])], axis=1)
                annotation['gt_agent_fut_trajs'].append(gt_agent_fut_traj)
                annotation['gt_agent_fut_masks'].append(np.ones(traj_len))
            if len(annotation['gt_agent_fut_trajs']) == 0:
                # No agents: keep the expected (0, 12, 2) / (0, 12) shapes.
                annotation['gt_agent_fut_trajs'] = np.zeros((0, 12, 2))
                annotation['gt_agent_fut_masks'] = np.zeros((0, 12))
            else:
                annotation['gt_agent_fut_trajs'] = np.stack(annotation['gt_agent_fut_trajs'], axis=0)
                annotation['gt_agent_fut_masks'] = np.stack(annotation['gt_agent_fut_masks'], axis=0)

            # Ego future trajectory / command live in a sibling '-ego.json' file.
            label_path_ego = label_path.replace(".json", '-ego.json')
            assert os.path.exists(label_path_ego), "{} does not exist".format(label_path_ego)
            with open(label_path_ego, 'r') as f:
                data = json.load(f)

            annotation['gt_ego_fut_trajs'] = np.stack([np.array(data['gt_ego_fut_trajs']['x']), np.array(data['gt_ego_fut_trajs']['y'])], axis=1)
            annotation['gt_ego_fut_masks'] = np.ones(len(data['gt_ego_fut_trajs']['x']))
            annotation['gt_ego_fut_cmd'] = np.array(data['gt_ego_fut_cmd'])

            cur_info_dict['label'] = annotation

            if db:
                label_dict = annotation
                if (len(label_dict['location']) != 0):
                    boxes = np.concatenate((label_dict['location'], label_dict['dimensions'], label_dict['rotation_y']),
                                           axis=1)
                    cornors = bbox3d2corners(boxes)
                    group_rectangle_vertexs_v = group_rectangle_vertexs(cornors)
                    frustum_surfaces = group_plane_equation(group_rectangle_vertexs_v)
                    indices = points_in_bboxes(lidar_pc[:, :3],
                                               frustum_surfaces)  # (N, n), N is points num, n is bboxes number

                    for j in range(len(boxes)):
                        db_points = lidar_pc[indices[:, j]]

                        if args.is_view:
                            datas2 = o3d.geometry.PointCloud()
                            datas2.points = o3d.utility.Vector3dVector(db_points[:, :3])
                            datas2.paint_uniform_color([0.7, 0.2, 0.3])
                            vis.add_geometry(datas2)

                        db_points[:, :3] -= boxes[j, :3]  # shift cluster so the box center is the origin
                        name = label_dict["name"][j].lower()
                        db_points_saved_name = os.path.join(db_points_saved_path,
                                                            f'{frame_id}_{name}_{j}.bin')  # j-th object of this class in frame
                        if db_points.shape[1] == 3:  # pad so every sample is 4-dim
                            db_points = np.concatenate((db_points, np.zeros(shape=(len(db_points), 1))), axis=1)
                        # BUGFIX: astype() returns a new array; the result was
                        # previously discarded, so float64 points were written.
                        db_points = db_points.astype(np.float32)
                        write_points(db_points, db_points_saved_name)

                        db_info = {
                            # class / cluster bin path / box center+size / difficulty / point count
                            'name': name,
                            'path': os.path.join(os.path.basename(data_root),
                                                 args.data_base, args.gt_database,
                                                 f'{frame_id}_{name}_{j}.bin'),
                            'image_idx': '',
                            'gt_idx': j,
                            'box3d_lidar': boxes[j],
                            'num_points_in_gt': len(db_points),
                            'difficulty': 0,
                            'group_id': j
                        }
                        if name not in mydata_dbinfos_train:
                            mydata_dbinfos_train[name] = [db_info]
                        else:
                            mydata_dbinfos_train[name].append(db_info)

            if args.is_view:
                # NOTE(review): get_draw_box's import is commented out at the top
                # of this file, and `boxes` is only bound when db=True with
                # non-empty labels — this branch raises unless both hold.
                exp_draw_boxes = get_draw_box(boxes)
                for box in exp_draw_boxes:
                    vis.add_geometry(box)
                vis.run()
                vis.destroy_window()

        dataset_infos_dict[int(label_stamp)] = cur_info_dict

    saved_path = os.path.join(data_base, f'infos_{data_type}_e2e.pkl')
    write_pickle(dataset_infos_dict, saved_path)
    print("<{}> info dict 写完".format(data_type))
    if db:
        saved_db_path = os.path.join(data_base, 'dbinfos_train.pkl')
        write_pickle(mydata_dbinfos_train, saved_db_path)
        print("mydata_dbinfos_train 写完")
    return dataset_infos_dict


def main(args):
    """Find dataset folders under args.data_path (names starting with '__')
    and process each one, fanning out to a process pool when there are
    several."""
    # Track total elapsed time.
    cost = TicToc("pkl生成")
    assert os.path.exists(args.data_path)
    datasets = sorted(os.listdir(args.data_path))
    files = []
    for dataset in datasets:
        if dataset.startswith("__"):  # dataset folders are prefixed with "__"
            files.append(os.path.join(args.data_path, dataset))

    # BUGFIX: avoid IndexError on files[0] when nothing matches.
    if not files:
        print("no dataset folder starting with '__' found under {}".format(args.data_path))
        return

    process_size = len(files)
    if process_size > 1:
        pool = multiprocessing.Pool(process_size)
        # Keep the async handles so worker exceptions are re-raised here
        # instead of being silently swallowed by apply_async.
        results = [pool.apply_async(main_worker, args=(path, args)) for path in files]
        pool.close()
        pool.join()
        for r in results:
            r.get()
    else:
        main_worker(files[0], args)

    print("---------------------------------------------------------")
    print("处理完成: {}".format(files))
    cost.toc()
    print("---------------------------------------------------------")


def main_worker(dataset_path, args):
    """Process one dataset folder end to end.

    Builds the train (optionally with gt database), val, merged trainval and
    test info pkls, then rewrites README.csv with Version bumped to 3.0.
    """
    import csv

    data_root = dataset_path
    # Simulated (town/carla) exports use 7-dim radar and un-normalized images.
    if 'town' in dataset_path or 'carla' in dataset_path:
        radar_dim = 7
        normal_img = False
    else:
        radar_dim = 5
        normal_img = True

    my_dataset = CywDataset(dataset_path=data_root, normal_img=normal_img, radar_dim=radar_dim)

    ## 1. train: info pkl (+ gt database for data augmentation when enabled)
    kitti_train_infos_dict = create_data_info_pkl(args, my_dataset, 'train', db=args.no_db)

    ## 2. val: info pkl
    kitti_val_infos_dict = create_data_info_pkl(args, my_dataset, 'val')

    ## 3. trainval: merge of train and val
    kitti_trainval_infos_dict = {**kitti_train_infos_dict, **kitti_val_infos_dict}
    # CONSISTENCY FIX: honor --data-base like create_data_info_pkl does,
    # instead of hard-coding 'data_base' (identical behavior at the default).
    data_base = os.path.join(data_root, args.data_base)  # pkl output dir
    saved_path = os.path.join(data_base, 'infos_trainval.pkl')
    write_pickle(kitti_trainval_infos_dict, saved_path)

    ## 4. test: info pkl without labels
    create_data_info_pkl(args, my_dataset, 'test', label=False)

    # Rewrite README.csv as Item/Details rows with Version bumped to 3.0.
    fieldnames = ['Item', 'Details']
    csv_datas = readcsv(os.path.join(dataset_path, 'README.csv'), fieldnames)
    csv_datas['Version'] = '3.0'
    # BUGFIX: context manager guarantees the file is flushed and closed
    # (the handle was previously never closed).
    with open(os.path.join(dataset_path, 'README.csv'), mode='w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        for key in csv_datas:
            writer.writerow({'Item': key, 'Details': csv_datas[key]})


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Dataset infomation')
    parser.add_argument('--data-path', default='/media/adt/T7/ZWH/docker/files/data/motion/datasets',
                        help='your data root path')
    # BUGFIX: type=bool treats ANY non-empty string — including "False" — as
    # True; parse the string explicitly while keeping the value-style CLI.
    parser.add_argument('--is-view', type=lambda s: s.lower() in ('1', 'true', 'yes', 'y'),
                        default=False)  # visualize clusters while building the db
    # NOTE(review): despite the name, passing --no-db ENABLES db generation
    # (it is forwarded as db=args.no_db) — confirm intent before renaming.
    parser.add_argument('--no-db', action='store_true')  # generate gt data samples
    parser.add_argument('--data-base', type=str, default="data_base")  # pkl folder name
    parser.add_argument('--sweeps', type=int, default=10)  # number of history frames
    parser.add_argument('--gt-database', type=str, default="gt_database")  # folder name for gt samples
    args = parser.parse_args()

    main(args)
