import sys
import os

# 添加 project/ 目录到 sys.path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import math
import json
import numpy as np
import open3d as o3d
from vis_utils import Vis, pc_tf
import os
import csv
from tqdm import tqdm
import cv2
import argparse
from utils import topic2path
from my_io import copy_file, read_points, write_points
from cyw_devkit.core.transform.super_transform import SuperTransform
from cyw_devkit.core.dataset import BBoxes
from tictoc import TicToc
import multiprocessing


def check_path(data_path, topics):
    """Return the topics whose data directory exists under data_path.

    Duplicates in *topics* are dropped; the first-seen order is preserved.

    Args:
        data_path: dataset root directory.
        topics: iterable of topic names; each is mapped to a sub-path via
            topic2path before the existence check.

    Returns:
        list of unique topic names whose path exists.
    """
    found = []
    for topic in topics:
        topic_dir = os.path.join(data_path, topic2path(topic))
        # Skip topics with no data on disk, and topics already collected.
        if os.path.exists(topic_dir) and topic not in found:
            found.append(topic)
    return found


def main(args):
    """Locate every bag directory under ``args.data_path`` and synchronize
    each one via ``main_worker``, one process per bag when there are several.

    Args:
        args: parsed CLI namespace; uses ``args.data_path`` here and passes
            the full namespace through to each worker.
    """
    # Wall-clock timer for the whole run.
    cost = TicToc("同步数据")
    assert os.path.exists(args.data_path)
    files = []
    for name in sorted(os.listdir(args.data_path)):
        # Directories prefixed "__" are previous outputs of this script; skip.
        if name[:2] == '__':
            continue
        full_path = os.path.join(args.data_path, name)
        if os.path.isdir(full_path):
            files.append(full_path)

    # BUG FIX: the original indexed files[0] unconditionally and crashed with
    # IndexError when the root held no bag directories.
    if not files:
        print("no bag directories found under {}".format(args.data_path))
        return

    if len(files) > 1:
        # BUG FIX: the original discarded the AsyncResult objects, so any
        # exception raised inside a worker was silently swallowed. Keeping
        # them and calling .get() re-raises worker failures here.
        # (The unused multiprocessing.Manager/counter_list were removed.)
        pool = multiprocessing.Pool(len(files))
        results = [pool.apply_async(main_worker, args=(path, args)) for path in files]
        pool.close()
        pool.join()
        for result in results:
            result.get()  # re-raises any worker exception
    else:
        main_worker(files[0], args)

    print("---------------------------------------------------------")
    print("处理完成: {}".format(files))
    cost.toc()
    print("---------------------------------------------------------")


def main_worker(frames_path, args):
    """Synchronize one bag directory around the center lidar's timestamps.

    Pipeline:
      1. Collect per-topic timestamp lists from data file names
         ("<stamp>.<ext>") and from the localization CSV.
      2. For each center-lidar stamp, pick the nearest stamp of every other
         topic (forward pointer sweep); reject frames whose worst per-topic
         time offset exceeds ``args.delta_stamp_max_thre``.
      3. Mark key frames: travelled distance since the previous key frame
         exceeds ``args.key_dis``, or ``args.jump - 1`` consecutive non-key
         frames have accumulated.
      4. Write merged lidar/radar point clouds, copied images, point-filtered
         labels, calib/localization copies and a README.csv into a sibling
         output directory named "__<bag-name>".

    Args:
        frames_path: path of one bag directory (expects lidar/, radar/,
            camera/, label/, localization/, calib/ subdirectories).
        args: parsed CLI namespace; fields used: delta_stamp_max_thre,
            key_dis, jump, is_save, is_view.
    """
    print("开始同步数据...")

    # Topic names are hard-coded for this dataset's directory layout.
    lidar_topics = ["lidar1", "lidar2", "lidar3", "lidar4"]
    radar_topics = ["radar3"]
    label_topic = "label"
    camera_topics = ["camera75",
                     "camera77",
                     "camera80",
                     "camera81"]

    # datas_stamps maps topic name -> sorted list of integer timestamps.
    datas_stamps = dict()
    sensor_topics = []
    sensor_types = []
    # Build parallel lists: sensor_topics[i] lives under subdir sensor_types[i].
    for i in range(len(lidar_topics)):
        sensor_types.append('lidar')
    sensor_topics = sensor_topics + lidar_topics
    for i in range(len(radar_topics)):
        sensor_types.append('radar')
    sensor_topics = sensor_topics + radar_topics
    for i in range(len(camera_topics)):
        sensor_types.append('camera')
    sensor_topics = sensor_topics + camera_topics

    for topic, type in zip(sensor_topics, sensor_types):
        data_stamps = []
        path = os.path.join(frames_path, type, topic)
        data_files = os.listdir(path)
        data_files.sort(key=lambda x: int(x.split('.')[0]))
        for data_file in data_files:
            # File name stem is the integer timestamp.
            data_stamp = int(str.split(data_file, '.')[0])
            data_stamps.append(data_stamp)
        datas_stamps[topic] = data_stamps

    # Localization timestamps come from the first CSV column.
    filename = os.path.join(frames_path, "localization", "localization.csv")
    with open(filename, "r") as csvfile:
        csvreader = csv.reader(csvfile)
        data_stamps = []
        # Walk every row of the CSV reader.
        is_header = True  # skip the header row
        for row in csvreader:
            if is_header:
                is_header = False
                continue
            data_stamps.append(int(row[0]))
        datas_stamps["localization_result"] = data_stamps

    # Label timestamps use the same "<stamp>.<ext>" naming as the sensors.
    path = os.path.join(frames_path, label_topic)
    data_files = os.listdir(path)
    data_files.sort(key=lambda x: int(x.split('.')[0]))
    label_data_stamps = []
    for data_file in data_files:
        data_stamp = int(str.split(data_file, '.')[0])
        label_data_stamps.append(data_stamp)
    datas_stamps[label_topic] = label_data_stamps

    print("各话题对应的帧数")
    for topic in datas_stamps:
        print("话题<{}>   帧数<{}>".format(topic, len(datas_stamps[topic])))

    # Synchronize every other topic against the front (center) lidar.
    center_topic = "lidar3"
    datas_pointer = dict()    # per-topic index of the last matched stamp
    sync_datas_dict = dict()  # per-topic matched stamps, one per center frame
    for topic in datas_stamps:
        datas_pointer[topic] = 0
        sync_datas_dict[topic] = []

    for cencer_stamp in tqdm(datas_stamps[center_topic], postfix="同步数据..."):
        # For each topic, advance its pointer until the center stamp is matched.
        for topic in datas_pointer:
            # NOTE(review): the range stops at len-1, so a topic's final stamp
            # can only be selected through the i+1 branch below.
            for i in range(datas_pointer[topic], len(datas_stamps[topic]) - 1):
                if (cencer_stamp < datas_stamps[topic][i]):
                    # Center stamp precedes every remaining stamp of this
                    # topic: take the earliest one not yet behind us.
                    sync_datas_dict[topic].append(datas_stamps[topic][i])
                    datas_pointer[topic] = i
                    break
                if (datas_stamps[topic][i] <= cencer_stamp and cencer_stamp <= datas_stamps[topic][i + 1]):
                    # Center stamp is bracketed: pick the closer neighbour.
                    delta_left = cencer_stamp - datas_stamps[topic][i]
                    delta_right = datas_stamps[topic][i + 1] - cencer_stamp
                    if (delta_left <= delta_right):
                        sync_datas_dict[topic].append(datas_stamps[topic][i])
                    else:
                        sync_datas_dict[topic].append(datas_stamps[topic][i + 1])
                    datas_pointer[topic] = i
                    break

    # Topics that ran out of data are zero-padded so all lists share one
    # length; padded entries get a huge time offset and are rejected below.
    sync_num = len(sync_datas_dict[center_topic])
    for topic in sync_datas_dict:
        if len(sync_datas_dict[topic]) > sync_num:
            exit(-1)
        sync_datas_dict[topic] = sync_datas_dict[topic] + [0] * (sync_num - len(sync_datas_dict[topic]))

    # From here on the stamp lists become numpy arrays.
    # Quality gate: per frame, the worst |center - topic| stamp offset must
    # stay below delta_stamp_max_thre.
    delta_stamp_max = np.zeros(sync_num)
    for topic in sync_datas_dict:
        delta_stamp = np.abs(np.array(sync_datas_dict[center_topic]) - np.array(sync_datas_dict[topic]))
        is_larger = delta_stamp > delta_stamp_max
        delta_stamp_max = delta_stamp * is_larger + delta_stamp_max * (1 - is_larger)
    valid_data_idx = delta_stamp_max < args.delta_stamp_max_thre  # True where sync quality passed

    # valid_data_idx[-4]=False
    # Keep only frames that passed the quality gate.
    for topic in sync_datas_dict:
        sync_datas_dict[topic] = np.array(sync_datas_dict[topic])[valid_data_idx]

    # Key-frame extraction.
    # Super transformer: yields poses over time and sensor-to-sensor tf.
    super_transform = SuperTransform(
        local_csv=os.path.join(frames_path, 'localization', 'localization.csv'),
        calib_json=os.path.join(frames_path, 'calib', 'calib.json'))
    with open(os.path.join(frames_path, 'calib', 'calib.json'), 'r') as f:
        json_data = json.load(f)  # NOTE(review): loaded but never used below

    keep_data_idx = []
    key_stamp = sync_datas_dict[center_topic][0]
    contiguous_time = 0  # consecutive non-key frames since the last key frame
    for stamp in sync_datas_dict[center_topic]:
        dtf = super_transform.get_tf_from_time(stamp, key_stamp)
        dis = math.sqrt(np.square(dtf[0:3, 3]).sum())  # translation magnitude since last key frame
        if dis > args.key_dis or contiguous_time >= args.jump - 1:  # key frame when moved > key_dis, or jump-1 non-key frames in a row
            # if dis > args.key_dis: # key frame purely by distance
            keep_data_idx.append(True)
            contiguous_time = 0
            key_stamp = stamp
        else:
            keep_data_idx.append(False)
            contiguous_time += 1
    keep_data_idx = np.array(keep_data_idx)
    key_index = np.array(np.where(keep_data_idx)[0])
    key_info = {
        'key_index': key_index.tolist()
    }

    print(
        '粗同步数据：{},精同步数据:{}, 关键帧数据:{}'.format(len(valid_data_idx), valid_data_idx.sum(), len(key_index)))

    # jump = args.jump
    # for localization_stamp in sync_datas_dict['/localization_result']:
    #     acc_z = data_acc_z[localization_stamp]
    #     if acc_z < args.max_acc_z or jump == args.jump:
    #         keep_data_idx.append(True)
    #         jump = 1
    #     else:
    #         jump = jump + 1
    #         keep_data_idx.append(False)
    # keep_data_idx = np.array(keep_data_idx)
    # for topic in sync_datas_dict:
    #     sync_datas_dict[topic] = np.array(sync_datas_dict[topic])[keep_data_idx]
    # valid_data_sum = keep_data_idx.sum()
    valid_data_sum = len(sync_datas_dict[center_topic])

    if args.is_view:
        vis = Vis()

    # Prepare the save locations.
    # Output directory is "__<source-dir-name>" next to the source directory.
    output_paths = frames_path.replace(frames_path.split('/')[-1], '__' + frames_path.split('/')[-1])
    # output_paths_lidar = os.path.join(output_paths, "lidar")
    output_paths_lidar = None  # .pcd output dropped; key-frame pcds are generated at labeling time
    output_paths_lidar_bin = os.path.join(output_paths, "lidar_bin")
    output_paths_radar = os.path.join(output_paths, "radar_bin")  # radar output renamed to radar_bin here
    output_paths_camera = os.path.join(output_paths, "camera")
    output_paths_calib = os.path.join(output_paths, "calib")
    output_paths_label = os.path.join(output_paths, "label")
    output_paths_key_label = os.path.join(output_paths, "samples/label")
    output_paths_localization = os.path.join(output_paths, "localization")
    output_paths_samples = os.path.join(output_paths, "samples")
    output_paths_cameras = []
    for image_topic in camera_topics:
        output_paths_cameras.append(os.path.join(output_paths_camera, image_topic))
    for output_path in [output_paths, output_paths_lidar, output_paths_lidar_bin, output_paths_radar,
                        output_paths_camera, output_paths_calib, output_paths_label, output_paths_key_label,
                        output_paths_localization, output_paths_samples] + output_paths_cameras:
        if output_path is not None and not os.path.exists(output_path):
            print("新建目录<{}>".format(output_path))
            os.makedirs(output_path, exist_ok=True)
    # Copy calibration and localization files from source to output.
    copy_file(os.path.join(frames_path, 'calib', 'calib.json'), os.path.join(output_paths, 'calib'))
    copy_file(os.path.join(frames_path, 'localization', 'localization.csv'), os.path.join(output_paths, 'localization'))
    # Persist the key-frame indices as JSON.
    with open(os.path.join(output_paths_samples, 'samples.json'), 'w', encoding='utf-8') as file:
        json.dump(key_info, file, ensure_ascii=False, indent=2)

    # Write a README.csv describing the produced dataset.
    # NOTE(review): csvfile is opened here and never closed; consider `with`.
    csvfile = open(os.path.join(output_paths, 'README.csv'), mode='w', newline='')
    # Column headers
    fieldnames = ['Item', 'Details']
    # Create the DictWriter object
    write = csv.DictWriter(csvfile, fieldnames=fieldnames)
    # Write the header row
    write.writeheader()
    write.writerow({'Item': 'Bag_Name', 'Details': frames_path.split('/')[-1]})
    write.writerow({'Item': 'With_Intensity', 'Details': True})
    write.writerow({'Item': 'Description', 'Details': '<you can note something here>'})
    write.writerow({'Item': 'SyncParas',
                    'Details': 'max_continuous:{} key_dis:{}'.format(args.jump, args.key_dis)})
    write.writerow({'Item': 'Frames_Size', 'Details': valid_data_sum})
    write.writerow({'Item': 'Samples_Size', 'Details': len(key_index)})
    write.writerow({'Item': 'Topics_Size', 'Details': len(sync_datas_dict)})
    write.writerow({'Item': 'Topics', 'Details': list(sync_datas_dict.keys())})
    write.writerow({'Item': 'Split(train:val:test)', 'Details': "Waitting For Split"})
    write.writerow({'Item': 'Mission', 'Details': ['det', 'lidar', 'mmdet']})
    write.writerow({'Item': 'Version', 'Details': 'Carla'})

    for i in tqdm(range(0, valid_data_sum), postfix="save数据..."):
        target_time = sync_datas_dict[center_topic][i]
        # Merge all lidars into one cloud (in the center frame) and save.
        lidar_all = np.empty(shape=(0, 4))
        for lidar_topic in lidar_topics:
            bin_path = os.path.join(frames_path, 'lidar', lidar_topic,
                                    str(sync_datas_dict[lidar_topic][i]) + '.bin')
            pc = read_points(bin_path, dim=4)
            source_time = sync_datas_dict[lidar_topic][i]

            pc_disort = pc[:, :4]

            source_point = lidar_topic[-6:]
            # Transform from the source sensor at its capture time into the
            # base frame at the center (target) time.
            past_source2now_base = super_transform.get_tf(source_time=source_time, source_point=source_point,
                                                          target_time=target_time)
            # lidar_tf = sensor_rts[lidar_topic[-6:]]
            points_trans = pc_tf(pc_disort, past_source2now_base)
            lidar_all = np.concatenate((lidar_all, points_trans))
        if args.is_save:
            sync_name = str(sync_datas_dict[center_topic][i])  # rename all topics to the center topic's stamp
            # pcd = o3d.geometry.PointCloud()
            # pcd.points = o3d.utility.Vector3dVector(lidar_all[:, :3])
            # o3d.io.write_point_cloud(os.path.join(output_paths_lidar, sync_name + ".pcd"), pcd,
            #                          write_ascii=True)  # save xyz.pcd
            write_points(lidar_all, os.path.join(output_paths_lidar_bin, sync_name + ".bin"))  # save xyzi.bin

        # Merge all radars into one cloud and save.
        radar_all = np.empty(shape=(0, 7))  # x y z 4
        for radar_topic in radar_topics:
            bin_path = os.path.join(frames_path, 'radar', radar_topic,
                                    str(sync_datas_dict[radar_topic][i]) + '.bin')
            pc = read_points(bin_path, dim=7)
            source_time = sync_datas_dict[radar_topic][i]
            source_point = radar_topic[-6:]
            past_source2now_base = super_transform.get_tf(source_time=source_time, source_point=source_point,
                                                          target_time=target_time)
            # radar_tf = sensor_rts[radar_topic[-6:]]
            points_trans = pc_tf(pc, past_source2now_base)
            radar_all = np.concatenate((radar_all, points_trans))
        if args.is_save:
            sync_name = str(sync_datas_dict[center_topic][i])  # rename all topics to the center topic's stamp
            write_points(radar_all, os.path.join(output_paths_radar, sync_name + ".bin"))

            # pcd = o3d.io.read_point_cloud(pcd_path)
            # pc = np.asarray(pcd.points)
            # radar_tf = sensor_rts[radar_topic[-6:]]
            # points_trans = pc_tf(pc, radar_tf)
            # radar_all = np.concatenate((radar_all, points_trans))
            #
            # if args.is_save:
            #     sync_name = str(sync_datas_dict[center_topic][i])
            #     pcd = o3d.geometry.PointCloud()
            #     pcd.points = o3d.utility.Vector3dVector(radar_all[:, :3])
            #     o3d.io.write_point_cloud(os.path.join(output_paths_radar, sync_name + ".pcd"), pcd, write_ascii=True)

        # Copy the synchronized camera images.
        # NOTE(review): images stays empty, so the is_view branch below would
        # raise IndexError on images[0] — confirm before enabling is_view.
        images = []
        for image_topic in camera_topics:
            img_path = os.path.join(frames_path, 'camera', image_topic,
                                    str(sync_datas_dict[image_topic][i]) + '.jpg')

            copy_file(img_path, os.path.join(output_paths, 'camera', image_topic))
            # image = cv2.imread(img_path)
            # images.append(image)
            # if args.is_save:
            #     cv2.imwrite(os.path.join(output_paths, 'camera', topic2path(image_topic), sync_name + ".jpg"), image)

        # Copy the synchronized label (filtered by point count).
        label_path = os.path.join(frames_path, 'label', str(sync_datas_dict['label'][i]) + '.json')

        with open(label_path, 'r') as f:
            label = json.load(f)
        boxes = BBoxes(label)
        boxes.filter_by_points(lidar_all)  # drop boxes containing too few points
        filter_label = boxes.trans_labels()  # convert boxes back to label dicts for JSON storage
        with open(os.path.join(output_paths_label, str(sync_datas_dict['label'][i]) + '.json'), 'w') as f:
            json.dump(filter_label, f, ensure_ascii=False, indent=2)
        # Key frames additionally get a copy under samples/label.
        if i in key_info['key_index']:
            with open(os.path.join(output_paths_key_label, str(sync_datas_dict['label'][i]) + '.json'), 'w') as f:
                json.dump(filter_label, f, ensure_ascii=False, indent=2)

        if args.is_view:
            # Note all frames must be of the same size
            # image = cv2.resize(image, (0, 0), None, .4, .4)
            # grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # grey_3_channel = cv2.cvtColor(grey, cv2.COLOR_GRAY2BGR)
            #
            # numpy_horizontal_row1 = np.hstack((image, grey_3_channel))
            # numpy_horizontal_row2 = np.hstack((grey_3_channel, image))
            # combined_images = np.concatenate((numpy_horizontal_row1, numpy_horizontal_row2), axis=0)
            cv2.imshow('Image panel', images[0])
            cv2.waitKey(1)

            # o3d.visualization.draw_geometries([img], window_name="Open3D显示图像",
            #                                   width=1024, height=768,
            #                                   left=50, top=50,
            #                                   mesh_show_back_face=False)
            # if args.is_view:
            #     vis.show_img(img)

        if args.is_view:
            # Debug visualization of the merged clouds plus two dummy boxes.
            vis.show_pc(lidar_all, 'lidar', color=[0.2, 0.3, 0.4])
            vis.show_pc(radar_all, 'radar', color=[1, 0.706, 0.1])
            boxes = np.array([[0, 0, 0, 1, 1, 1, 0], [5, 5, 5, 2, 1, 1, 0.3]
                              ])
            vis.show_box(boxes)
            vis.show()

    print("同步完毕")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Configuration Parameters')
    parser.add_argument('--data-path', default="/home/adt/bags/work_space/datasets",
                        help='your data root for kitti')
    # BUG FIX: was `default=False, action='store_false'`, so passing --is-view
    # could never enable visualization (it always stayed False).
    parser.add_argument('--is-view', default=False, action='store_true')  # enable visualization
    # NOTE(review): with default=True, passing --is-save is a no-op; the flag
    # is kept for backward compatibility. Use --no-save to disable writing.
    parser.add_argument('--is-save', dest='is_save', default=True, action='store_true')
    parser.add_argument('--no-save', dest='is_save', action='store_false',
                        help='disable writing output files')
    parser.add_argument('--jump', type=int, default=5,
                        help='最多连续非关键帧数量')  # at most jump-1 consecutive non-key frames; 1 = keep every frame
    parser.add_argument('--key_dis', type=float, default=0.3,
                        help='关键帧距离')
    parser.add_argument('--max_acc_z', type=float, default=0.1, help='')  # skip frames above this z acceleration; currently unused
    parser.add_argument('--delta_stamp_max_thre', type=float, default=25.0, help='')  # sync time-offset threshold
    parser.add_argument('--points-disort', action='store_true',
                        help='')
    args = parser.parse_args()

    main(args)
