import json
import argparse
import os
import random
import csv
import open3d as o3d
import numpy as np
from my_io import mkdir, readcsv
from tqdm import tqdm

def check_pandaset_size(sensor_id, path):
    """Return the number of frames for one sensor of a Pandaset sequence,
    verifying that annotations, camera images, and lidar scans agree.

    Args:
        sensor_id: 0 for the "pandar64" lidar, 1 for "pandarGT".
        path: root directory of a single Pandaset sequence.

    Returns:
        The number of lidar frames (== annotation count == image count).

    Raises:
        RuntimeError: if the three per-sensor counts disagree.
    """
    sensor_name = "pandarGT" if sensor_id == 1 else "pandar64"
    calib_num = len(os.listdir(os.path.join(path, "annotations_txt", "cuboids", sensor_name)))
    # The front_camera folder holds 3 extra non-frame files (presumably
    # calibration/pose/timestamp metadata — TODO confirm), hence the -3.
    img2_num = len(os.listdir(os.path.join(path, "camera", "front_camera"))) - 3
    lidar_num = len(os.listdir(os.path.join(path, "lidar_bin", sensor_name)))
    # Raise instead of assert so the check survives `python -O`, and include
    # the counts so a mismatch is diagnosable.
    if not (lidar_num == calib_num == img2_num):
        raise RuntimeError(
            "Frame count mismatch in {}: lidar={}, cuboids={}, images={}".format(
                path, lidar_num, calib_num, img2_num))
    return lidar_num

def main(args):
    """Run the split for every sequence directory under ``args.data_path``.

    Args:
        args: parsed command-line namespace (uses data_path, split, shuffle,
            sensor_id).

    Raises:
        FileNotFoundError: if ``args.data_path`` does not exist.
    """
    # Raise instead of assert so input validation survives `python -O`.
    if not os.path.exists(args.data_path):
        raise FileNotFoundError("data path not found: {}".format(args.data_path))
    # Sort for a deterministic processing order (the identity `key` the
    # original passed was a no-op).
    for dataset in tqdm(sorted(os.listdir(args.data_path))):
        main_worker(args, os.path.join(args.data_path, dataset))

def _write_names(path, names):
    """Write one frame name per line to *path*."""
    with open(path, "w") as f:
        for name in names:
            f.write(str(name) + "\n")

def main_worker(args, dataset_path):
    """Split one Pandaset sequence into train/test/val name lists.

    Writes ``split_sets/{train,test,val}.txt`` under *dataset_path* plus a
    ``README.csv`` summarizing the split.

    Args:
        args: parsed command-line namespace (uses split, shuffle, sensor_id).
        dataset_path: root directory of one Pandaset sequence.
    """
    frames_size = check_pandaset_size(args.sensor_id, dataset_path)
    split_dir = os.path.join(dataset_path, "split_sets")
    mkdir(split_dir)

    # Split by ratio; any remainder (from int truncation) goes to the test set.
    assert len(args.split) == 3
    train_ratio, test_ratio, val_ratio = args.split
    piece = float(frames_size) / float(train_ratio + test_ratio + val_ratio)
    train_size = int(piece * train_ratio)
    val_size = int(piece * val_ratio)
    test_size = frames_size - train_size - val_size

    # Frame names come from the annotation filenames (extension stripped).
    # NOTE(review): this reads "annotations/cuboids" while the size check
    # reads "annotations_txt/cuboids/<sensor>" — verify both hold one file
    # per frame.
    data_files = sorted(os.listdir(os.path.join(dataset_path, "annotations", "cuboids")))
    data_names = np.array([data_file.split('.')[0] for data_file in data_files])

    index = np.arange(0, frames_size)
    if args.shuffle:
        random.shuffle(index)  # randomize frame order before partitioning
    train_index = index[0:train_size]
    test_index = index[train_size:train_size + test_size]
    val_index = index[train_size + test_size:train_size + test_size + val_size]

    _write_names(os.path.join(split_dir, "train.txt"), data_names[train_index])
    _write_names(os.path.join(split_dir, "test.txt"), data_names[test_index])
    _write_names(os.path.join(split_dir, "val.txt"), data_names[val_index])

    # Write a README.csv describing the split; the `with` guarantees the
    # handle is closed (the original leaked it).
    with open(os.path.join(dataset_path, 'README.csv'), mode='w', newline='') as csvfile:
        write = csv.DictWriter(csvfile, fieldnames=['Item', 'Details'])
        write.writeheader()
        write.writerow({'Item': 'Bag_Name', 'Details': "Pandaset"})
        write.writerow({'Item': 'Description', 'Details': '<you can note something here>'})
        write.writerow({'Item': 'SyncParas', 'Details': ''})
        write.writerow({'Item': 'Frames_Size', 'Details': frames_size})
        write.writerow({'Item': 'Topics_Size', 'Details': ''})
        write.writerow({'Item': 'Topics', 'Details': ''})
        write.writerow({'Item': 'Split(train:test:val)', 'Details':'<{}:{}:{}>'.format(train_size, test_size, val_size)})

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = 'Configuration Parameters')
    parser.add_argument('--data-path', default='/media/zwh/ZWH4T/ZWH/Dataset3d/final/pandarset_final', help='your data root path')
    # `type=list` would split the argument string into single characters
    # ("18,1,1" -> ['1','8',',',...]); take three ints instead,
    # e.g. `--split 18 1 1`.
    parser.add_argument('--split', type=int, nargs=3, default=[18, 1, 1], help='train_size: test_size: val_size')
    # `type=bool` treats ANY non-empty string (even "False") as True; a
    # store_true flag has the same False default and works correctly.
    parser.add_argument('--shuffle', action='store_true', help='random choose')
    parser.add_argument('--sensor-id', type=int, default=0, help='0:pandar64, 1:pandarGT')
    args = parser.parse_args()

    main(args)
