import json
import argparse
import os
import random
import csv
import open3d as o3d
import numpy as np
from my_io import mkdir, readcsv
from dataset_util import check_sample_size, check_dataset_is_splited
from tqdm import tqdm


def main(args):
    """Walk ``args.data_path`` and split every dataset directory found there.

    Only directories whose name starts with "__" (the naming convention
    marking dataset folders) are processed; each one is handed to
    :func:`main_worker`.

    Raises:
        FileNotFoundError: if ``args.data_path`` does not exist.
    """
    if not os.path.exists(args.data_path):
        # Raise instead of assert: asserts are stripped under `python -O`.
        raise FileNotFoundError("data path does not exist: {}".format(args.data_path))
    # The original `sort(key=lambda x: x)` was a no-op key; plain sorted()
    # gives the same deterministic lexicographic order.
    datasets = sorted(os.listdir(args.data_path))
    for dataset in tqdm(datasets):
        if dataset.startswith("__"):  # dataset dirs start with "__"
            dataset_path = os.path.join(args.data_path, dataset)
            main_worker(args, dataset_path)


def get_indxex(train_ratio, val_ratio, test_ratio, index, epoch=500):
    """Split ``index`` into train/val/test lists, chunk by chunk.

    The sequence is processed in chunks of ``epoch`` entries.  Each full
    chunk is divided according to the given ratios (integer-truncated,
    with the per-chunk remainder going to the test split); the trailing
    partial chunk of ``len(index) % epoch`` entries is divided the same
    way, so every chunk contributes proportionally to all three splits.

    Args:
        train_ratio: relative weight of the train split.
        val_ratio: relative weight of the val split.
        test_ratio: relative weight of the test split.
        index: sequence of sample indices to split.
        epoch: chunk size used for the proportional split.

    Returns:
        Tuple ``(train_index, val_index, test_index)`` of lists whose
        elements are drawn from ``index``.

    Note:
        Assignment is decided by each element's *position* in ``index``,
        not its value.  The original code keyed on the value
        (``idx % epoch``), which made an upstream shuffle of ``index`` a
        no-op — the same set of indices always landed in each split.
    """
    train_index = []
    val_index = []
    test_index = []
    n_full = len(index) // epoch       # number of complete chunks
    remainder = len(index) % epoch     # size of the trailing partial chunk
    total = float(train_ratio + val_ratio + test_ratio)

    # Per-chunk sizes for full chunks; leftovers fall through to test.
    piece = float(epoch) / total
    train_size = int(piece * train_ratio)
    val_size = int(piece * val_ratio)

    # Per-chunk sizes for the trailing partial chunk.
    piece2 = float(remainder) / total
    train_size2 = int(piece2 * train_ratio)
    val_size2 = int(piece2 * val_ratio)

    for pos, idx in enumerate(index):
        offset = pos % epoch           # position within the current chunk
        if pos // epoch < n_full:      # inside a complete chunk
            if offset < train_size:
                train_index.append(idx)
            elif offset < train_size + val_size:
                val_index.append(idx)
            else:
                test_index.append(idx)
        else:                          # trailing partial chunk
            if offset < train_size2:
                train_index.append(idx)
            elif offset < train_size2 + val_size2:
                val_index.append(idx)
            else:
                test_index.append(idx)
    return train_index, val_index, test_index


def main_worker(args, dataset_path):
    """Split one dataset into train/val/test name lists.

    Writes ``samples/split_sets/{train,test,val}.txt`` containing the
    sample names (file stems of ``samples/lidar_bin``) assigned to each
    split, and records the ratio as a row in the dataset's README.csv.

    Args:
        args: parsed CLI arguments; uses ``args.split`` (three ratio
            ints, order train:test:val) and ``args.shuffle``.
        dataset_path: root directory of one dataset.
    """
    Samples_Size = check_sample_size(dataset_path)
    if check_dataset_is_splited(dataset_path):
        # Already split: skip this dataset instead of silently
        # overwriting the existing split (the original fell through).
        print("数据集<{}>已经划分".format(dataset_path))
        return
    split_dir = os.path.join(dataset_path, "samples/split_sets")
    mkdir(split_dir)

    # Split ratios; within each chunk, the remainder goes to the test set.
    if len(args.split) != 3:
        raise ValueError("--split must have exactly 3 values, got {}".format(args.split))
    train_size, test_size, val_size = args.split

    # Sample names are the file stems of the lidar bin files, in
    # deterministic lexicographic order.
    data_files = sorted(os.listdir(os.path.join(dataset_path, "samples/lidar_bin")))
    data_names = np.array([data_file.split('.')[0] for data_file in data_files])
    index = np.arange(0, Samples_Size)
    if args.shuffle:
        random.shuffle(index)  # randomize which samples land in each split

    train_index, val_index, test_index = get_indxex(
        train_ratio=train_size, val_ratio=val_size, test_ratio=test_size, index=index)

    # Record the split ratio in README.csv (the whole file is rewritten
    # from the parsed key/value rows plus the new entry).
    fieldnames = ['Item', 'Details']
    csv_datas = readcsv(os.path.join(dataset_path, 'README.csv'), fieldnames)
    csv_datas['Split(train:test:val)'] = '<{}:{}:{}>'.format(train_size, test_size, val_size)
    # `with` guarantees the file is closed; the original left it open.
    with open(os.path.join(dataset_path, 'README.csv'), mode='w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        for key in csv_datas:
            writer.writerow({'Item': key, 'Details': csv_datas[key]})

    # One name-list file per split, one sample name per line.
    for filename, split_index in (("train.txt", train_index),
                                  ("test.txt", test_index),
                                  ("val.txt", val_index)):
        with open(os.path.join(split_dir, filename), "w") as f:
            for name in data_names[split_index]:
                f.write(str(name) + "\n")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Configuration Parameters')
    parser.add_argument('--data-path', default='/media/adt/ZWH4T/ZWH/Dataset3d/final/dr',
                        help='your data root path')
    # nargs=3 + type=int parses `--split 9 0 1` into [9, 0, 1]; the
    # original `type=list` split a CLI string into single characters,
    # so only the default value ever worked.
    parser.add_argument('--split', type=int, nargs=3, default=[9, 0, 1],
                        help='train_size test_size val_size')
    # store_true gives a proper boolean flag; the original `type=bool`
    # made `--shuffle False` evaluate to True (any non-empty string is
    # truthy). Default remains False.
    parser.add_argument('--shuffle', action='store_true', help='random choose')
    args = parser.parse_args()

    main(args)
