import json
import argparse
import os
import random
import csv
import open3d as o3d
import numpy as np
from my_io import mkdir, readcsv
from dataset_util import check_sample_size, check_dataset_is_splited
from tqdm import tqdm
from tictoc import TicToc
import multiprocessing

def main(args):
    """Split every dataset under ``args.data_path`` into train/val/test sets.

    A dataset is a sub-directory whose name starts with "__". Each one is
    handed to :func:`main_worker`; when several are found they are processed
    in parallel, one worker process per dataset.
    """
    # Timing helper (project-local TicToc).
    cost = TicToc("数据划分")
    assert os.path.exists(args.data_path)
    datasets = sorted(os.listdir(args.data_path))
    # Only directories whose names start with "__" are datasets.
    files = [os.path.join(args.data_path, d) for d in tqdm(datasets)
             if d.startswith("__")]

    if not files:
        # Nothing to do; the original would have crashed on files[0].
        print("no dataset directories starting with '__' under {}".format(args.data_path))
        return

    if len(files) > 1:
        pool = multiprocessing.Pool(len(files))
        # Keep the AsyncResult handles: calling .get() afterwards re-raises
        # any exception raised inside a worker instead of swallowing it.
        results = [pool.apply_async(main_worker, args=(f, args)) for f in files]
        pool.close()
        pool.join()
        for r in results:
            r.get()  # propagate worker errors, if any
    else:
        main_worker(files[0], args)

    print("---------------------------------------------------------")
    print("处理完成: {}".format(files))
    cost.toc()
    print("---------------------------------------------------------")

def get_indxex(train_ratio, val_ratio, test_ratio, index, epoch=500):
    """Split sample indices into train/val/test lists, chunk by chunk.

    The index space [0, len(index)) is treated as consecutive chunks of
    ``epoch`` samples. Each full chunk is divided train/val/test by the
    given ratios; the trailing partial chunk (len(index) % epoch samples)
    is divided with the same ratios, so every chunk contributes to all
    three splits. Rounding leftovers go to the test split.

    Args:
        train_ratio: non-negative relative weight of the train split.
        val_ratio: non-negative relative weight of the val split.
        test_ratio: non-negative relative weight of the test split.
        index: iterable of integer sample indices in [0, len(index)).
        epoch: chunk size used for the piecewise split.

    Returns:
        Tuple ``(train_index, val_index, test_index)`` of lists.

    Raises:
        ValueError: if the three ratios sum to zero (would otherwise be a
            bare ZeroDivisionError).
    """
    ratio_sum = train_ratio + val_ratio + test_ratio
    if ratio_sum <= 0:
        raise ValueError("train/val/test ratios must sum to a positive value")

    train_index = []
    val_index = []
    test_index = []
    n = len(index) // epoch       # number of full chunks
    m = len(index) % epoch        # size of the trailing partial chunk
    # Per-chunk slot counts for full chunks; test gets the remainder.
    piece = float(epoch) / float(ratio_sum)
    train_size = int(piece * train_ratio)
    val_size = int(piece * val_ratio)
    # Per-chunk slot counts for the partial chunk.
    piece2 = float(m) / float(ratio_sum)
    train_size2 = int(piece2 * train_ratio)
    val_size2 = int(piece2 * val_ratio)

    for idx in index:
        idx_epoch = idx % epoch   # position of idx within its chunk
        # Integer division: original used true division, which only worked
        # by accident of float comparison.
        if idx // epoch < n:
            # idx lies in a full chunk.
            if idx_epoch < train_size:
                train_index.append(idx)
            elif idx_epoch < train_size + val_size:
                val_index.append(idx)
            else:
                test_index.append(idx)
        else:
            # idx lies in the trailing partial chunk.
            if idx_epoch < train_size2:
                train_index.append(idx)
            elif idx_epoch < train_size2 + val_size2:
                val_index.append(idx)
            else:
                test_index.append(idx)
    return train_index, val_index, test_index


def main_worker(dataset_path, args):
    """Split one dataset's samples and persist the result.

    Reads ``samples/samples.json`` under *dataset_path*, splits the key
    frames according to ``args.split`` (datasets named in
    ``args.val_datasets`` become pure validation sets), records the split
    sizes in README.csv, and writes the index lists back into samples.json.
    """
    # Split by ratio; rounding leftovers go to the test set.
    split_list = list(map(int, args.split.split(',')))
    val_datasets = args.val_datasets.split(',')
    if os.path.basename(dataset_path) in val_datasets:
        # Datasets explicitly listed as validation sets get everything in val.
        split_list = [0, 0, 10]

    assert len(split_list) == 3
    # NOTE: --split is ordered train:test:val (see the argparse help text).
    train_size, test_size, val_size = split_list

    with open(os.path.join(dataset_path, "samples/samples.json"), "r") as f:
        json_samples = json.load(f)
    key_index = np.array(json_samples['key_index'])
    samples_size = len(key_index)

    index = np.arange(0, samples_size)
    if args.shuffle:
        random.shuffle(index)  # in-place shuffle of the 1-D index array

    train_index, val_index, test_index = get_indxex(
        train_ratio=train_size, val_ratio=val_size, test_ratio=test_size,
        index=index)

    # Record the split sizes in README.csv (the whole file is rewritten as
    # Item/Details rows; no header row is emitted).
    fieldnames = ['Item', 'Details']
    csv_datas = readcsv(os.path.join(dataset_path, 'README.csv'), fieldnames)
    csv_datas['Split(train:val:test)'] = '<{}:{}:{}>'.format(
        len(train_index), len(val_index), len(test_index))
    # Context manager guarantees the file handle is closed (the original
    # leaked it).
    with open(os.path.join(dataset_path, 'README.csv'), mode='w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        for key, value in csv_datas.items():
            writer.writerow({'Item': key, 'Details': value})

    # Map positional indices back to key-frame ids via fancy indexing.
    json_samples['train_idx'] = key_index[train_index].tolist()
    json_samples['val_idx'] = key_index[val_index].tolist()
    json_samples['test_idx'] = key_index[test_index].tolist()

    # Persist the key-frame split back to the samples JSON file.
    with open(os.path.join(dataset_path, "samples/samples.json"), 'w', encoding='utf-8') as file:
        json.dump(json_samples, file, ensure_ascii=False, indent=4)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Configuration Parameters')
    parser.add_argument('--data-path', default='/media/adt/ZWH4T/ZWH/bags/dataset/tm3/work_space_tmp/datasets',
                        help='your data root path')
    parser.add_argument('--split', type=str, default='9,0,1', help='train_size: test_size: val_size')
    parser.add_argument('--val_datasets', type=str, default='__00,__09', help='验证集名')
    # type=bool is an argparse pitfall: bool('False') is True, so any value
    # supplied on the command line would enable shuffling. A store_true flag
    # keeps the same default (False) with correct semantics.
    parser.add_argument('--shuffle', action='store_true', help='random choose')
    args = parser.parse_args()

    main(args)
