import os
import shutil
from pprint import pprint

# from copy_file import dst_dirs
# from my_io import readcsv

# def get_idxes(data_type):
#     file_path = os.path.join(datasets_root, dataset_path, 'samples/split_sets', data_type+'.txt')
#     with open(file_path, 'r') as f:
#         lines = f.readlines()
#     lines = [line.strip().split(' ') for line in lines]
#     lines = [line[0] for line in lines]
#     lines = sorted(lines)
#     indexes = []
#     for line in lines:
#         assert line in sweeps
#         idx = sweeps.index(line)
#         indexes.append(idx)
#     return indexes


if __name__ == '__main__':

    # One-off job: for every dataset directory present in both trees, copy
    # its localization.csv from the source tree into the destination tree.
    # Destination directory names carry a 2-character prefix (e.g. "01foo"
    # maps to source "foo") — TODO confirm the prefix convention.
    src = '/media/adt/ZWH4T/ZWH/Dataset3d/dataset_18xx-bag/cyw2.0/tmx/datasets'
    dst = '/media/adt/T7/ZWH/docker/files/data/motion/datasets/gg'

    # Build a set for O(1) name lookup, and match each destination dir to
    # its source dir BY NAME.  The previous zip() of the two sorted listings
    # paired entries by index, so a single extra or missing directory on
    # either side silently misaligned every subsequent pair and skipped
    # valid copies.
    src_names = set(os.listdir(src))

    for dst_dir in sorted(os.listdir(dst)):
        src_name = dst_dir[2:]          # strip the 2-char destination prefix
        if src_name not in src_names:
            continue
        print(src_name, dst_dir)
        src_file = os.path.join(src, src_name, 'localization/localization.csv')
        dst_path = os.path.join(dst, dst_dir, 'localization')
        # shutil.copy replaces the old os.system(f'cp -r {src} {dst}') call:
        # it needs no shell (safe with spaces/metacharacters in paths), and
        # '-r' was pointless for a single file anyway.  Like `cp`, it copies
        # into dst_path if that is an existing directory, otherwise creates
        # a file with that name.
        shutil.copy(src_file, dst_path)

    #
    # datasets_root = '/media/adt/T7/ZWH/docker/files/data/motion/datasets'
    # assert os.path.exists(datasets_root), f'{datasets_root} does not exist!'
    # dataset_paths = []
    # for dataset_path in sorted(os.listdir(datasets_root)):
    #     if dataset_path.startswith('__'):
    #         dataset_paths.append(dataset_path)
    #
    # for dataset_path in dataset_paths:
    #     sweeps = os.listdir(os.path.join(datasets_root, dataset_path, 'lidar_bin'))
    #     sweeps = sorted(sweeps)
    #     sweeps = [sweep.replace('.bin', '') for sweep in sweeps]
    #     assert os.path.exists(os.path.join(datasets_root, dataset_path, 'samples/split_sets'))
    #
    #
    #     train_idx = get_idxes('train')
    #     val_idx = get_idxes('val')
    #     test_idx = get_idxes('test')
    #     key_index = train_idx+val_idx+test_idx
    #
    #     json_data = {
    #         "key_index": key_index,
    #         "train_idx": train_idx,
    #         "val_idx": val_idx,
    #         "test_idx": test_idx
    #     }
    #
    #     import json
    #     with open(os.path.join(datasets_root, dataset_path, 'samples/samples.json'), 'w') as f:
    #         json.dump(json_data, f, ensure_ascii=False, indent=2)
    #
    #     pass
    #
    #
    #
    #
