import os
import random
import shutil

def generate_train_val_test(path, train_rate, test_rate, flag=0):
    '''
    Split the files of one dataset sub-folder into train/val/test lists.

    Writes four text files into *path* — all.txt, train.txt, val.txt and
    test.txt — each line being the full path of one sample file.

    :param path: dataset root containing 'JPEGImages', 'labels' and/or 'xml'.
    :param train_rate: fraction (0-1] of the non-test samples used for
        training; the remainder becomes the validation set.
    :param test_rate: fraction [0-1) of all samples reserved for testing,
        taken from the tail of the directory listing.
    :param flag: which sub-folder to list: 0 -> JPEGImages, 1 -> labels,
        anything else -> xml.
    :return: None (results are written to the four .txt files).
    '''
    images_path = os.path.join(path, 'JPEGImages')
    labels_path = os.path.join(path, 'labels')
    xml_path = os.path.join(path, 'xml')

    if flag == 0:
        rootPath = images_path
    elif flag == 1:
        rootPath = labels_path
    else:
        rootPath = xml_path

    val_percent = 1 - train_rate

    total_images = os.listdir(rootPath)
    data_length = len(total_images)
    test_length = int(data_length * test_rate)
    # Guard test_length == 0: `total_images[-0:]` would return the WHOLE
    # list (and `[:-0]` an empty one), putting every sample in the test set.
    if test_length > 0:
        test_lists = total_images[-test_length:]
        train_val_lists = total_images[:-test_length]
    else:
        test_lists = []
        train_val_lists = list(total_images)

    train_val_len = len(train_val_lists)
    val_len = int(train_val_len * val_percent)
    val_lists = random.sample(train_val_lists, val_len)
    val_set = set(val_lists)  # O(1) membership test instead of O(n) per sample

    test_file = os.path.join(path, 'test.txt')
    val_file = os.path.join(path, 'val.txt')
    train_file = os.path.join(path, 'train.txt')
    all_file = os.path.join(path, 'all.txt')

    with open(all_file, 'w') as f_all:
        for file in total_images:
            f_all.write(os.path.join(rootPath, file) + '\n')

    with open(test_file, 'w') as f_test:
        for file in test_lists:
            f_test.write(os.path.join(rootPath, file) + '\n')

    # Bug fix: validation samples were previously appended to test.txt
    # (and val.txt was never written at all); route them to val.txt.
    with open(val_file, 'w') as f_val, open(train_file, 'w') as f_train:
        for train_val in train_val_lists:
            if train_val in val_set:
                f_val.write(os.path.join(rootPath, train_val) + '\n')
            else:
                f_train.write(os.path.join(rootPath, train_val) + '\n')

    print('train nums:', data_length - len(val_lists) - len(test_lists), train_val_len - len(val_lists))
    print('val nums:', len(val_lists))
    print('test nums:', len(test_lists))


def image_type(list_path):
    '''
    Print (and return) which known image extensions occur under each
    dataset's 'JPEGImages' folder.

    :param list_path: iterable of dataset root directories, each expected
        to contain a 'JPEGImages' sub-folder.
    :return: list of the known extensions found, in candidate order
        (previously this function only printed; the return value is new
        and backward compatible).
    '''
    known_types = ['png', 'jpg', 'bmp', 'PNG', 'JPG', 'BMP', 'jpeg', 'JPEG']
    img_type = []
    for path_index in list_path:
        image_path = os.path.join(path_index, 'JPEGImages')
        image_names = os.listdir(image_path)
        for type_image in known_types:
            if type_image in img_type:
                continue
            # Bug fix: match the real extension, not a substring anywhere
            # in the name (e.g. 'png_sample.jpg' wrongly matched 'png').
            suffix = '.' + type_image
            if any(name.endswith(suffix) for name in image_names):
                img_type.append(type_image)
    print(img_type)
    return img_type


if __name__ == '__main__':
    # Dataset roots to (re)split; append more paths here as needed.
    path_list = [
        '/data/ICE/coco/indemind_train2017/VOC2007'
                 ]
    # image_type(path_list)
    #
    for dataset_path in path_list:
        # Remove any split artifacts left over from a previous run so the
        # new lists are written from a clean slate.
        stale_names = ['test.txt', 'test_all.txt', 'test_val.txt',
                       'val.txt', 'train.txt', 'all.txt']
        stale_paths = [os.path.join(dataset_path, name) for name in stale_names]
        for del_path in stale_paths:
            if not os.path.exists(del_path):
                continue
            if os.path.isfile(del_path):
                os.remove(del_path)
            else:
                shutil.rmtree(del_path)
            print(del_path)
        train_rate = 0.95
        test_rate = 0.05
        generate_train_val_test(dataset_path, train_rate, test_rate, 2)
