import pickle
import os
import random

import numpy as np

# Convert CIFAR python-version pickle files into two derived files:
# 1) a pickled list in which each element is a per-sample dict
#    (filename, fine label, coarse label, raw data);
# 2) a .txt list file whose lines hold each sample's index in that
#    pickled list together with its fine and coarse labels.
def binary2file(src_path, tgt_path, file_list=None):
    """Generate '<name>_list' (pickle) and '<name>_list.txt' under
    ``tgt_path`` for each CIFAR batch file named in ``file_list``.

    Args:
        src_path: directory containing the original CIFAR pickle files.
        tgt_path: directory where the derived files are written.
        file_list: batch file names to process; defaults to
            ['train', 'test'].
    """
    if file_list is None:
        file_list = ['train', 'test']

    for file_name in file_list:
        src_file = os.path.join(src_path, file_name)
        # CIFAR pickles were produced under Python 2, hence latin1.
        with open(src_file, 'rb') as fp:
            src_dict = pickle.load(fp, encoding='latin1')

        tgt_file = os.path.join(tgt_path, file_name + '_list')
        tgt = []
        with open(tgt_file + '.txt', 'w') as fp:
            for ids in range(len(src_dict['fine_labels'])):
                tgt_dict = {
                    'filenames': src_dict['filenames'][ids],
                    'fine_labels': src_dict['fine_labels'][ids],
                    'coarse_labels': src_dict['coarse_labels'][ids],
                    'data': src_dict['data'][ids],
                }
                tgt.append(tgt_dict)
                fp.write('{} {} {}\n'.format(
                    ids, tgt_dict['fine_labels'], tgt_dict['coarse_labels']))

        with open(tgt_file, 'wb') as fp:
            pickle.dump(tgt, fp)

# Read a list file and group the sample indices by class.
def lsfile2idsls(listfile):
    """Parse ``listfile`` (lines of "<index> <fine> <coarse>") and
    return ``(coarse_ids_list, fine_ids_list)``: per-class lists of
    sample indices for the 20 coarse and 100 fine CIFAR-100 classes.
    """
    coarse_cls = 20
    fine_cls = 100

    coarse_ids_list = [[] for _ in range(coarse_cls)]
    fine_ids_list = [[] for _ in range(fine_cls)]

    with open(listfile) as fp:
        for line in fp:
            contents = line.split()
            if not contents:
                continue  # tolerate blank lines (e.g. trailing newline)
            fine_ids_list[int(contents[1])].append(int(contents[0]))
            coarse_ids_list[int(contents[2])].append(int(contents[0]))

    return coarse_ids_list, fine_ids_list

# Given sample indices and a list file, write a sampled list file
# containing only the lines for those indices.
def ids2lsfile(tgtfile, listfile, ids):
    """Copy from ``listfile`` to ``tgtfile`` only the lines whose first
    field (the sample index) appears in ``ids``.

    Args:
        tgtfile: output list txt path.
        listfile: input list txt path ("<index> <fine> <coarse>" lines).
        ids: iterable of sample indices to keep.
    """
    wanted = set(ids)  # O(1) membership test instead of scanning a list per line
    with open(listfile) as fp, open(tgtfile, 'w') as fp_tgt:
        for line in fp:
            contents = line.split()
            if contents and int(contents[0]) in wanted:
                fp_tgt.write(line)

# Return the entry (or entries) of data_list at position(s) ids.
def listids2contents(data_list, ids):
    """Fetch entries from a data list by index.

    Args:
        data_list: either the list itself or the path of a pickled list
            (as produced by ``binary2file``).
        ids: a single index or a list of indices.

    Returns:
        A single entry when ``ids`` is a scalar, otherwise a list of
        entries in the same order as ``ids``.
    """
    if isinstance(data_list, str):
        # Path given: load the pickled list (latin1 for Py2-era pickles).
        with open(data_list, 'rb') as fp:
            data_list = pickle.load(fp, encoding='latin1')

    if isinstance(ids, list):
        return [data_list[i] for i in ids]
    return data_list[ids]

# Given a list txt file and the ratio between the largest and the
# smallest class size, generate a long-tailed (exponentially
# imbalanced) dataset list txt file.
def exp_imbalance(tgtfile, listfile, binarylist, ratio):
    """Sample an exponentially imbalanced subset of ``listfile`` and
    write it to ``tgtfile``.

    Args:
        tgtfile: output list txt path.
        listfile: input list txt path ("<index> <fine> <coarse>" lines).
        binarylist: path of the pickled per-sample list; used only for
            the label printout at the end.
        ratio: ratio between the largest and the smallest class size.
    """
    _, fine_ids_list = lsfile2idsls(listfile)
    # assumes all fine classes are balanced in the input, so class 0's
    # count stands in for every class — TODO confirm against the caller
    origin_num = len(fine_ids_list[0])
    max_num = origin_num
    min_num = round(origin_num / ratio)

    # Per-class decay factor: class k keeps round(max_num * mu**k).
    # NOTE(review): with exponent 1/100 the last class keeps
    # max_num * (min/max)**(99/100), slightly more than min_num;
    # an exponent of 1/99 would hit min_num exactly — confirm intended.
    mu = pow(min_num / max_num, 1/100)
    num_list = [max_num]
    for i in range(1, 100):
        num_list.append(round(max_num*pow(mu, i)))

    # Randomly decide which fine class receives which sample count.
    ids_list = list(range(100))
    random.shuffle(ids_list)

    imbalance_samples = []
    for n, cls in enumerate(ids_list):
        samples = fine_ids_list[cls]
        # NOTE(review): this shuffles fine_ids_list[cls] in place, so
        # the msg loop below reads from the shuffled per-class lists.
        random.shuffle(samples)
        tmp = samples[0:num_list[n]]
        imbalance_samples.append(tmp)

    # Flatten the per-class selections into one index list.
    samples = []
    for i in imbalance_samples:
        for j in i:
            samples.append(j)

    # Sanity printout: fine label of the first (post-shuffle) sample of
    # each class, looked up in the pickled per-sample list.
    msg = []
    for i in range(len(fine_ids_list)):
        data_ids = fine_ids_list[i][0]
        data_info = listids2contents(binarylist, data_ids)
        msg.append(data_info['fine_labels'])

    ids2lsfile(tgtfile, listfile, samples)
    print(num_list)
    print(msg)


# Merge the CIFAR train and test sets into one dataset laid out the
# same way as the originals (train samples first, then test samples).
def merge_traintest(train, test, tgt_path):
    """Concatenate two CIFAR pickle files and dump the merged dict.

    Args:
        train: path of the training-set pickle.
        test: path of the test-set pickle.
        tgt_path: output path for the merged pickle.
    """
    with open(train, 'rb') as fp:
        train_dict = pickle.load(fp, encoding='latin1')
    with open(test, 'rb') as fp:
        test_dict = pickle.load(fp, encoding='latin1')

    total_dict = {
        'filenames': train_dict['filenames'] + test_dict['filenames'],
        # NOTE(review): only the train set's batch_label is kept, as in
        # the original code — confirm the test label can be dropped.
        'batch_label': train_dict['batch_label'],
        'fine_labels': train_dict['fine_labels'] + test_dict['fine_labels'],
        'coarse_labels': train_dict['coarse_labels'] + test_dict['coarse_labels'],
        'data': np.concatenate((train_dict['data'], test_dict['data']), axis=0),
    }

    # Sanity printout: the lengths should all equal the merged count.
    print(len(total_dict['filenames']))
    print((total_dict['batch_label']))
    print(len(total_dict['fine_labels']))
    print(len(total_dict['coarse_labels']))
    print((total_dict['data'].shape))

    with open(tgt_path, 'wb') as fp:
        pickle.dump(total_dict, fp)
        
# Split a pickled dataset (plus its list file) into train and test
# list files, stratified per fine class, at the given train:test ratio.
def split_cifartotal(datasets_list, datasets_listfile, ratio, file_name='total'):
    """Write '<file_name>_train_list.txt' / '<file_name>_test_list.txt'.

    Args:
        datasets_list: path of the pickled per-sample list.
        datasets_listfile: path of the matching list txt file.
        ratio: train/test size ratio; each class contributes
            round(n / (1 + ratio)) samples to the test split.
        file_name: prefix for the two output files.
    """
    _, fine_labels_ids = lsfile2idsls(datasets_listfile)
    with open(datasets_list, 'rb') as fp:
        data_list = pickle.load(fp, encoding='latin1')

    train_list = []
    test_list = []
    for cls_ids in fine_labels_ids:
        total = len(cls_ids)  # renamed from `sum`, which shadowed the builtin
        test_num = round(total * 1 / (1 + ratio))
        random.shuffle(cls_ids)
        test_list += cls_ids[0:test_num]
        train_list += cls_ids[test_num:]

    def _write_list(path, ids_list):
        # One "<index> <fine> <coarse>" line per retained sample.
        with open(path, 'w') as fp:
            for ids in ids_list:
                fp.write('%d %d %d\n' % (
                    ids,
                    data_list[ids]['fine_labels'],
                    data_list[ids]['coarse_labels'],
                ))

    _write_list(file_name + '_train_list.txt', train_list)
    _write_list(file_name + '_test_list.txt', test_list)


if __name__ == '__main__':
    # Directories holding the CIFAR-100 python-version pickle files.
    src_path = '../CIFAR/CIFAR100'
    tgt_path = '../CIFAR/CIFAR100'
    # binary2file(src_path, tgt_path)

    # Inputs for long-tailed sampling: full train list (txt), output
    # txt path, and the pickled per-sample list.
    ls_path = '../CIFAR/CIFAR100/total_train_list.txt' 
    tgt_path = '../CIFAR/CIFAR100/exp_imb_2.txt'
    binary_list = '../CIFAR/CIFAR100/total_list'
    # exp_imbalance(tgt_path, ls_path, 100)

    # train_path = '../CIFAR/CIFAR100/train'
    # test_path = '../CIFAR/CIFAR100/test'

    # merge_traintest(train_path, test_path, '../CIFAR/CIFAR100/total')
    # binary2file(src_path, tgt_path, ['total'])
    # split_cifartotal('../CIFAR/CIFAR100/total_list', '../CIFAR/CIFAR100/total_list.txt', 119, '../CIFAR/CIFAR100/total')
    # Build a long-tailed train list with max/min class ratio 100.
    exp_imbalance(tgt_path, ls_path, binary_list, 100)