import pickle

import bcolz
import cv2
import mxnet as mx
import numpy as np
from PIL import Image, ImageFile
from torch.utils.data import ConcatDataset, DataLoader
from torchvision import transforms as trans
from torchvision.datasets import ImageFolder
from tqdm import tqdm

ImageFile.LOAD_TRUNCATED_IMAGES = True


def de_preprocess(tensor):
    """Undo Normalize([0.5]*3, [0.5]*3): map values from [-1, 1] back to [0, 1]."""
    half = 0.5
    return tensor * half + half


def get_train_dataset(imgs_folder):
    """Build an ImageFolder dataset with horizontal-flip augmentation and
    [-1, 1] normalization; return (dataset, number_of_classes)."""
    augmentations = [
        trans.RandomHorizontalFlip(),
        trans.ToTensor(),
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ]
    ds = ImageFolder(imgs_folder, trans.Compose(augmentations))
    # ImageFolder sorts samples by class, so the last sample carries the
    # highest label index; +1 turns it into a class count.
    class_num = ds[-1][1] + 1
    return ds, class_num


def get_train_loader(conf):
    """Create the training DataLoader selected by ``conf.data_mode``.

    Supported modes:
        - "concat": ms1m + vgg merged into one dataset (vgg labels shifted
          past the ms1m label range so the two class spaces do not collide)
        - "emore", "vgg", "ms1m" (legacy spelling "mslm" also accepted)

    Returns:
        (loader, class_num): a shuffling DataLoader and the total class count.

    Raises:
        Exception: if ``conf.data_mode`` is not one of the supported modes.
    """
    if conf.data_mode == "concat":
        ms1m_ds, ms1m_class_num = get_train_dataset(conf.ms1m_folder / 'imgs')
        vgg_ds, vgg_class_num = get_train_dataset(conf.vgg_folder / 'imgs')
        print('vgg and ms1m loader generated')
        # Offset every vgg label by the ms1m class count so labels stay unique.
        for i, (url, label) in enumerate(vgg_ds.imgs):
            vgg_ds.imgs[i] = (url, label + ms1m_class_num)
        ds = ConcatDataset([ms1m_ds, vgg_ds])
        class_num = vgg_class_num + ms1m_class_num
    elif conf.data_mode == "emore":
        ds, class_num = get_train_dataset(conf.emore_folder / 'imgs')
        print('emore loader generated')
    elif conf.data_mode == "vgg":
        ds, class_num = get_train_dataset(conf.vgg_folder / 'imgs')
        print('vgg loader generated')
    elif conf.data_mode in ("ms1m", "mslm"):
        # "mslm" looks like a historical typo for "ms1m" (the folder and the
        # log line both say ms1m); keep accepting it for backward compatibility.
        ds, class_num = get_train_dataset(conf.ms1m_folder / 'imgs')
        print('ms1m loader generated')
    else:
        raise Exception(
            "conf.data_mode must be in ['concat', 'emore', 'vgg', 'ms1m'] ('mslm' accepted as alias) !")

    loader = DataLoader(ds, batch_size=conf.batch_size, shuffle=True, pin_memory=conf.pin_memory,
                        num_workers=conf.num_workers)

    return loader, class_num


def load_bin(path, root_dir, transform, image_size=(112, 112)):
    """Decode a pickled verification set (list of encoded images + is-same
    labels) into a bcolz array on disk under ``root_dir``.

    Args:
        path: pickle file containing (bins, is_same_list) — encoded images
            and the pair labels. NOTE(security): pickle can execute arbitrary
            code; only load trusted files.
        root_dir: directory (pathlib.Path) for the bcolz store; created if absent.
        transform: callable applied to each PIL image; its output must fit a
            (3, H, W) float32 slot.
        image_size: (H, W) of the decoded images. Changed from a mutable list
            default to a tuple — indexing behavior is identical.

    Returns:
        (data, is_same_list): the filled bcolz carray and the label list.
    """
    if not root_dir.exists():
        root_dir.mkdir()

    # Close the file deterministically (the original leaked the handle).
    with open(path, 'rb') as f:
        bins, is_same_list = pickle.load(f, encoding='bytes')
    data = bcolz.fill([len(bins), 3, image_size[0], image_size[1]], dtype=np.float32, rootdir=root_dir, mode='w')
    for i, _bin in enumerate(bins):
        img = mx.image.imdecode(_bin).asnumpy()
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img = Image.fromarray(img.astype(np.uint8))
        data[i, ...] = transform(img)
        # The original incremented i after the store (a no-op on the loop
        # variable); (i + 1) reproduces the same progress cadence and values.
        if (i + 1) % 1000 == 0:
            print('loading bin', i + 1)
    print(data.shape)
    np.save(str(root_dir) + "_list", np.array(is_same_list))

    return data, is_same_list


def get_val_pair(path, name):
    """Open one cached validation set: the bcolz image array under
    ``path/name`` and its matching ``<name>_list.npy`` is-same labels."""
    images = bcolz.carray(rootdir=path / name, mode='r')
    labels = np.load(path / '{}_list.npy'.format(name))
    return images, labels


def get_val_data(data_path):
    """Load the three standard verification sets from ``data_path``.

    Returns the image arrays first, then the is-same label arrays, in the
    fixed order: agedb_30, cfp_fp, lfw.
    """
    arrays = []
    issame_lists = []
    for name in ('agedb_30', 'cfp_fp', 'lfw'):
        carray, is_same = get_val_pair(data_path, name)
        arrays.append(carray)
        issame_lists.append(is_same)
    return (*arrays, *issame_lists)


def load_mx_rec(rec_path):
    """Unpack an MXNet train.rec/train.idx pair into per-class JPEG folders
    under ``rec_path/'imgs'`` (one subfolder per integer label)."""
    out_dir = rec_path / 'imgs'
    if not out_dir.exists():
        out_dir.mkdir()
    rec = mx.recordio.MXIndexedRecordIO(str(rec_path / 'train.idx'), str(rec_path / 'train.rec'), 'r')
    # Record 0 is a header record; its first label field is used here as the
    # record count — presumably the total number of images (TODO: confirm).
    header, _ = mx.recordio.unpack(rec.read_idx(0))
    max_idx = int(header.label[0])
    for idx in tqdm(range(1, max_idx)):
        header, img_arr = mx.recordio.unpack_img(rec.read_idx(idx))
        label = int(header.label)
        class_dir = out_dir / str(label)
        if not class_dir.exists():
            class_dir.mkdir()
        Image.fromarray(img_arr).save(class_dir / '{}.jpg'.format(idx), quality=95)
