import os
import logging
from logging import handlers
# from time import time
import time
import numpy as np
import cv2
import bcolz
import pickle
from PIL import Image
import mxnet as mx
import torchvision.transforms as trans
# Logging helpers
class Logger(object):
    """Logger that writes to both the console and a daily-rotating file.

    The log file is created under a ``Logs/`` directory located next to
    this module.
    """

    # Mapping from level name to the stdlib logging level constant.
    level_relations = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warning": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL
    }

    def __init__(self, filename="test.log", level="info", when="D", backupCount=3,
                 fmt="%(asctime)s - [line:%(lineno)d] - %(message)s"):
        """Configure a logger named after *filename*.

        filename    -- log file name; also used as the logger name
        level       -- key of ``level_relations``; unknown values fall back
                       to INFO (the original passed None to setLevel, which
                       raises TypeError)
        when        -- rotation interval unit for TimedRotatingFileHandler
        backupCount -- number of rotated files kept before auto-deletion
        fmt         -- logging format string
        """
        format_str = logging.Formatter(fmt)

        # Console handler.
        streamHandler = logging.StreamHandler()
        streamHandler.setFormatter(format_str)

        # File handler: rotate every `when` interval, keep `backupCount`
        # old files before deleting the oldest.
        log_path = os.path.dirname(os.path.realpath(__file__)) + '/Logs/'
        ensure_folder(log_path)
        log_name = log_path + filename
        fileHandler = handlers.TimedRotatingFileHandler(filename=log_name, when=when,
                                                        backupCount=backupCount,
                                                        encoding="utf-8")
        fileHandler.setFormatter(format_str)

        self.logger = logging.getLogger(filename)
        # Unknown level names fall back to INFO instead of crashing.
        self.logger.setLevel(self.level_relations.get(level, logging.INFO))
        # Guard against stacking duplicate handlers when Logger(filename)
        # is constructed more than once for the same name.
        if not self.logger.handlers:
            self.logger.addHandler(streamHandler)
            self.logger.addHandler(fileHandler)

def get_logger1():
    """Return the root logger configured with a single console handler.

    Repeated calls reuse the already-attached handler instead of stacking
    a new StreamHandler every time (the original duplicated every log
    message once per call).
    """
    logger = logging.getLogger()
    if not logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter("%(asctime)s %(levelname)s \t%(message)s")
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger

def get_logger(filename):
    """Return the root logger writing to ``Logs/<filename>.log``.

    Creates the ``Logs/`` directory next to this module if needed.
    Repeated calls no longer stack duplicate FileHandlers for the same
    file, and the stray debug print of the log path is removed.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # makedirs(exist_ok=True) also creates missing parents and is
    # race-free, unlike a bare isdir-check + os.mkdir.
    log_path = os.path.dirname(os.path.realpath(__file__)) + '/Logs/'
    os.makedirs(log_path, exist_ok=True)
    log_name = log_path + filename + '.log'

    # Only attach a handler if this exact file isn't already handled.
    target = os.path.abspath(log_name)
    already = any(isinstance(h, logging.FileHandler)
                  and getattr(h, 'baseFilename', None) == target
                  for h in logger.handlers)
    if not already:
        fh = logging.FileHandler(log_name, mode='a+')
        formatter = logging.Formatter("%(asctime)s-[line:%(lineno)d] - %(levelname)s: %(message)s")
        fh.setFormatter(formatter)
        logger.addHandler(fh)

    return logger

def ensure_folder(folder):
    """Create *folder* (including missing parents) if it does not exist.

    os.makedirs with exist_ok=True handles nested paths and avoids the
    check-then-create race of the original isdir + single-level os.mkdir.
    """
    os.makedirs(folder, exist_ok=True)

def load_bin(path, rootdir, transform, image_size=(112, 112)):
    """Decode an mxnet-style verification ``.bin`` into a bcolz array on disk.

    path       -- pickle file holding (bins, issame_list)
    rootdir    -- directory for the bcolz carray; also prefixes the
                  '<rootdir>_list.npy' pair-label file written at the end
    transform  -- torchvision transform applied to each PIL image
    image_size -- (height, width) of the decoded images

    Returns (data, issame_list).

    NOTE(review): a second ``load_bin`` defined later in this module shadows
    this one at import time, so the three-argument call in ``parse_bin``
    raises TypeError — one of the two should be renamed.
    """
    ensure_folder(rootdir)

    # `with` closes the pickle file; the original leaked the handle.
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f, encoding='bytes')
    data = bcolz.fill([len(bins), 3, image_size[0], image_size[1]],
                      dtype=np.float32, rootdir=rootdir, mode='w')
    for i, _bin in enumerate(bins):
        img = mx.image.imdecode(_bin).asnumpy()
        # mxnet decodes to RGB; the cached array historically stores BGR.
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img = Image.fromarray(img.astype(np.uint8))
        data[i, ...] = transform(img)
        # The original bumped `i` before the modulo check, so it reported
        # after items 1000, 2000, ... — keep that cadence with (i + 1).
        if (i + 1) % 1000 == 0:
            print('loading bin', i + 1)

    print(data.shape)
    np.save(str(rootdir) + '_list', np.array(issame_list))
    return data, issame_list


def load_one_item(bin):
    """Decode one raw image record and return it as a grayscale ndarray."""
    decoded = mx.image.imdecode(bin).asnumpy()
    return cv2.cvtColor(decoded, cv2.COLOR_RGB2GRAY)

def load_bin(path, pair_list):
    """Load verification images keyed by their name in the pair list.

    path      -- pickle file holding (bins, issame_list); bins[2*i] and
                 bins[2*i + 1] are the two images of pair i
    pair_list -- text file, one pair per line: "<name_a> <name_b> ..."

    Returns a dict mapping image name -> grayscale ndarray; each name is
    decoded only once even if it appears in several pairs.

    NOTE(review): this definition shadows the earlier three-argument
    ``load_bin`` in this module — one of the two should be renamed.
    """
    # `with` closes the pickle file; the original leaked the handle.
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f, encoding='bytes')
    imges_dic = {}

    with open(pair_list, 'r') as fd:
        pairs = fd.readlines()
    for i, pair in enumerate(pairs):
        splits = pair.split()
        if len(splits) < 2:
            # Tolerate blank/short lines instead of raising IndexError.
            continue
        if splits[0] not in imges_dic:
            imges_dic[splits[0]] = load_one_item(bins[i * 2])
        if splits[1] not in imges_dic:
            imges_dic[splits[1]] = load_one_item(bins[i * 2 + 1])

    return imges_dic

def save_bin(path, rootdir, image_size=(112, 112)):
    """Dump every image of an mxnet verification ``.bin`` as a JPEG file.

    path       -- pickle file holding (bins, issame_list)
    rootdir    -- output directory; files are written as '<index>.jpg'
    image_size -- kept for interface compatibility; unused here

    Returns True when all images have been written.
    """
    ensure_folder(rootdir)

    # `with` closes the pickle file; the original leaked the handle.
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f, encoding='bytes')
    for i, _bin in enumerate(bins):
        img = mx.image.imdecode(_bin).asnumpy()
        img = Image.fromarray(img.astype(np.uint8))
        # os.path.join replaces the hard-coded backslash so the path is
        # portable; the two per-image debug prints are dropped.
        img.save(os.path.join(rootdir, f'{i}.jpg'))
        # Original incremented `i` before the check — report (i + 1).
        if (i + 1) % 1000 == 0:
            print('save bin ok', i + 1)
    return True

def parse_img():
    """Export the LFW verification bin to individual JPEG files."""
    rec_path = r"e:\faces_webface_112x112"
    name = 'lfw'
    save_bin(f"{rec_path}/{name}.bin", f"{rec_path}/{name}img")

def parse_bin():
    """Cache each verification ``.bin`` as a normalized bcolz array on disk.

    NOTE(review): ``load_bin`` is redefined later in this module with a
    two-argument signature, so this three-argument call raises TypeError
    at runtime until the name clash is resolved.
    """
    test_transform = trans.Compose([
        trans.ToTensor(),
        # Map pixel values from [0, 1] to [-1, 1] per channel.
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    rec_path = r"e:\faces_webface_112x112"

    # Full set: ['agedb_30', 'cfp_fp', 'lfw', 'calfw', 'cfp_ff', 'cplfw', 'vgg2_fp']
    bin_files = ['lfw']

    # Iterate names directly instead of indexing via range(len(...)).
    for name in bin_files:
        load_bin(f"{rec_path}/{name}.bin", f"{rec_path}/{name}", test_transform)

def get_val_pair(path, name):
    """Load a cached bcolz verification set and its same-pair labels.

    Returns (carray, issame): the preprocessed image array and the boolean
    pair-label array that load_bin saved as '<name>_list.npy'.
    """
    carray = bcolz.carray(rootdir=fr"{path}/{name}", mode='r')
    # The original chained a no-op str.format onto an already-evaluated
    # f-string; the f-string alone builds the full path.
    issame = np.load(f'{path}/{name}_list.npy')
    return carray, issame


if __name__ == '__main__':
    # Script entry point: export the LFW bin archive to individual JPEGs.
    parse_img()
