#!/usr/bin/env python3
#coding:utf-8

__author__ = 'xmxoxo<xmxoxo@qq.com>'

'''
测试数据：挑选10个不同人，各3张照片作为照片库，输入一张照片进行搜索
为了方便挑选的目录只含单人，即目录名不包含"_"
'''
from baselibs import *

import os
import json
import sys
import random
from faces_lib import *
from shutil import copyfile
from tqdm import tqdm

import fire

def get_all_folderer(filepath):
    '''Yield the names of all immediate subdirectories of *filepath*.

    Plain files are skipped; only entries that are directories are yielded.
    '''
    for entry in os.listdir(filepath):
        if os.path.isdir(os.path.join(filepath, entry)):
            yield entry


def pickfiles(filepath, outpath, num_folder=10, num_face=5, copy_file=1):
    '''Sample face images from per-person sub-directories and extract features.

    Randomly picks up to *num_folder* sub-directories of *filepath* and up to
    *num_face* images from each, optionally copies them into *outpath*, then
    runs a VGGFace (resnet50) model over all picked images and saves the
    feature matrix.

    Args:
        filepath:   root directory with one sub-directory per person.
        outpath:    output directory; receives faces.txt, vec.npy and
                    (optionally) the copied images.
        num_folder: number of sub-directories to sample; 0 means all.
        num_face:   max images sampled per sub-directory; 0 means all.
        copy_file:  1 = copy the chosen images into outpath, else skip.

    Side effects: writes faces.txt (image path list, one per line) and
    vec.npy (one feature row per image, same order as faces.txt).
    '''
    outfile = os.path.join(outpath, 'faces.txt')
    if os.path.exists(outfile):
        # An index file already exists: reuse it instead of re-sampling.
        txts = readtxt(outfile)
        result = txts.splitlines()
    else:
        all_folder = get_files(filepath, folder=1)
        all_folder = list(map(lambda x: x[0] + '/', all_folder))

        # Randomly choose num_folder sub-directories (0 keeps them all).
        if num_folder > 0:
            num_folder = min(num_folder, len(all_folder))
            all_folder = random.sample(all_folder, num_folder)

        print('pick %d folders:' % len(all_folder))

        # Randomly choose up to num_face images from each selected folder.
        result = []
        for subpath in all_folder:
            all_faces = [x[0] for x in get_files(subpath, folder=0)]
            if num_face > 0:
                # BUGFIX: cap with a local variable. The original reassigned
                # num_face itself, so one small folder permanently shrank the
                # sample size for every later folder.
                n_pick = min(num_face, len(all_faces))
                all_faces = random.sample(all_faces, n_pick)
            result.extend(all_faces)

        # Copy the selected images into the output directory.
        print('copy images....')
        mkfold(outpath)
        if copy_file == 1:
            for filename in result:
                fn = os.path.split(filename)[1]
                copyfile(filename, os.path.join(outpath, fn))

        # Save the picked file list as the index.
        savetofile('\n'.join(result), outfile)
        print('faces.txt saved...')

    print('result: %d files.' % len(result))

    # ----------------------------------------------------------------
    # Feature extraction, done in batches to bound memory usage.
    # Imports kept local: keras_vggface is heavy and only needed here.
    from keras_vggface.utils import preprocess_input
    from keras_vggface.vggface import VGGFace
    from math import ceil

    model = VGGFace(model='resnet50',
              include_top=False,
              input_shape=(224, 224, 3),
              pooling='avg')

    # Split the file list into prediction batches.
    batch_size = 128
    total = len(result)
    sentlist = [result[i*batch_size:(i+1)*batch_size] for i in range(ceil(total/batch_size))]

    print('get vector....')
    vfile = os.path.join(outpath, 'vec.npy')

    # Predict batch by batch, collect results, then stack ONCE at the end.
    # (The original np.vstack-ed inside the loop, copying the accumulated
    # array every iteration -- accidentally O(n^2).)
    chunks = []
    for dat in tqdm(sentlist, ncols=60):
        sample = load_images(dat)
        samples = preprocess_input(sample, version=2)
        chunks.append(model.predict(samples))

    out_vecs = np.vstack(chunks) if chunks else None

    # Save the feature matrix to file.
    np.save(vfile, out_vecs)
    print('vector saved...')

#-----------------------------------------

def create_dict():
    '''Build a {person_name: [picture_id, ...]} mapping from faces1.txt
    and save it as JSON.

    Each line is expected to look like ./<a>/<person>/<pic>, where the
    person name is path segment 2 and the picture id is the token after
    the first "_" in the file name -- TODO confirm against actual data.
    '''
    fn = './test_images_all/faces1.txt'
    txts = readtxt(fn)
    result = txts.splitlines()
    print('result:', len(result))
    ret = dict()
    for txt in result:
        # Split the path once instead of twice as the original did.
        parts = txt.split('/')
        pname = parts[2]
        pid = parts[3].split('_')[1]
        # setdefault replaces the `if pname in ret.keys()` branch;
        # ids are de-duplicated while preserving first-seen order.
        ids = ret.setdefault(pname, [])
        if pid not in ids:
            ids.append(pid)

    # Persist the dictionary as JSON.
    sdat = json.dumps(ret)
    outfile = './test_images_all/person.txt'
    savetofile (sdat, outfile)
    print('person saved...')

def face_feature(path):
    '''Extract feature vectors for every face image under *path*.

    Writes two files into *path*:
        filelist.txt - one image path per line (row order of vec.npy)
        vec.npy      - feature matrix, one row per image

    Uses the project helpers load_vggface_model / load_images /
    get_model_scores for the actual model work.
    '''
    from math import ceil

    # Collect every image file path under the directory.
    result = [x[0] for x in get_files(path, folder=0)]
    print('正在保存文件清单...')
    ofile = os.path.join(path, 'filelist.txt')
    savetofile('\n'.join(result), ofile)

    # Load the VGGFace model (project helper).
    model = load_vggface_model()

    # Split the file list into prediction batches to bound memory usage.
    batch_size = 128
    total = len(result)
    sample_list = [result[i*batch_size:(i+1)*batch_size] for i in range(ceil(total/batch_size))]

    print('正在提取向量....')
    # Collect per-batch vectors and stack ONCE at the end. The original
    # np.vstack-ed inside the loop, re-copying the accumulated array on
    # every iteration (accidentally O(n^2)).
    chunks = []
    for dat in tqdm(sample_list, ncols=60):
        samples = load_images(dat)
        chunks.append(get_model_scores(model, samples))

    out_vecs = np.vstack(chunks) if chunks else None

    # Save the feature matrix.
    print('正在保存向量文件....')
    vfile = os.path.join(path, 'vec.npy')
    np.save(vfile, out_vecs)
    print('特征向量文件已保存....')


def get_all_titles():
    '''Collect the names of all sub-directories of the download folder
    and save them as a title index for the NER model.'''
    path = r'H:\movie\downloadpic\download'
    titles = '\n'.join(get_all_folderer(path))

    outfile = 'ner_model/data/titles.txt'
    savetxt(titles, outfile)
    print(f'文件已保存：{outfile}')


if __name__ == '__main__':
    pass

    # Generate the feature-vector files
    filepath = './output/'
    #outpath = './test_images/'
    #pickfiles(filepath, outpath, num_folder=10, num_face=5, copy_file=1)

    #outpath = './test_images_500/'
    #pickfiles(filepath, outpath, num_folder=500, num_face=100, copy_file=0)

    outpath = './test_images_all/'
    #pickfiles(filepath, outpath, num_folder=0, num_face=0, copy_file=0)

    # Build the picture -> person-name dictionary
    # create_dict()

    # Extract features for the portrait images
    path = './orgian/'
    # face_feature(path)

    # get_all_titles()

    # Expose every module-level function as a CLI subcommand via fire,
    # e.g.  python thisfile.py face_feature ./orgian/
    fire.Fire()