import sys
sys.path.append('../face_recog/insightface/deploy')
sys.path.append('../face_recog/insightface/src/common')
import face_preprocess
from imutils import paths
import numpy as np
import face_model
import argparse
import pickle
import cv2
import os

# Command-line configuration shared by the whole script.
parser = argparse.ArgumentParser()

parser.add_argument("--dataset", default="../datasets/train",
                    help="Path to training dataset")
parser.add_argument("--embeddings", default="outputs/embeddings.pickle")

# Arguments consumed by the insightface FaceModel.
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--model', default='../face_recog/insightface/models/model-y1-test2/model,0', help='path to load model.')
parser.add_argument('--ga-model', default='', help='path to load model.')
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--det', default=0, type=int, help='mtcnn option, 1 means using R+O, 0 means detect from begining')
parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')

args = parser.parse_args()

print(args.gpu)

# Grab the paths to the input images in our dataset
print("[INFO] quantifying faces...")


def add_pickle_embedding(file_dir):
    """Embed every face image under *file_dir* and append the results to
    the pickle file at ``args.embeddings``.

    The person's label is the last component of *file_dir*
    (e.g. '../images/chenyicheng' -> 'chenyicheng').

    Parameters
    ----------
    file_dir : str
        Directory containing the images of a single person.
    """
    imagePaths = list(paths.list_images(file_dir))
    embedding_model = face_model.FaceModel(args)

    # BUG FIX: the original code reassigned `file_dir` to the embeddings
    # *output* directory before deriving the label, so every face was
    # labelled with the output folder name (e.g. 'outputs') instead of
    # the person's directory name. Derive the label from the parameter,
    # portably, before touching any output paths.
    name = os.path.basename(os.path.normpath(file_dir))

    # Make sure the directory that will hold the pickle file exists.
    # os.path.dirname replaces the fragile rindex('/') (which crashed on
    # bare filenames and assumed '/' separators).
    out_dir = os.path.dirname(args.embeddings)
    if out_dir and not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    # Load previously stored embeddings, if any. An absent or empty file
    # simply means we start from scratch — no need to pre-create it,
    # since we always rewrite the file at the end.
    data = {}
    if os.path.exists(args.embeddings) and os.path.getsize(args.embeddings) > 0:
        with open(args.embeddings, "rb") as f:
            data = pickle.load(f)

    # First run: initialise every list kept in the pickle.
    if 'names' not in data:
        data['partment'] = []
        data['user_id'] = []
        data['embeddings'] = []
        data['names'] = []
    print(len(data['names']))

    for (i, imagePath) in enumerate(imagePaths):
        print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
        print(name)

        # Load the image, resize to the model's input size, convert
        # BGR -> RGB and reorder HWC -> CHW as the model expects.
        image = cv2.imread(imagePath)
        image = cv2.resize(image, (112, 112))
        nimg = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        nimg = np.transpose(nimg, (2, 0, 1))

        # Get the face embedding vector.
        face_embedding = embedding_model.get_feature(nimg)

        # Append this person's name and embedding to their lists.
        data['embeddings'].append(face_embedding)
        data['names'].append(name)

    # Persist the updated embeddings.
    with open(args.embeddings, "wb") as f:
        pickle.dump(data, f)
    print(len(data['names']))
# Build/extend the embeddings pickle from this one person's image folder.
# NOTE(review): path is hard-coded here instead of using args.dataset —
# presumably a one-off run; confirm whether --dataset was meant to drive this.
add_pickle_embedding('../images/chenyicheng')

