import pickle
from os import listdir, remove, rename, rmdir
from os.path import join, split, splitext

import cv2
import torch
from face_recognition import face_locations, load_image_file
from numpy import array, concatenate, load, rot90, save
from scipy.sparse import data
from sklearn.neighbors import KNeighborsClassifier
from tqdm import tqdm

import config as cfg
from models.facenet.facenet import Facenet


def add_people(del_bad_pic=True):
    """Encode face pictures of newly added people into Facenet feature vectors.

    Every picture under ``cfg.pictures_of_add/<name>/`` is moved into
    ``cfg.pictures_of_konw`` (renamed ``<name>_<index><ext>``, continuing the
    person's existing numbering), the largest detected face is cropped, and
    its embedding is computed with the Facenet model.

    Args:
        del_bad_pic: when True, pictures in which no face is detected are
            deleted from the known-pictures folder instead of being kept.

    Returns:
        tuple: ``(features, names, warn_msgs)`` — an ``(N, D)`` array of face
        embeddings, an ``(N,)`` array of person names, and a list of warning
        strings for pictures where no face was found.
    """
    features = []
    names = []
    warn_msgs = []

    # Load the Facenet weights on CPU; strict=False tolerates key mismatches.
    path = 'models/facenet/facenet_mobilenet.pth'
    model = Facenet(mode='predict').eval()
    state_dict = torch.load(path, map_location='cpu')
    model.load_state_dict(state_dict, strict=False)

    names_dir = listdir(cfg.pictures_of_add)
    paths = listdir(cfg.pictures_of_konw)
    for n_dir in names_dir:
        # Count pictures this person already has so new files continue the numbering.
        had_num = 1
        for p in paths:
            if n_dir in p:
                had_num += 1
        pics = listdir(join(cfg.pictures_of_add, n_dir))
        for pic in pics:
            org_path = join(cfg.pictures_of_add, n_dir, pic)
            # BUGFIX: was pic[-4:], which mangled extensions that are not
            # exactly three characters long (e.g. ".jpeg").
            ext = splitext(pic)[1]
            img_path = join(cfg.pictures_of_konw, '{0}_{1}{2}'.format(n_dir, had_num, ext))
            had_num += 1
            rename(org_path, img_path)

            image = load_image_file(img_path)
            image = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
            # iPhone photos need a 90-degree rotation; comment this line out otherwise.
            # .copy() materializes the rotated view — rot90 returns a
            # non-contiguous view that dlib/cv2 can reject.
            image = rot90(image, 3).copy()
            # BUGFIX: model was 'hogf' (typo); face_recognition expects 'hog' or 'cnn'.
            boxes = face_locations(image, model='hog')
            if len(boxes) == 0:
                # No face detected: record a warning for the summary printed
                # at the end, and optionally drop the picture.
                # BUGFIX: old message wrongly claimed "more than 1 face" —
                # multi-face pictures are handled below by taking the largest.
                warn_msgs.append("no face found in file {0}".format(img_path))
                if del_bad_pic:
                    remove(img_path)
                    had_num -= 1
                continue
            # Keep only the largest face; boxes are (top, right, bottom, left).
            box = max(boxes, key=lambda b: (b[2] - b[0]) * (b[1] - b[3]))
            top, right, bottom, left = box
            face = image[top:bottom, left:right]
            face = cv2.cvtColor(face, cv2.COLOR_RGB2BGR)
            face = face / 255.
            # NHWC -> NCHW for the torch model.
            faces = array([cv2.resize(face, (160, 160))]).transpose(0, 3, 1, 2)
            faces = torch.from_numpy(faces).type(torch.FloatTensor)
            with torch.no_grad():  # inference only — skip the autograd graph
                feature = model(faces).numpy()
            features.append(feature[0])
            # The person's name is the filename prefix before the first '_'.
            name = split(img_path)[1].split('_')[0]
            names.append(name)

        # The per-person staging directory is empty now; remove it.
        rmdir(join(cfg.pictures_of_add, n_dir))
    return array(features), array(names), warn_msgs


if __name__ == '__main__':
    print("loading")

    # Load previously stored encodings; start from empty arrays on first run
    # instead of crashing when the .npy files do not exist yet.
    try:
        o_names = load('data/names.npy')
        o_features = load('data/features.npy')
    except FileNotFoundError:
        o_names = array([])
        o_features = array([])

    print('start encoding faces')
    features, names, warn_msgs = add_people()

    # BUGFIX: the old code compared numpy arrays to [] with ==, which does
    # an elementwise comparison (ambiguous/unreliable truth value). Use .size.
    if features.size == 0:
        # Nothing new was encoded; keep the previously stored data as-is.
        features, names = o_features, o_names
    elif o_names.size > 0:
        features = concatenate((features, o_features), axis=0)
        names = concatenate((names, o_names), axis=0)

    # Fit a distance-weighted 3-NN classifier over the embeddings and persist
    # both the classifier and the raw encodings for the next run.
    neigh = KNeighborsClassifier(n_neighbors=3, algorithm='ball_tree', weights='distance')
    neigh.fit(features, names)
    with open('data/knnd3.pickle', 'wb') as f:
        pickle.dump(neigh, f)
    save('data/names.npy', names)
    save('data/features.npy', features)

    print('encode faces complete, take {} warnings:'.format(len(warn_msgs)))
    for msg in warn_msgs:
        print('\twarnings:{}'.format(msg))