import os
import cv2
from sklearn import preprocessing
from sklearn import decomposition
import numpy as np
import time

def detFaceByCamra(name):
    """Train a face recognizer from ./faces/train/<person>/* images, then
    authenticate the given person through the default webcam.

    Args:
        name: person name to authenticate; must match one of the
            sub-directory names under ./faces/train/.

    Returns:
        0 once the recognizer has matched `name` in at least five frames.

    Raises:
        RuntimeError: if no faces are found in the training data or the
            camera stops delivering frames.
    """
    # --- gather training image paths and their (string) person labels ----
    imgpath = []
    labels = []
    path = "./faces/train/"
    for person in os.listdir(path):
        for fname in os.listdir(path + str(person)):
            imgpath.append(path + str(person) + '/' + str(fname))
            labels.append(str(person))

    pLabel = preprocessing.LabelEncoder()
    pLabel.fit(labels)
    # Encode all labels in one vectorized call instead of per-item transforms.
    intLabels = list(pLabel.transform(labels))

    # --- detect and crop faces from every training image ------------------
    train_img = []
    train_labels = []
    face = cv2.CascadeClassifier("./haarcascade_frontalface_alt.xml")
    for n, p in enumerate(imgpath):
        img = cv2.imread(p)
        if img is None:
            # Unreadable/non-image file: skip instead of crashing in cvtColor.
            continue
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for (x, y, w, h) in face.detectMultiScale(imgGray, 1.5, 5):
            # BUGFIX: original sliced columns as x:x+h; the crop width is w.
            train_img.append(imgGray[y:y+h, x:x+w])
            train_labels.append(intLabels[n])

    if not train_img:
        # Original divided by len(train_img) below — ZeroDivisionError.
        raise RuntimeError("no faces detected in the training data")

    # --- per-face PCA dimensionality reduction ----------------------------
    # NOTE(review): fitting a separate PCA per image and feeding the float
    # output to LBPH (which normally expects 8-bit grayscale) is unusual —
    # kept as-is to preserve the original pipeline, but worth confirming.
    train_img2 = []
    allfeatures = 0
    for thisface in train_img:
        pca = decomposition.PCA()
        pca.fit(thisface)
        variances = pca.explained_variance_
        # Count components whose explained variance exceeds 0.6 (diagnostic).
        ft2_num = int(np.sum(variances > 0.6))
        allfeatures += ft2_num
        print(ft2_num)
        pca.n_components = 67
        train_img2.append(pca.fit_transform(thisface))
    print("ave feature: " + str(allfeatures / len(train_img)))
    train_labels = np.array(train_labels)

    cap = cv2.VideoCapture(0)
    rec = cv2.face.LBPHFaceRecognizer_create()
    rec.train(train_img2, train_labels)

    font = cv2.FONT_HERSHEY_TRIPLEX
    au_count = 0
    try:
        while True:
            ret, img = cap.read()
            if not ret:
                # BUGFIX: original dereferenced a None frame on grab failure.
                raise RuntimeError("camera read failed")
            img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # BUGFIX: detect on the grayscale frame, consistent with training
            # (the Haar cascade operates on single-channel images).
            for (x, y, w, h) in face.detectMultiScale(img_gray, 1.5, 5):
                rst, _confidence = rec.predict(img_gray[y:y+h, x:x+w])
                rst2 = pLabel.inverse_transform([rst])[0]
                print("This person is " + str(rst2))
                cv2.putText(img, str(rst2), (20, 20), font, 1,
                            (0, 0, 255), 1, False)
                if rst2 == name:
                    au_count += 1
            cv2.imshow("My Capture", img)
            time.sleep(0.1)
            cv2.waitKey(1)
            # BUGFIX: two matches in one frame could skip exactly 5 under the
            # original `== 5` test, looping forever.
            if au_count >= 5:
                return 0
    finally:
        # BUGFIX: release the camera and window even on early return/raise.
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    # Console entry point: greet, ask for a name, run camera authentication.
    print("欢迎使用身份验证系统")
    user_name = input("Please input your name: ")
    result = detFaceByCamra(user_name)
    if result == 0:
        print("验证通过")
    print("All done.")


