import cv2


def recognize(campath):
    """Detect and recognize faces in a video stream, displaying results live.

    Loads a pre-trained LBPH model plus Haar cascades from the ``cascades/``
    directory, then for every frame: resizes it to a 900-px width, detects
    frontal faces, predicts an identity for each face, and draws a green box
    with the predicted name and confidence. Runs until the stream ends or the
    user presses 'q'.

    Args:
        campath: Video file path or camera index accepted by cv2.VideoCapture.
    """
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read('cascades/trainer.yml')

    # Single frontal-face cascade; previously this file also loaded eye and
    # profile-face cascades whose results were never used (dead code removed).
    faceCascade = cv2.CascadeClassifier("cascades/haarcascade_frontalface_default.xml")

    font = cv2.FONT_HERSHEY_DUPLEX
    # Index in this list corresponds to the label id assigned at training time.
    names = ['1', '王乐欣']

    video = cv2.VideoCapture(campath)

    frame_index = 1
    while True:
        grabbed, raw_frame = video.read()
        if not grabbed:
            # End of stream or camera failure: stop cleanly instead of
            # crashing on raw_frame being None.
            break

        # Resize to a fixed 900-px width, preserving aspect ratio.
        height, width = raw_frame.shape[:2]
        ratio = 900 / width
        dim = (900, int(height * ratio))
        frame = cv2.resize(raw_frame, dim, interpolation=cv2.INTER_AREA)

        grayframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faceRects = faceCascade.detectMultiScale(grayframe,
                                                 scaleFactor=1.1,
                                                 minNeighbors=5,
                                                 minSize=(30, 30),
                                                 flags=cv2.CASCADE_SCALE_IMAGE)

        for (x, y, w, h) in faceRects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            idnum, confidence = recognizer.predict(grayframe[y:y + h, x:x + w])
            print(idnum, frame_index)
            # LBPH "confidence" is a distance: lower means a better match.
            # Guard the index so a label id outside `names` cannot raise.
            if confidence < 100 and 0 <= idnum < len(names):
                label = names[idnum]
            else:
                label = "unknown"
            confidence_text = "{0}%".format(round(100 - confidence))
            cv2.putText(frame, str(label), (x + 5, y - 5), font, 1, (0, 0, 255), 1)
            cv2.putText(frame, confidence_text, (x + 5, y + h - 5), font, 1, (0, 0, 0), 1)

        cv2.imshow("tracking", frame)
        frame_index += 1

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    # Run recognition against the bundled sample video.
    recognize("shipin.mp4")