import cv2
import base64
import FacePlusPlusAPI
import face_recognition
import os
import time
import datetime
import Signed
import queue
import json
from multiprocessing import Process, Queue

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

# Initialize some variables
# NOTE(review): Face++ API credentials are hard-coded in source — move them to
# environment variables or a config file, and rotate these keys.
facePP_API_Key = 'Dh_1F9Ax2b5feD7KkAc0nu46v3CVnRRX'
facePP_API_Secret = 'VYKh8ciikCPCrbg4Lm2iuJrcxouqDNXP'
# Bounded queue of raw camera frames: produced by getFrame, consumed by
# findFaceByWeb and imshow.
queue_frame = Queue(6)
# Detection results ([face_locations, face_encodings]) from findFaceByWeb to imshow.
queue_return = Queue()
# Face tokens sent from imshow to searchFace for identification.
queue_Token = Queue()
# Search results ('waiting' or [face_token, confidence]) from searchFace back to imshow.
queue_SearchR = Queue()
# Token-to-name mapping loaded once at import time; read by token2name.
# NOTE(review): the file handle from open() is never closed — consider a with-block.
jsonFL = json.load(open('faces_list.json', 'r'))

def cv22base64(frame):
    """Encode an OpenCV image as base64 JPEG text.

    Args:
        frame: image array accepted by ``cv2.imencode`` (BGR ndarray).

    Returns:
        bytes: the base64-encoded JPEG representation of *frame*.

    Raises:
        ValueError: if JPEG encoding fails (previously the failure flag was
            ignored and an invalid buffer would be base64-encoded silently).
    """
    # Convert captured image to JPG
    ret, buffer = cv2.imencode('.jpg', frame)
    if not ret:
        raise ValueError('cv2.imencode failed to encode frame as JPEG')

    # Convert to base64 encoding
    return base64.b64encode(buffer)

def getFrame(q, gap=0.042):
    """Continuously read webcam frames and feed them into *q*.

    The camera is read as fast as possible (keeping its internal buffer
    drained) but a frame is enqueued at most once every *gap* seconds
    (~24 fps by default). When the queue is full it is drained first so
    consumers always see recent frames rather than stale ones.

    Args:
        q: multiprocessing.Queue receiving frames.
        gap: minimum seconds between enqueued frames (default 0.042).
    """
    last = 0
    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        if not ret:
            # Camera read failed — don't enqueue a None frame
            # (previously the failure flag was ignored).
            continue
        # Clear queue when it is full
        if q.full():
            try:
                while True:
                    q.get_nowait()
            except queue.Empty:
                pass
        if (time.time() - last) > gap:
            q.put(frame)
            last = time.time()

def findFaceByWeb(qF, qR):
    """Detect faces via the Face++ web API and publish results.

    Pulls a frame from *qF* at most every 0.5 s (to stay within API rate
    limits), sends it to the Face++ detect endpoint, and puts
    ``[face_locations, face_encodings]`` (rectangles and face tokens)
    onto *qR* for the display process.

    Args:
        qF: multiprocessing.Queue of camera frames.
        qR: multiprocessing.Queue receiving detection results.
    """
    gap = 0.5
    last = 0
    while True:
        if (time.time() - last) > gap:
            frame = qF.get(True)
            frame_as_text = cv22base64(frame)
            faces = FacePlusPlusAPI.faceppDetectAPI_base64(
                facePP_API_Key, facePP_API_Secret, frame_as_text
            )
            face_locations = [i["face_rectangle"] for i in faces]
            face_encodings = [i["face_token"] for i in faces]
            qR.put([face_locations, face_encodings])
            last = time.time()
        else:
            # Previously this loop spun at full speed while rate-limited,
            # pegging a CPU core; a short sleep keeps the timing behaviour
            # while letting the process idle.
            time.sleep(0.01)
def imshow(qF, qR, qT, qTR):
    """Fullscreen display loop: draws the guide box, sign-in status text and
    FPS onto each frame and shows it until the user presses 'q'.

    qF: frames from getFrame.
    qR: [face_locations, face_encodings] from findFaceByWeb.
    qT: face tokens sent out to searchFace.
    qTR: search results ('waiting' or [face_token, confidence]) coming back.
    """
    last = 0
    # flag gates Signed.write so a person is recorded at most once per
    # continuous "face centered" streak; it is reset when the face leaves
    # the guide box.
    flag = False
    signedTime = 0
    # NOTE(review): screenWidth/screenHeight are never used below — confirm dead.
    screenWidth = 1024
    screenHeight = 1280
    window_name = 'full'
    cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(
        window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    while True:
        # Get frame
        frame = qF.get(True)
        
        # Get face detect result; detection arrives slower than frames, so an
        # empty queue means "reuse nothing this frame".
        if qR.empty():
            face_locations = []
            face_encodings = []
            print('imshow, qR.empty')
        else:
            face_locations, face_encodings = qR.get(True)
        
        # Get rectangle data — only when exactly one face was detected.
        # Face++ rectangles are {top, left, width, height}.
        face_location = None
        if len(face_locations) == 1:
            face_location = face_locations[0]
            top = face_location["top"]
            left = face_location["left"]
            height = face_location["height"]
            width = face_location["width"]
            bottom = top + height
            right = left + width
        else:
            height = 0
        
        # Get face token (parallel list to face_locations, so this is set
        # whenever face_location is set)
        if len(face_encodings) == 1:
            face_encoding = face_encodings[0]

        # Draw box
        font = cv2.FONT_HERSHEY_DUPLEX
        if isFaceCenter(face_location):
            # Get face search result; if none is pending yet, request one by
            # sending the token to the searchFace process.
            if qTR.empty():
                qT.put(face_encoding)
                searchResult = 'waiting'
            else:
                searchResult = qTR.get(True)

            # Draw on frame
            if searchResult == 'waiting':
                # While searching: blue guide box
                color = (255, 0, 0)
                cv2.rectangle(frame, (192, 112), (448, 384), color, 2)
                text = 'Searching...'
                cv2.putText(frame, text, (192, 418), font, 1.0, color, 1)
            elif searchResult[1] > 0:
                # Search result in local set: green box; record sign-in once
                color = (0, 255, 0)
                cv2.rectangle(frame, (192, 112), (448, 384), color, 2)
                name = token2name(searchResult[0])
                if not flag:
                    # Signed.write returns a truthy value (>0) when this name
                    # was already recorded today — presumably; confirm.
                    signedTime = Signed.write(name, datetime.datetime.today())
                    flag = not flag
                if signedTime > 0:
                    text = 'Already sign-in'
                else:
                    text = 'Sign-in success'
                print(signedTime)
                cv2.putText(frame, text, (192, 418), font, 1.0, color, 1)
            elif searchResult[1] == 0:
                # Search result not in local set: orange box, sign-in fails
                color = (0, 165, 255)
                cv2.rectangle(frame, (192, 112), (448, 384), color, 2)
                text = 'Sign-in fail'
                cv2.putText(frame, text, (192, 418), font, 1.0, color, 1)

            # Recycle the result so subsequent frames reuse it instead of
            # triggering a new web search every frame.
            if qTR.empty():
                qTR.put(searchResult)
        else:
            flag = not flag
            # face not in center: red box
            color = (0, 0, 255)
            cv2.rectangle(frame, (192, 112), (448, 384), color, 2)
            text = 'Face not in position'
            cv2.putText(frame, text, (162, 418), font, 1.0, color, 1)
            # Clear queue — stale search results must not apply to the next
            # person who steps into the guide box.
            if not qTR.empty():
                try:
                    while True:
                        qTR.get_nowait()
                except queue.Empty:
                    pass

        # FPS overlay, derived from the wall-clock gap between iterations
        gap = time.time() - last
        last = time.time()
        x = format(1 / gap, '.0f')
        cv2.putText(frame, x, (10, 50), font, 1.0, (0, 255, 0), 1)

        # Display the resulting image
        cv2.imshow(window_name, frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # Put the detection back so the box persists until the next web
        # detection replaces it.
        if qR.empty():
            qR.put([face_locations, face_encodings])
def isFaceCenter(face_location):
    """Return True when the detected face is acceptably framed in the guide box.

    A face counts as centered when its top-left corner lies within 100 px of
    the on-screen guide box corner (192, 112) and its height is within 60 px
    of the target 256 px.

    Args:
        face_location: Face++ rectangle dict with "left", "top" and "height"
            keys, or None when no single face was detected.

    Returns:
        bool: True if the face is positioned for sign-in.
    """
    if face_location is None:  # was `== None`; identity check is the idiom
        return False
    gapOfLeft = abs(face_location["left"] - 192)
    gapOfTop = abs(face_location["top"] - 112)
    gapOfHeight = face_location["height"] - 256
    if gapOfLeft < 100 and gapOfTop < 100 and abs(gapOfHeight) < 60:
        return True
    # NOTE(review): any face taller than 296 px passes regardless of its
    # position — presumably to accept faces very close to the camera; confirm.
    return gapOfHeight > 40

def searchFace(qToken, qTR):
    """Identify face tokens against the Face++ faceset and publish results.

    Blocks on *qToken* for the next token from the display process, puts a
    'waiting' placeholder on *qTR*, queries the Face++ search API, buckets
    the confidence against the API's 1e-3/1e-4/1e-5 thresholds into 0-3,
    then replaces the placeholder with ``[face_token, confidence_level]``.

    Args:
        qToken: multiprocessing.Queue of face tokens to look up.
        qTR: multiprocessing.Queue receiving search results.
    """
    while True:
        # Blocking get replaces the former `if not qToken.empty()` poll,
        # which busy-looped at full CPU speed while idle.
        token = qToken.get(True)
        qTR.put('waiting')
        result = FacePlusPlusAPI.searchFaceAPI_token(
            facePP_API_Key, facePP_API_Secret, token, 'test')
        # result[0] holds the API's confidence thresholds; result[1] the
        # ranked candidate matches.
        best = result[1][0]
        thresholds = result[0]
        confidence = best['confidence']
        face_Token = best['face_token']
        if confidence > thresholds['1e-5']:
            confidence = 3
        elif confidence > thresholds['1e-4']:
            confidence = 2
        elif confidence > thresholds['1e-3']:
            confidence = 1
        else:
            confidence = 0
        # Clear the 'waiting' placeholder (and anything stale) so the final
        # answer is the only item on the queue.
        try:
            while True:
                qTR.get_nowait()
        except queue.Empty:
            pass
        qTR.put([face_Token, confidence])
def token2name(token):
    """Resolve a Face++ face token to the registered person's name.

    Looks the token up in the 'token2face' table loaded from
    faces_list.json at import time; raises KeyError for unknown tokens.
    """
    token_table = jsonFL['token2face']
    return token_table[token]

if __name__ == '__main__':
    # Pipeline: camera -> queue_frame -> {detection, display};
    # detection -> queue_return -> display;
    # display -> queue_Token -> search -> queue_SearchR -> display.
    pGetFrame = Process(target = getFrame, args = (queue_frame, ))
    pFindFace = Process(target = findFaceByWeb, args = (queue_frame, queue_return))
    pImageShow = Process(target = imshow, args = (queue_frame, queue_return, queue_Token, queue_SearchR))
    pSearchFace = Process(target = searchFace, args = (queue_Token, queue_SearchR))
    
    pGetFrame.start()
    pFindFace.start()
    pImageShow.start()
    pSearchFace.start()

    # Only the display process exits on its own (user pressed 'q'); the
    # other three run infinite loops and are killed once it returns.
    pImageShow.join()
    pGetFrame.terminate()
    pFindFace.terminate()
    pSearchFace.terminate()

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
