# -*- coding: utf-8 -*- 
import cv2
import time
import os
import face_recognition
import time
import pickle
from flask import jsonify
# import RPi.GPIO as GPIO

class face_detector():
    """Recognize known faces in webcam frames with the face_recognition library.

    Known-face data is a pair ``(known_names, known_encodings)`` that is
    computed once from the images under ``./images/<person_name>/`` and cached
    in a pickle file (``self.location``) so the expensive encoding step does
    not run on every start.
    """

    def __init__(self, results=None):
        # File extensions accepted as training images.
        self.image_extensions = ['png', 'jpg', 'jpeg', 'gif']
        # Frame-skip toggle (currently always True; kept for future use).
        self.process_this_frame = True
        # Path of the pickled (known_names, known_encodings) cache.
        self.location = 'results.pkl'
        # GPIO pin number for the motion sensor (used only by the disabled
        # trigger() method below).
        self.monitor = 17
        # Bug fix: the original condition was inverted -- it reloaded from
        # disk when results WERE supplied and kept an empty list otherwise,
        # which made countFace() crash on self.results[0].
        # Also: the original used a mutable default argument (results=[]).
        if results:
            self.results = results
        else:
            self.results = self.load_weigth()

    def load_weigth(self):
        """Return cached (names, encodings); build and cache them if absent."""
        if os.path.exists(self.location):
            with open(self.location, 'rb') as f:
                results = pickle.load(f)
        else:
            # Bug fix: save_weigth() takes no positional argument (the
            # original passed self.location and raised TypeError).
            results = self.save_weigth()
        return results

    def save_weigth(self):
        """Encode faces under ./images/, pickle them to self.location, return them."""
        results = self.load_faces("./images/")
        with open(self.location, 'wb') as f:
            pickle.dump(results, f)
        # Bug fix: the original returned None, so the cache-miss path of
        # load_weigth() handed callers None instead of the encodings.
        return results

    def load_faces(self, path):
        """Scan path/<person_name>/*.<ext> and return (known_names, known_encodings).

        Each sub-directory name is taken as the person's name; every readable
        image in it that contains at least one face contributes one encoding.
        """
        print("loading training data")
        image_extensions = self.image_extensions
        # Sub-directories only (entries without a dot in the name).
        names = [i for i in os.listdir(path) if "." not in i]
        known_names = []
        known_encodings = []
        for name in names:
            sub_path = os.path.join(path, name)
            for image_name in os.listdir(sub_path):
                # Bug fix: rsplit(".")[1] raised IndexError for names without
                # a dot and picked the wrong piece for multi-dot names;
                # rsplit(".", 1)[-1] is the real extension (or the whole name,
                # which then simply fails the membership test).
                if image_name.rsplit(".", 1)[-1].lower() in image_extensions:
                    load_image = face_recognition.load_image_file(os.path.join(sub_path, image_name))
                    face_feature = face_recognition.face_encodings(load_image)
                    # Skip images in which no face was detected.
                    if len(face_feature) > 0:
                        image_face_encoding = face_feature[0]
                        known_names.append(name)
                        known_encodings.append(image_face_encoding)
        return known_names, known_encodings

    def countFace(self):
        """Grab one webcam frame and return [face_count, unique_name_list]."""
        cap = cv2.VideoCapture(0)
        try:
            ret, frame = cap.read()
        finally:
            # Bug fix: the capture device was never released, leaking the
            # camera handle on every call.
            cap.release()
        if not ret:
            # Robustness: camera gave no frame -- report "no faces" instead of
            # crashing on frame[:, :, ::-1] with frame=None.
            return [0, []]
        rgb_frame = frame[:, :, ::-1]  # OpenCV BGR -> face_recognition RGB
        known_names = self.results[0]
        known_encodings = self.results[1]
        if not self.process_this_frame:
            # Robustness: the original fell off the end and returned None,
            # which broke getFaces()'s sort.
            return [0, []]
        face_locations = face_recognition.face_locations(rgb_frame)  # all face positions
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)  # face feature vectors
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_encodings, face_encoding, tolerance=0.5)
            if True in matches:
                first_match_index = matches.index(True)
                name = known_names[first_match_index]
            else:
                name = "unknown"
            face_names.append(name)
        return [len(face_names), list(set(face_names))]

    def countFaceLive(self, ts=100, flg=False):
        """Run live recognition with an annotated preview window.

        Stops after *ts* seconds or when 'q' is pressed in the window, then
        returns [face_count, unique_name_list] for the last processed frame.
        """
        start = time.time()
        known_names = self.results[0]
        known_encodings = self.results[1]
        process_this_frame = True
        face_names = []
        # Bug fix: the original re-opened VideoCapture(0) on every loop
        # iteration (leaking camera handles) and never released it -- the
        # code after `while True` was unreachable and referenced an
        # undefined name `video_capture`.
        cap = cv2.VideoCapture(0)
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                rgb_small_frame = frame[:, :, ::-1]  # BGR -> RGB
                if process_this_frame:
                    face_locations = face_recognition.face_locations(rgb_small_frame)  # all face positions
                    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)  # face feature vectors
                    face_names = []
                    for face_encoding in face_encodings:
                        matches = face_recognition.compare_faces(known_encodings, face_encoding, tolerance=0.5)
                        if True in matches:
                            first_match_index = matches.index(True)
                            name = known_names[first_match_index]
                        else:
                            name = "unknown"
                        face_names.append(name)
                    # Draw a bounding box and a name label under each face.
                    for (top, right, bottom, left), name in zip(face_locations, face_names):
                        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                        font = cv2.FONT_HERSHEY_DUPLEX
                        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
                    cv2.imshow('Video', frame)
                    print([len(face_names), list(set(face_names))])
                # Bug fix: waitKey() is required for imshow() to actually
                # render, and gives the user a way out ('q').
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
                # Bug fix: honor the ts time limit; the original loop could
                # never terminate.
                if time.time() - start > ts:
                    break
        finally:
            cap.release()
            cv2.destroyAllWindows()
        return [len(face_names), list(set(face_names))]

    def getFaces(self, ts=2):
        """Sample countFace() for *ts* seconds; return the best (largest) count.

        Returns {"person_count": int, "name_list": [str, ...]}.
        """
        init = time.time()
        faces = []
        while True:
            faces.append(self.countFace())
            print(faces)
            if time.time() - init > ts:
                # Keep the sample that saw the most faces.
                faces.sort(key=lambda x: (x[0]), reverse=True)
                face_result = {"person_count": faces[0][0], "name_list": faces[0][1]}
                print(face_result)
                return face_result

    def getFacesLive(self, ts=4):
        """Like getFaces() but samples the live preview loop countFaceLive()."""
        init = time.time()
        faces = []
        while True:
            faces.append(self.countFaceLive())
            print(faces)
            if time.time() - init > ts:
                faces.sort(key=lambda x: (x[0]), reverse=True)
                face_result = {"person_count": faces[0][0], "name_list": faces[0][1]}
                print(face_result)
                return face_result

    def mointer_without_ray(self):
        """Continuously run getFaces() and persist each result to a pickle file."""
        while True:
            result = self.getFaces()
            print(result)
            with open("result_moniter.pkl", "wb") as file:
                pickle.dump(result, file)

    # NOTE(review): disabled Raspberry Pi GPIO trigger -- requires RPi.GPIO
    # (import commented out at the top of the file). Left as-is.
    # def trigger(self):
    #      GPIO.setwarnings(False)
    #      GPIO.setmode(GPIO.BCM)
    #      GPIO.setup(self.monitor ,GPIO.IN)
    #      print(GPIO.input(self.monitor))
    #      flg =True
    #      while flg:
    #          if GPIO.input(self.monitor) == True:
    #              result = self.getFaces()
    #              with open("result_moniter.pkl","wb") as file:
    #                  pickle.dump(result,file)
    #          else:
    #              print("sense not detect person")
    #              time.sleep(1)
    #

if __name__ == "__main__":
    # Load the cached (known_names, known_encodings) pair and start the
    # live preview loop.
    pickle_path = "results.pkl"
    with open(pickle_path, "rb") as handle:
        known_results = pickle.load(handle)
    detector = face_detector(known_results)
    detector.countFaceLive()
    # print(detector.trigger())


