import threading
import time
import mediapipe as mp
import math
import cv2
import subprocess


def init():  # Load the config file contents into module globals
    """Read config.txt into `Total_face_num` and `id_dict`.

    File layout: first line is the number of registered users; each
    following line is "<id> <name>".
    """
    global Total_face_num
    # BUG FIX: use a context manager so the file is closed even if parsing
    # raises (the original leaked the handle on a bad line).
    with open('config.txt') as f:
        Total_face_num = int(f.readline())
        for _ in range(Total_face_num):
            id_name = f.readline().split(' ')
            # BUG FIX: strip the trailing newline that the original stored
            # inside the name (it broke the recognition result printouts).
            id_dict[int(id_name[0])] = id_name[1].strip()

def vector_2d_angle(v1, v2):
    """Return the angle in degrees (0..180) between two 2-D vectors.

    Used for non-TOF cameras: finger bend is estimated from 2-D landmark
    vectors.  Returns the sentinel 65535.0 when the angle is undefined
    (either vector has zero length).
    """
    v1_x, v1_y = v1[0], v1[1]
    v2_x, v2_y = v2[0], v2[1]
    try:
        dot = v1_x * v2_x + v1_y * v2_y
        norm = ((v1_x ** 2 + v1_y ** 2) ** 0.5) * ((v2_x ** 2 + v2_y ** 2) ** 0.5)
        # BUG FIX: clamp into acos's domain [-1, 1].  Floating-point error can
        # push the cosine fractionally outside it for (anti)parallel vectors,
        # and the original bare `except:` turned that into a spurious 65535.
        cos_ = max(-1.0, min(1.0, dot / norm))
        angle_ = math.degrees(math.acos(cos_))
    except ZeroDivisionError:  # a zero-length vector has no direction
        angle_ = 65535.
    if angle_ > 180.:
        angle_ = 65535.
    return angle_

def h_gesture(angle_list):
    """Map the five finger-bend angles to a gesture digit.

    angle_list holds [thumb, index, middle, ring, pinky] angles in degrees;
    a large angle means the finger is bent, a small one that it is straight.
    Returns one of "1".."6" or None when no rule matches (or any angle is
    the 65535.0 invalid sentinel).
    """
    bent = 65.          # above this a non-thumb finger counts as bent
    bent_thumb = 53.    # thumb bend threshold
    straight = 49.      # below this a finger counts as straight
    if 65535. in angle_list:
        return None
    thumb, index, middle, ring, pinky = angle_list
    # Rules are checked in the original priority order.
    if thumb > bent_thumb and index > bent and middle > bent and ring > bent and pinky > bent:
        return "1"      # fist: everything bent
    if thumb < straight and index < straight and middle < straight and ring < straight and pinky < straight:
        return "5"      # open hand: everything straight
    if thumb > 5 and index < straight and middle > bent and ring > bent and pinky > bent:
        return "1"      # index finger only
    if thumb < straight and index > bent and middle > bent and ring > bent and pinky < straight:
        return "6"      # thumb + pinky out
    if thumb > bent_thumb and index < straight and middle < straight and ring < straight and pinky > bent:
        return "3"      # index + middle + ring out
    if thumb > bent_thumb and index < straight and middle < straight and ring > bent and pinky > bent:
        return "2"      # index + middle out
    return None

def hand_angle(hand_):
    """Compute a bend angle for each finger from 2-D hand landmarks.

    hand_: sequence of 21 (x, y) landmark coordinates in MediaPipe hand
    ordering (0 = wrist; 4/8/12/16/20 = finger tips).

    For each finger the angle is measured between the wrist->knuckle vector
    and the (second joint)->tip vector, so a straight finger yields a small
    angle and a curled one a large angle.  Returns the five angles in the
    order [thumb, index, middle, ring, pinky]; 65535.0 marks an invalid
    angle (see vector_2d_angle).
    """
    # (knuckle, second joint, tip) landmark indices per finger, thumb..pinky.
    # Replaces five copy-pasted blocks of identical arithmetic.
    finger_joints = (
        (2, 3, 4),      # thumb
        (6, 7, 8),      # index
        (10, 11, 12),   # middle
        (14, 15, 16),   # ring
        (18, 19, 20),   # pinky
    )
    wrist_x, wrist_y = int(hand_[0][0]), int(hand_[0][1])
    angle_list = []
    for base, joint, tip in finger_joints:
        v1 = (wrist_x - int(hand_[base][0]), wrist_y - int(hand_[base][1]))
        v2 = (int(hand_[joint][0]) - int(hand_[tip][0]),
              int(hand_[joint][1]) - int(hand_[tip][1]))
        angle_list.append(vector_2d_angle(v1, v2))
    return angle_list

def my_face_recognize():
    """Run LBPH face recognition on the latest detected face.

    Reads the globals `img` (current BGR frame) and `detection` (current
    MediaPipe face detection), crops the detected face box, and tries every
    trained model file (1.yml .. Total_face_num.yml) in turn.  Sets the
    global `face_recognize_result` to True (and returns immediately) when
    any model matches with > 60% similarity; otherwise leaves it False.
    """
    global face_recognize_result
    global img, detection, conf
    time.sleep(1)  # throttle: at most one recognition pass per second

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Convert the detector's relative bounding box to pixel coordinates,
    # padded by one pixel on each side, then clamp it to the frame.
    box = detection.location_data.relative_bounding_box
    y = int(box.ymin * H_size) - 1
    x = int(box.xmin * W_size) - 1
    h = int(box.height * H_size) + 2
    w = int(box.width * W_size) + 2
    if x < 1:
        x = 1
    if y < 1:
        y = 1
    if x + w > W_size:
        w = int(W_size - x - 1)
    if y + h > H_size:
        h = int(H_size - y - 1)

    # BUG FIX: the original overwrote face_recognize_result on every loop
    # iteration, so a later non-matching model clobbered an earlier match.
    # Start pessimistic and return as soon as any model passes.
    face_recognize_result = False
    for i in range(1, Total_face_num + 1):  # model files are numbered from 1
        yml = str(i) + ".yml"
        print("本次:" + yml)  # debug: which model is being tried
        recognizer.read(yml)
        idnum, confidence = recognizer.predict(gray[y:y + h, x:x + w])
        # LBPH `confidence` is a distance (lower = better); convert it to a
        # rough similarity percentage.
        conf = round(100 - confidence)
        if confidence < 100:  # close enough to be a trained subject
            if idnum in id_dict:
                user_name = id_dict[idnum]
            else:
                user_name = "Untagged user:" + str(idnum)
                conf = 0
            confidence = "{0}%".format(round(100 - confidence))
        else:
            user_name = "unknown"
            conf = 0
        print(str(user_name) + " " + str(confidence))

        if conf > 60:
            face_recognize_result = True
            print(user_name + ' passed facial recognition with ' + str(conf) + '% similarity')
            return

# Shared flag: set by my_face_recognize() when a trained face passes
# recognition; polled by the main loop and both worker threads.
face_recognize_result=False
def call_face_recognize():
    """Worker thread body: poll for a detected face and run recognition.

    Loops until either recognition flag (face or gesture) becomes True,
    then returns so a fresh thread can be started on the next screen lock.
    """
    global has_face
    while True:
        if has_face:
            # A face is in frame: run one (blocking) LBPH recognition pass.
            my_face_recognize()
        else:
            # BUG FIX: the original spun in a hot loop while no face was
            # visible (its sleep was commented out), pegging a CPU core.
            time.sleep(0.05)
        if face_recognize_result or gesture_recognize_result:
            return

# The gesture "PIN": sequence of gesture digits that unlocks the screen.
gesture_password='15'
# Digits entered so far (kept as a sliding window of recent gestures).
enter_passwd=''
# Set True once the entered gesture sequence matches gesture_password.
gesture_recognize_result=False
def call_gesture_recognize_and_face_detect():
    """Worker thread body: read camera frames, recognize gestures, detect faces.

    Runs until either recognition flag becomes True.  Every frame is fed to
    MediaPipe Hands (gesture password entry into `enter_passwd`) and to
    MediaPipe FaceDetection (sets `has_face`/`detection` for the
    face-recognition thread).
    """
    global success, img, camera,detection,has_face,gesture_recognize_result,enter_passwd,gesture_password
    last_gesture_str=''
    last_gesture_enter_time=0
    while True:
        if gesture_recognize_result or face_recognize_result:
            return
        # NOTE(review): `success` is never checked — a failed read would make
        # cvtColor below raise on an empty frame; confirm the camera is reliable.
        success, img = camera.read()
        # img= cv2.flip(img,1)    # flip horizontally if the camera is mirrored

        # NOTE(review): constant is named RGB2BGR but the frame is BGR; the two
        # conversion codes are symmetric channel swaps, so the result is RGB
        # for MediaPipe as intended — only the name is misleading.
        frame = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        
        # A non-mirrored camera does not need flipping
        results = hands.process(frame)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(img, hand_landmarks, mp_hands.HAND_CONNECTIONS)
                hand_local = []
                # Collect the 21 hand landmarks in pixel coordinates.
                for i in range(21):
                    x = hand_landmarks.landmark[i].x * frame.shape[1]
                    y = hand_landmarks.landmark[i].y * frame.shape[0]
                    hand_local.append((x, y))
                if hand_local:
                    angle_list = hand_angle(hand_local)
                    gesture_str = h_gesture(angle_list)
                    # print('gesture recognition result: ' + str(gesture_str))
                    # ui.printf('gesture recognition result: ' + str(gesture_str))
                    cv2.putText(img, gesture_str, (0, 100), 0, 1.3, (0, 0, 255), 3)
                    # print(gesture_str)

                    # Password entry: record each newly seen gesture digit;
                    # restart the sequence after >2s without a new digit.
                    if gesture_str!=None and last_gesture_str!=gesture_str:
                        if enter_passwd=='' or time.time()-last_gesture_enter_time>2.0:
                            enter_passwd=gesture_str
                        else:
                            enter_passwd+=gesture_str
                        last_gesture_str=gesture_str
                        last_gesture_enter_time=time.time()


                        # Compare a sliding window of the most recent digits
                        # against the configured gesture password.
                        if len(enter_passwd)>=len(gesture_password):
                            if enter_passwd==gesture_password:
                                print('Gesture code pass')
                                gesture_recognize_result=True
                                enter_passwd=''
                                # time.sleep(1.0)
                            else:
                                gesture_recognize_result=False
                            enter_passwd=enter_passwd[1:]
                    print('enter_passwd:'+enter_passwd)


        # Feed the same frame to the face-detection module
        results = faceDetection.process(frame)
        
        # results.detections is None when no face was found
        if results.detections:
            
            # Each detection carries a score and a relative bounding box
            for index, detection in enumerate(results.detections):
                
                # Per-frame debug output (disabled):
                # print(index, detection)  
                # print(detection.score)  # probability this is a face
                # print(detection.location_data.relative_bounding_box)  # box xywh
                
                # Draw the keypoints and bounding box onto the frame
                mpDraw.draw_detection(img, detection)

                # Tell the recognition thread that a face (global `detection`)
                # is now available.
                has_face=True

        # Display the result (window currently disabled)

        # cv2.imshow('camera', img)
        cv2.waitKey(1)

def unlock_screen(username, password):
    """Unlock the session by typing `password` via xdotool and pressing Enter.

    `username` is accepted for interface compatibility but is not used —
    the lock screen already knows which user is logged in.
    """
    time.sleep(0.5)
    # Each step: run the xdotool command, then pause for it to take effect.
    steps = (
        (['xdotool', 'type', password], 0.7),   # simulate typing the password
        (['xdotool', 'key', 'Return'], 0.5),    # simulate pressing Enter
    )
    for argv, pause in steps:
        subprocess.run(argv)
        time.sleep(pause)


# First read the config file: line 1 is the number of stored users; each
# following line is "(id, name)" — a numeric label and the matching user name.
id_dict = {}  # maps id -> user name
Total_face_num = 999  # number of faces that already have a user name (overwritten by init())
init()
# Load the OpenCV Haar cascade face detector
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# Create the LBPH face recognizer (models are loaded per-user in my_face_recognize)
recognizer = cv2.face.LBPHFaceRecognizer_create()
# Open camera number 0
camera = cv2.VideoCapture(0)  # webcam

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
        static_image_mode=False,
        max_num_hands=2,
        min_detection_confidence=0.75,
        min_tracking_confidence=0.75)


# print(camera.isOpened())
while not camera.isOpened():
    time.sleep(0.5)
    print('等待摄像头')
success, img = camera.read()  # read one frame from the camera
W_size =  camera.get(3)  # frame width  (property 3 = CAP_PROP_FRAME_WIDTH)
H_size =  camera.get(4)  # frame height (property 4 = CAP_PROP_FRAME_HEIGHT)
system_state_lock = 0  # system state flag: 0 = no worker thread running, 1 = recognizing faces, 2 = enrolling a new face


# MediaPipe face-detection module
mpFace = mp.solutions.face_detection
# MediaPipe drawing helpers
mpDraw = mp.solutions.drawing_utils
# Face detector with a minimum detection confidence of 0.5
faceDetection = mpFace.FaceDetection(min_detection_confidence=0.5)



# detection=None
# detection.location_data.relative_bounding_box=None

has_face=False



isLocked=False
# Main loop: poll the xfce4 screensaver once per second; when the screen
# locks, reset the result flags and spawn the two recognition worker threads;
# when either thread succeeds, type the password to unlock.
while True:
    if not isLocked:
        result = subprocess.run(['xfce4-screensaver-command', '-q'], capture_output=True, text=True)
        if 'The screensaver is active' in result.stdout:
            gesture_recognize_result=False
            face_recognize_result=False
            isLocked=True
            print('启动人脸监视和手势监视')
            thread_face_recognize = threading.Thread(target=call_face_recognize)
            thread_face_recognize.start()
            thread_gesture_recognize_and_face_detect = threading.Thread(target=call_gesture_recognize_and_face_detect)
            thread_gesture_recognize_and_face_detect.start()
        time.sleep(1.0)
    else:
        if  gesture_recognize_result or  face_recognize_result :
        # if  gesture_recognize_result :
            isLocked=False
            # NOTE(review): credentials are hard-coded in source — consider
            # reading them from a protected config instead.
            unlock_screen('HwHiAiUser', '000000') 

# thread_gesture_recognize_and_face_detect = threading.Thread(target=call_gesture_recognize_and_face_detect)
# thread_gesture_recognize_and_face_detect.start()
# thread_face_recognize = threading.Thread(target=call_face_recognize)
# thread_face_recognize.start()
