import threading
import cv2
import time
# from GRU import *
import torch
import mediapipe as mp
import numpy as np
from HandTrackingModule import HandDetector  # 手部检测方法
import HandTrackModule as htm
import pyautogui


# Disable PyAutoGUI's fail-safe: gesture control moves the cursor to screen
# edges/corners, which would otherwise abort with FailSafeException.
pyautogui.FAILSAFE = False

class Identify:
    """Gesture-driven keyboard/mouse controller.

    Runs hand detection on a background thread: frames are read from the
    default camera, finger patterns are classified by ``HandDetector``, and
    each recognized pattern is translated into a pyautogui key press or
    mouse action (slide navigation, zoom, rotate, cursor move, click/drag,
    screenshot, escape).
    """

    def __init__(self, win):
        # win: UI object; must expose `eventRunning` (a threading.Event used
        # to pause/resume the loop) and `flash_img(image, ratio)` to render
        # the camera preview. -- assumed from usage in run(); confirm caller.
        self.win = win
        self.isEnd = False  # set by break_loop() to stop the capture loop

    def start(self):
        """Launch the recognition loop on a background thread."""
        threading.Thread(target=self.run).start()

    def run(self):
        """Capture/recognition loop; executed on the worker thread."""
        pinch_gap = 30  # pinch distance (px) below which the mouse is pressed
        # Sub-rectangle of the camera frame mapped onto the full screen:
        # [left, top, right, bottom] in frame pixel coordinates.
        region = [120, 120, 550, 400]
        self._mouse_held = False  # True while a pinch-drag is in progress

        self.cap = cv2.VideoCapture(0)  # default camera
        self.detector = HandDetector(mode=False,       # video-stream mode
                                     maxHands=2,       # detect up to two hands
                                     detectionCon=0.8,  # min detection confidence
                                     minTrackCon=0.5)   # min tracking confidence

        # Was hard-coded to 1920x1080; use the actual screen resolution so
        # cursor mapping works on any display (identical on 1920x1080).
        screen_w, screen_h = pyautogui.size()

        ratio = 480.0 / 640.0  # preview height/width ratio

        # NOTE: the original code also created an unused mp.solutions.hands
        # model here; its binding was shadowed before first use, so the dead
        # context manager has been removed.
        try:
            while self.cap.isOpened():
                if self.isEnd:
                    break
                self.win.eventRunning.wait()  # block while the UI is paused

                success, image = self.cap.read()
                if not success or image is None:
                    # Bug fix: a dropped frame previously passed None into
                    # findHands and crashed the worker thread.
                    continue

                hands, image = self.detector.findHands(image)
                image = cv2.flip(image, 1)  # mirror for a natural preview
                hand_points = self.detector.findPosition(image)

                if len(hands) == 1:
                    self._one_hand(hands[0], hand_points, image,
                                   region, pinch_gap, screen_w, screen_h)
                elif len(hands) == 2:
                    self._two_hands(hands)

                if self.win.eventRunning.isSet():
                    self.win.flash_img(image, ratio)
        finally:
            # Release the camera even if the loop raises.
            self.cap.release()

    def _to_screen(self, wrist, region, screen_w, screen_h):
        """Map a landmark's frame coordinates into screen coordinates.

        `wrist` is a (id, x, y) landmark row; the -100/-120 offsets center
        the active region within the frame (empirical values).
        """
        x0 = wrist[1] - 100
        y0 = wrist[2] - 120
        sx = (x0 / (region[2] - region[0])) * screen_w
        sy = (y0 / (region[3] - region[1])) * screen_h
        return sx, sy

    def _one_hand(self, hand, points, image, region, pinch_gap,
                  screen_w, screen_h):
        """Translate a single-hand finger pattern into an action."""
        fingers = self.detector.fingersUp(hand)
        if fingers == [0, 1, 0, 0, 0]:        # index only -> previous slide
            pyautogui.press('left')
            time.sleep(0.2)
        elif fingers == [0, 0, 1, 1, 1]:      # next slide
            pyautogui.press('right')
            time.sleep(0.5)
        elif fingers == [1, 1, 0, 0, 0]:      # play from current slide
            pyautogui.hotkey('shift', 'F5')
            time.sleep(0.2)
        elif fingers == [0, 1, 1, 1, 1]:      # zoom in
            pyautogui.hotkey('ctrl', '+')
            time.sleep(0.2)
        elif fingers == [0, 1, 1, 1, 0]:      # zoom out
            pyautogui.hotkey('ctrl', '-')
            time.sleep(0.2)
        elif fingers == [1, 0, 0, 0, 0]:      # rotate left
            pyautogui.hotkey('ctrl', 'l')
            time.sleep(0.8)
        elif fingers == [0, 0, 0, 0, 1]:      # rotate right
            pyautogui.hotkey('ctrl', 'r')
        elif fingers == [1, 1, 1, 1, 1]:      # open palm -> move cursor
            sx, sy = self._to_screen(points[0], region, screen_w, screen_h)
            # x is mirrored because the preview frame is flipped.
            pyautogui.moveTo(screen_w - sx, sy, duration=0.01)
        elif fingers == [0, 1, 1, 0, 0]:      # index+middle -> click / drag
            pyautogui.doubleClick()
            index_tip = (points[8][1], points[8][2])
            middle_tip = (points[12][1], points[12][2])
            length, info = self.detector.findDistance(index_tip, middle_tip,
                                                      image)
            if length < pinch_gap and not self._mouse_held:
                # Fingers pinched together: press and start dragging.
                self._mouse_held = True
                pyautogui.mouseDown()
                sx, sy = self._to_screen(points[0], region,
                                         screen_w, screen_h)
                print("down")
                pyautogui.moveTo(screen_w - sx, sy, duration=0)
            elif length > pinch_gap + 50 and self._mouse_held:
                # Fingers spread apart (with hysteresis): release the button.
                self._mouse_held = False
                pyautogui.mouseUp()
                print("up")
            time.sleep(0.8)

    def _two_hands(self, hands):
        """Two-hand gestures: screenshot and escape."""
        fingers_1 = self.detector.fingersUp(hands[0])
        fingers_2 = self.detector.fingersUp(hands[1])
        if fingers_1 == [1, 1, 0, 0, 0] and fingers_2 == [1, 1, 0, 0, 0]:
            # Both hands thumb+index up: save a screenshot.
            pyautogui.screenshot().save('screenshot.png')
            time.sleep(2)
        if fingers_1 == [1, 1, 1, 1, 1] and fingers_2 == [1, 1, 1, 1, 1]:
            # Both palms open: close the current presentation view.
            pyautogui.press('esc')
            time.sleep(0.2)

    def break_loop(self):
        """Ask the worker thread to exit at its next loop iteration."""
        self.isEnd = True
