import pyautogui
from cvzone.FaceMeshModule import FaceMeshDetector
import cv2
from torchvision import transforms
import torch
import warnings
warnings.filterwarnings("ignore")

# Frame counter used to throttle model inference: incremented once per
# face-detected frame, reset to 0 after each cursor update at 15.
time_s = 0

# Initialize the webcam.
# Index 0 usually selects the built-in camera; higher indices select
# other attached cameras.
cap = cv2.VideoCapture(0)

# Load the fully-serialized eye-gaze CNN.
# map_location='cpu' keeps the load portable: the loop below feeds the
# model CPU tensors, so the weights must reside on the CPU anyway.
# NOTE(review): torch >= 2.6 defaults to weights_only=True, which rejects
# pickled full models — may need weights_only=False there; confirm
# against the installed torch version.
model = torch.load('eyes.pth', map_location='cpu')
model.eval()  # inference mode: disables dropout/batch-norm updates

# Initialize the FaceMeshDetector.
# staticMode: if True, detect only once; otherwise detect every frame.
# maxFaces: maximum number of detectable faces.
# minDetectionCon: detection confidence threshold.
# minTrackCon: tracking confidence threshold.
detector = FaceMeshDetector(staticMode=False, maxFaces=1, minDetectionCon=0.5, minTrackCon=0.5)

# Landmark indices around the left eye (MediaPipe face-mesh numbering).
# NOTE(review): unused by the loop below, which indexes landmarks
# directly — kept for reference.
idList = [22, 23, 24, 26, 110, 130, 157, 158, 159, 160, 161, 243]
print(pyautogui.size().height)

# 开始循环以持续获取摄像头帧
# Loop invariants hoisted out of the frame loop: the preprocessing
# pipeline and the screen dimensions do not change per frame.
eye_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize((20, 50))
])
screen_w = pyautogui.size().width
screen_h = pyautogui.size().height

# Main loop: read a frame, locate the face mesh, crop the eye region,
# and on every 15th face-detected frame run the CNN and move the cursor.
while True:
    # success: whether a frame was captured; img: the frame itself.
    success, img = cap.read()
    if not success or img is None:
        # Camera read failed; skip this frame rather than crash below.
        continue

    # Detect the face mesh (draw=False keeps the raw frame untouched).
    img, faces = detector.findFaceMesh(img, draw=False)
    if faces:
        time_s += 1
        for face in faces:
            # Eye-extreme landmarks (MediaPipe face-mesh indices).
            left_eye_up = face[159]
            left_eye_down = face[23]
            left_eye_left = face[130]
            right_eye_up = face[386]
            right_eye_down = face[253]
            right_eye_right = face[466]

            # Bounding box covering both eyes, padded by 10 px and
            # clamped at 0 so negative indices cannot wrap around
            # (numpy treats negative slice bounds as offsets from the end).
            x1 = max(0, left_eye_left[0] - 10)
            y1 = max(0, min(left_eye_up[1], right_eye_up[1]) - 10)
            x2 = right_eye_right[0] + 10
            y2 = max(right_eye_down[1], left_eye_down[1]) + 10

            # Crop BEFORE drawing the rectangle so the blue border does
            # not leak into the model's input (the original drew first,
            # feeding its own annotation back into the network).
            cropped = img[y1:y2, x1:x2]
            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)

            # A slice is never None; test emptiness instead. Inference
            # only every 15 face frames throttles cursor movement.
            if cropped.size > 0 and time_s == 15:
                # no_grad: pure inference, skip building the autograd graph.
                with torch.no_grad():
                    output = model(eye_transform(cropped).unsqueeze(0))
                out = output.tolist()
                eye_x = out[0][0]
                eye_y = out[0][1]

                # Map model output from [-1, 1] to screen pixels.
                target_x = int(((eye_x + 1) / 2) * screen_w)
                target_y = int(((eye_y + 1) / 2) * screen_h)
                print(target_x, target_y)

                # Relative cursor move: the original's sign juggling
                # reduces to a signed delta from the current position.
                pos = pyautogui.position()
                pyautogui.move(target_x - pos.x, target_y - pos.y)
                time_s = 0

    # Show the annotated frame; press 'q' to exit cleanly.
    cv2.imshow("eye-tracking-cnn", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and GUI resources on exit.
cap.release()
cv2.destroyAllWindows()