# """
# 尝试使用Mediapipe识别人物，结合pyautogui自动化操作
#  做一个锁头外挂
# """
# import time
#
# import pyautogui
# import cv2
# import mediapipe as mp
# from PIL import ImageGrab
#
# # 如果发生异常，关闭自动化操作，防止失控
#
# pyautogui.FAILSAFE = False
# # 获取当前屏幕的长宽
# win_width, win_height = pyautogui.size()
# mp_holistic = mp.solutions.holistic
# mp_drawing = mp.solutions.drawing_utils
# with mp_holistic.Holistic(min_tracking_confidence=0.5, min_detection_confidence=0.5) as holistic:
#     while True:
#         # 截图
#         ImageGrab.grab().save(r'images/video.jpg', "JPEG")
#         time.sleep(0.01)
#         img = cv2.imread(r'images/video.jpg')
#         # 转换为RGB类型
#         imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#         results = holistic.process(imgRGB)
#         if results:
#             img = cv2.cvtColor(imgRGB, cv2.COLOR_RGB2BGR)
#             mp_drawing.draw_landmarks(img, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)
#             positin = results.pose_landmarks.landmark[0]
#             x = positin.x
#             y = positin.y
#             pyautogui.moveTo()
#             print('x轴坐标:', win_width*y)
#         cv2.resize(img, (win_width // 4, win_height // 4))
#         cv2.imshow('img', img)
#         pyautogui.moveTo(win_height * x, win_width * y)
#         if cv2.waitKey(1) == ord('q'):
#             break
# cv2.destroyAllWindows()

import cv2
import pyautogui
import mediapipe as mp

# Track a person's nose in a video with MediaPipe Holistic and move the mouse
# cursor to the corresponding on-frame position with pyautogui.
cap = cv2.VideoCapture('images/跳舞视频.mp4')
# NOTE(review): set(3)/set(4) (CAP_PROP_FRAME_WIDTH/HEIGHT) only affect live
# cameras, not video files — kept for compatibility if a camera source is used.
cap.set(3, 1920)
cap.set(4, 1080)
mp_holistic = mp.solutions.holistic
mp_drawing = mp.solutions.drawing_utils
with mp_holistic.Holistic(min_tracking_confidence=0.5, min_detection_confidence=0.5) as holistic:
    while cap.isOpened():
        success, img = cap.read()
        # BUGFIX: the success flag was ignored; at end of video `img` is None
        # and cv2.cvtColor would raise. Exit the loop cleanly instead.
        if not success:
            break
        # MediaPipe expects RGB input; OpenCV decodes frames as BGR.
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = holistic.process(rgb)
        # BUGFIX: `results` is a SolutionOutputs object and is always truthy;
        # the attribute that may be None is `pose_landmarks`.
        if results.pose_landmarks is not None:
            mp_drawing.draw_landmarks(img, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)
            nose = results.pose_landmarks.landmark[0]  # landmark index 0 = nose
            # BUGFIX: image shape is (height, width, channels); the original
            # unpacked it as (w, h, _) and then called moveTo(y*h, x*w),
            # which swapped the cursor's x and y axes.
            h, w, _ = img.shape
            # Landmark coordinates are normalized to [0, 1]; scale x by the
            # frame width and y by the frame height to get pixel coordinates.
            pyautogui.moveTo(nose.x * w, nose.y * h)
        cv2.imshow('video', img)
        if cv2.waitKey(10) == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()
