import cv2
import mediapipe as mp
import time

cap = cv2.VideoCapture(0)  # open the default camera (device index 0)
mpHands = mp.solutions.hands  # MediaPipe hand-tracking solution module
hands = mpHands.Hands()  # detector with default parameters

# Drawing styles for the landmark overlay
mpDraw = mp.solutions.drawing_utils
# Landmark points: red (BGR), 5 px thick
handLmsStyle = mpDraw.DrawingSpec(color=(0, 0, 255), thickness=5)
# Connection lines: green (BGR), 5 px thick
handConStyle = mpDraw.DrawingSpec(color=(0, 255, 0), thickness=5)
pTime = 0  # timestamp of the previous frame (for FPS calculation)
cTime = 0  # timestamp of the current frame

try:
    while True:
        ret, img = cap.read()  # grab one frame from the camera
        if not ret:
            # Camera unavailable or stream ended: stop instead of
            # busy-spinning forever on failed reads.
            break

        # MediaPipe expects RGB input but OpenCV delivers BGR, so convert.
        # (The original comment wrongly said "convert to grayscale".)
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        result = hands.process(imgRGB)  # run hand-landmark detection
        # print(result.multi_hand_landmarks)

        if result.multi_hand_landmarks:  # at least one hand was detected
            for handLms in result.multi_hand_landmarks:
                # args: image, landmarks, connections, point style, line style
                mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS,
                                      handLmsStyle, handConStyle)

        cTime = time.time()
        # FPS = 1 second / time taken to show one frame.
        # Guard against a zero interval on extremely fast consecutive reads.
        fps = 1 / (cTime - pTime) if cTime != pTime else 0.0
        pTime = cTime
        # Draw the FPS counter onto the frame
        cv2.putText(img, f"FPS :{int(fps)}", (30, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)
        # Show the annotated frame
        cv2.imshow('识别手掌', img)

        # Press the 'q' key to exit the loop
        if cv2.waitKey(1) == ord('q'):
            break
finally:
    # Always release the camera and GUI resources, even on error/interrupt.
    hands.close()
    cap.release()
    cv2.destroyAllWindows()