# NOTE: This entire file is commented-out scratch code — three successive
# iterations of a MediaPipe hand-tracking demo. Nothing in this file executes.
# --- Version 1 (superseded): draws hand landmark connections and joint dots on
# --- both the live camera frame and a black canvas; no joint-name labels.
# import cv2
# import mediapipe as mp
# import numpy as np
# # 初始化 MediaPipe 手部检测模块
# mp_hands = mp.solutions.hands
# hands = mp_hands.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.5)
# mp_drawing = mp.solutions.drawing_utils

# # 打开摄像头
# cap = cv2.VideoCapture(0)
# ret, frame = cap.read()
# black_frame = np.zeros_like(frame)
# while True:
#     ret, frame = cap.read()
#     if not ret:
#         break

#     # 转换为RGB格式
#     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

#     # 进行手部检测
#     results = hands.process(rgb_frame)

#     # 如果检测到手部
#     if results.multi_hand_landmarks:
#         for landmarks in results.multi_hand_landmarks:
#             # 绘制手部的关节连接
#             mp_drawing.draw_landmarks(frame, landmarks, mp_hands.HAND_CONNECTIONS)
#             mp_drawing.draw_landmarks(black_frame, landmarks, mp_hands.HAND_CONNECTIONS)
#             # 获取每个关节点的坐标
#             for id, landmark in enumerate(landmarks.landmark):
#                 h, w, c = frame.shape
#                 cx, cy = int(landmark.x * w), int(landmark.y * h)
#                 cv2.circle(frame, (cx, cy), 5, (0, 255, 0), -1)
#                 cv2.circle(black_frame, (cx, cy), 5, (0, 255, 0), -1)


#     # 显示结果
#     cv2.imshow("Hand Gesture Detection", frame)
#     cv2.imshow("Black Frame", black_frame)

#     # 按 'q' 键退出
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break
#     black_frame[:] = 0

# # 释放资源
# cap.release()
# cv2.destroyAllWindows()

# --- Version 2 (superseded): same as Version 1, plus English joint names drawn
# --- next to each landmark with cv2.putText.
# import cv2
# import mediapipe as mp
# import numpy as np

# # 初始化 MediaPipe 手部检测模块
# mp_hands = mp.solutions.hands
# hands = mp_hands.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.5)
# mp_drawing = mp.solutions.drawing_utils

# # 手部关节名称映射
# hand_landmarks = [
#     "WRIST", "THUMB_CMC", "THUMB_MCP", "THUMB_IP", "THUMB_TIP",
#     "INDEX_FINGER_MCP", "INDEX_FINGER_PIP", "INDEX_FINGER_DIP", "INDEX_FINGER_TIP",
#     "MIDDLE_FINGER_MCP", "MIDDLE_FINGER_PIP", "MIDDLE_FINGER_DIP", "MIDDLE_FINGER_TIP",
#     "RING_FINGER_MCP", "RING_FINGER_PIP", "RING_FINGER_DIP", "RING_FINGER_TIP",
#     "PINKY_MCP", "PINKY_PIP", "PINKY_DIP", "PINKY_TIP"
# ]

# # 打开摄像头
# cap = cv2.VideoCapture(0)
# ret, frame = cap.read()
# black_frame = np.zeros_like(frame)

# while True:
#     ret, frame = cap.read()
#     if not ret:
#         break

#     # 转换为RGB格式
#     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

#     # 进行手部检测
#     results = hands.process(rgb_frame)

#     # 如果检测到手部
#     if results.multi_hand_landmarks:
#         for landmarks in results.multi_hand_landmarks:
#             # 绘制手部的关节连接
#             mp_drawing.draw_landmarks(frame, landmarks, mp_hands.HAND_CONNECTIONS)
#             mp_drawing.draw_landmarks(black_frame, landmarks, mp_hands.HAND_CONNECTIONS)

#             # 获取每个关节点的坐标
#             for id, landmark in enumerate(landmarks.landmark):
#                 h, w, c = frame.shape
#                 cx, cy = int(landmark.x * w), int(landmark.y * h)

#                 # 绘制关节点
#                 cv2.circle(frame, (cx, cy), 5, (0, 255, 0), -1)
#                 cv2.circle(black_frame, (cx, cy), 5, (0, 255, 0), -1)

#                 # 在每个关节点旁边显示关节名称
#                 cv2.putText(frame, hand_landmarks[id], (cx + 10, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)

#     # 显示结果
#     cv2.imshow("Hand Gesture Detection", frame)
#     cv2.imshow("Black Frame", black_frame)

#     # 按 'q' 键退出
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break

#     black_frame[:] = 0

# # 释放资源
# cap.release()
# cv2.destroyAllWindows()


# --- Version 3 (latest iteration): Chinese joint-name labels, rendered with
# --- Pillow (ImageFont/ImageDraw) since cv2.putText cannot draw CJK glyphs.
# import cv2
# import mediapipe as mp
# import numpy as np
# from PIL import Image, ImageDraw, ImageFont

# # 初始化 MediaPipe 手部检测模块
# mp_hands = mp.solutions.hands
# hands = mp_hands.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.5)
# mp_drawing = mp.solutions.drawing_utils

# # 手部关节名称映射
# hand_landmarks = [
#     "腕部", "拇指基节", "拇指中节", "拇指末节", "拇指尖",
#     "食指基节", "食指中节", "食指末节", "食指尖",
#     "中指基节", "中指中节", "中指末节", "中指尖",
#     "无名指基节", "无名指中节", "无名指末节", "无名指尖",
#     "小指基节", "小指中节", "小指末节", "小指尖"
# ]

# # 打开摄像头
# cap = cv2.VideoCapture(0)
# ret, frame = cap.read()
# black_frame = np.zeros_like(frame)

# # 加载中文字体
# font_path = "msyh.ttc"  # 你可以使用适合你系统的字体文件路径
# font = ImageFont.truetype(font_path, 20)

# while True:
#     ret, frame = cap.read()
#     if not ret:
#         break

#     # 转换为RGB格式
#     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

#     # 进行手部检测
#     results = hands.process(rgb_frame)

#     # 如果检测到手部
#     if results.multi_hand_landmarks:
#         for landmarks in results.multi_hand_landmarks:
#             # 绘制手部的关节连接
#             mp_drawing.draw_landmarks(frame, landmarks, mp_hands.HAND_CONNECTIONS)
#             mp_drawing.draw_landmarks(black_frame, landmarks, mp_hands.HAND_CONNECTIONS)

#             # 获取每个关节点的坐标
#             for id, landmark in enumerate(landmarks.landmark):
#                 h, w, c = frame.shape
#                 cx, cy = int(landmark.x * w), int(landmark.y * h)

#                 # 绘制关节点
#                 cv2.circle(frame, (cx, cy), 5, (0, 255, 0), -1)
#                 cv2.circle(black_frame, (cx, cy), 5, (0, 255, 0), -1)
#                 # NOTE(review): only `frame` receives the text labels below;
#                 # black_frame gets dots/connections but no names — confirm
#                 # whether that asymmetry was intentional before reviving this.

#                 # 使用Pillow绘制中文文本
#                 pil_image = Image.fromarray(frame)
#                 draw = ImageDraw.Draw(pil_image)
#                 text = hand_landmarks[id]
#                 draw.text((cx + 10, cy), text, font=font, fill=(0, 0, 255))

#                 # 将Pillow图像转换回OpenCV图像
#                 frame = np.array(pil_image)

#     # 显示结果
#     cv2.imshow("Hand Gesture Detection", frame)
#     cv2.imshow("Black Frame", black_frame)

#     # 按 'q' 键退出
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break

#     black_frame[:] = 0

# # 释放资源
# cap.release()
# cv2.destroyAllWindows()
