import math

import cv2
import mediapipe as mp
import numpy as np

mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose

# https://google.github.io/mediapipe/solutions/pose.html

# The two helper functions below are adapted from a WeChat public-account article.
def Normalize_landmarks(image, hand_landmarks):
    """Convert normalized [0, 1] landmark coordinates to pixel coordinates.

    Returns a list of (x, y) integer pixel tuples, one per landmark.
    NOTE: mediapipe's _normalized_to_pixel_coordinates returns None for a
    point that falls outside the image bounds, so entries can be None —
    callers must check before using them.
    """
    height, width = image.shape[0], image.shape[1]
    return [
        mp_drawing._normalized_to_pixel_coordinates(lm.x, lm.y, width, height)
        for lm in hand_landmarks.landmark
    ]

def calAngle(pt1, pt2, pt3):
    """Return the angle at vertex pt2 (in degrees) of triangle pt1-pt2-pt3.

    Each point is an (x, y) pair of pixel coordinates. Uses the law of
    cosines. The cosine value is clamped to [-1, 1] before acos so that
    floating-point rounding on (nearly) collinear points cannot raise a
    "math domain error" — the original code could crash on such frames.

    Raises ZeroDivisionError when pt1 == pt2 or pt2 == pt3 (degenerate
    triangle), same as the original.
    """
    # Side lengths: a = |pt1-pt2|, b = |pt2-pt3|, c = |pt3-pt1|.
    a = math.hypot(pt2[0] - pt1[0], pt2[1] - pt1[1])
    b = math.hypot(pt3[0] - pt2[0], pt3[1] - pt2[1])
    c = math.hypot(pt1[0] - pt3[0], pt1[1] - pt3[1])
    # Law of cosines; clamp against rounding drift just outside [-1, 1].
    cos_angle = (a * a + b * b - c * c) / (2 * a * b)
    cos_angle = max(-1.0, min(1.0, cos_angle))
    return math.degrees(math.acos(cos_angle))


# Below is the official mediapipe example, modified to compute the leg angle
# using the helper functions above.
def for_static_images(mp_pose, mp_drawing, mp_drawing_styles,
                      image_files=(), bg_color=(192, 192, 192)):
  """Run mediapipe Pose on static images and save annotated copies to /tmp.

  Args:
    mp_pose: the mediapipe pose solution module (mp.solutions.pose).
    mp_drawing: mediapipe drawing-utils module (mp.solutions.drawing_utils).
    mp_drawing_styles: mediapipe drawing-styles module.
    image_files: iterable of image file paths to process. The original
      hard-coded an empty list, making the function a no-op; it is now a
      backward-compatible parameter (default still processes nothing).
    bg_color: BGR color used to fill the segmented-out background
      (default gray, as in the original).
  """
  with mp_pose.Pose(
      static_image_mode=True,
      model_complexity=2,
      enable_segmentation=True,
      min_detection_confidence=0.5) as pose:
    for idx, file in enumerate(image_files):
      image = cv2.imread(file)
      if image is None:
        # cv2.imread returns None on unreadable paths; skip, don't crash.
        print('Could not read image:', file)
        continue
      image_height, image_width, _ = image.shape
      # Convert the BGR image to RGB before processing.
      results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

      if not results.pose_landmarks:
        continue
      # BUG FIX: the original referenced the undefined name `mp_holistic`
      # here (NameError); the landmark enum lives on the mp_pose module.
      print(
          f'Nose coordinates: ('
          f'{results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE].x * image_width}, '
          f'{results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE].y * image_height})'
      )

      annotated_image = image.copy()
      # Draw segmentation on the image: pixels whose segmentation-mask
      # confidence exceeds 0.1 keep the photo, the rest become bg_color.
      # To improve segmentation around boundaries, consider applying a
      # joint bilateral filter to results.segmentation_mask with the image.
      condition = np.stack((results.segmentation_mask,) * 3, axis=-1) > 0.1
      bg_image = np.zeros(image.shape, dtype=np.uint8)
      bg_image[:] = bg_color
      annotated_image = np.where(condition, annotated_image, bg_image)
      # Draw pose landmarks on the image.
      mp_drawing.draw_landmarks(
          annotated_image,
          results.pose_landmarks,
          mp_pose.POSE_CONNECTIONS,
          landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
      cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)
      # Plot pose world landmarks (opens a matplotlib 3D figure).
      print("绘制世界地标。")
      mp_drawing.plot_landmarks(
          results.pose_world_landmarks, mp_pose.POSE_CONNECTIONS)

# For webcam / video-file input: read frames, estimate pose, overlay the
# left-leg (hip-knee-ankle) angle, and display until EOF or ESC.
# file_path = r"G:\UCF-101\HammerThrow\v_HammerThrow_g01_c02.avi"
file_path = r"G:\UCF-101\HighJump\v_HighJump_g02_c05.avi"
# NOTE(review): on this high-jump clip the pose is reported to be lost
# mid-jump; other frames track fine. Pass 0 instead of file_path for a
# live webcam.
cap = cv2.VideoCapture(file_path)
with mp_pose.Pose(
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5) as pose:
  while cap.isOpened():
    success, image = cap.read()
    if not success:
      print("Ignoring empty camera frame.")
      # For a video file, read failure means end of stream → break.
      # A live camera would use 'continue' instead.
      break

    # Flip the image horizontally for a later selfie-view display, and
    # convert the BGR image to RGB for mediapipe.
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    # To improve performance, mark the image as not writeable so mediapipe
    # can pass it by reference; restore writeability right after so the
    # drawing calls below are allowed to modify the frame.
    # BUG FIX: the original only restored writeability AFTER cv2.circle /
    # cv2.putText, which fail on a read-only array.
    image.flags.writeable = False
    results = pose.process(image)
    image.flags.writeable = True

    pose_landmarks = results.pose_landmarks
    if pose_landmarks is not None:
      normalized_landmarks = Normalize_landmarks(image, pose_landmarks)
      left_hip = normalized_landmarks[mp_pose.PoseLandmark.LEFT_HIP]
      left_knee = normalized_landmarks[mp_pose.PoseLandmark.LEFT_KNEE]
      left_ankle = normalized_landmarks[mp_pose.PoseLandmark.LEFT_ANKLE]
      # BUG FIX: Normalize_landmarks entries are None for points outside
      # the frame; the original crashed with TypeError on such frames.
      if left_hip is not None and left_knee is not None and left_ankle is not None:
        cv2.circle(image, left_hip, 12, (255, 0, 255), -1, cv2.LINE_AA)
        angle = calAngle(left_hip, left_knee, left_ankle)
        print("angle", angle)
        # 0.5 = font scale; (10, 30) is measured from the top-left corner.
        # cv2.putText cannot render non-ASCII text (no degree sign),
        # hence the "cent" suffix.
        cv2.putText(image, str(int(angle)) + "cent", (10, 30),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 0), 2)

    # Draw the pose annotation on the image and convert back to BGR for
    # display.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    mp_drawing.draw_landmarks(
        image,
        results.pose_landmarks,
        mp_pose.POSE_CONNECTIONS,
        landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
    cv2.imshow('MediaPipe Pose', image)
    if cv2.waitKey(5) & 0xFF == 27:  # ESC quits
      break
cap.release()
cv2.destroyAllWindows()  # BUG FIX: original never closed the display window

# https://google.github.io/mediapipe/solutions/pose.html