import tensorflow as tf
import mediapipe as mp
import cv2
import numpy as np
from PIL import ImageFont, ImageDraw, Image

# Load the TFLite model and allocate buffers for its input/output tensors.
# NOTE(review): this runs at import time and will raise immediately if
# 'body_recognition_1dconv.tflite' is not in the working directory.
interpreter = tf.lite.Interpreter(model_path='body_recognition_1dconv.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()  # input tensor metadata (index, shape, dtype)
output_details = interpreter.get_output_details()  # output tensor metadata

# MediaPipe drawing utilities and the Holistic (pose + hands + face) solution.
mp_drawing = mp.solutions.drawing_utils
mp_holistic = mp.solutions.holistic


def putText(img, text, org, font_path, color=(0, 0, 255), font_size=20):
    """
    Render text onto an image via PIL (handles CJK text, which
    cv2.putText cannot draw).

    :param img: input image read via cv2 (3-channel BGR ndarray)
    :param text: text to render
    :param org: (x, y) coordinates of the text's top-left corner
    :param font_path: path to a TrueType/OpenType font file
    :param color: text color as (B, G, R) — the tuple is applied
                  channel-for-channel, matching the cv2 BGR layout
    :param font_size: font size in points
    :return: a new ndarray with the text rendered
    """
    img_pil = Image.fromarray(img)
    draw = ImageDraw.Draw(img_pil)
    font = ImageFont.truetype(font_path, font_size)
    # Fix: the original passed a 4-tuple (b, g, r, 0) whose alpha of 0 is
    # meaningless (and misleading) on a 3-channel image; pass a 3-tuple
    # matching the image mode instead.
    b, g, r = color
    draw.text(org, text, font=font, fill=(b, g, r))
    return np.array(img_pil)


def pose_predict(predict_num):
    """
    Map a predicted class index to its Chinese action label.

    :param predict_num: class index produced by the classifier's argmax
                        (expected range 0-6; numpy integer types work too)
    :return: the action label string; unknown indices fall back to
             '无奈'-adjacent default '无动作' ("no action")
    """
    labels = {
        0: '保持安静',
        1: '打招呼',
        2: '赞佩',
        3: '思考、疑惑',
        4: '拒绝',
        5: '无奈',
        6: '无动作',
    }
    # Fix: the original if/elif chain left `pose` unassigned for any index
    # outside 0-6 and raised UnboundLocalError; use a lookup with a
    # safe default instead.
    return labels.get(predict_num, '无动作')


def _flatten_landmarks(landmark_set, expected_count):
    """
    Flatten a MediaPipe landmark list into [x0, y0, z0, x1, y1, z1, ...].

    :param landmark_set: a MediaPipe NormalizedLandmarkList, or None when
                         that body part was not detected in the frame
    :param expected_count: number of landmarks this part always carries
                           (33 for pose, 21 per hand)
    :return: list of expected_count * 3 floats; all zeros when the part
             was not detected (same zero-padding as the original code)
    """
    if landmark_set is None:
        return [0] * expected_count * 3
    return [v for lm in landmark_set.landmark for v in (lm.x, lm.y, lm.z)]


def run():
    """
    Run live action recognition from the default webcam.

    Per frame: extract pose + hand landmarks with MediaPipe Holistic,
    build the 225-value feature vector ((33 pose + 21 right-hand +
    21 left-hand landmarks) x (x, y, z)), classify it with the global
    TFLite interpreter, and overlay the predicted label on the video.
    Press 'q' in the display window to quit.
    """
    cap = cv2.VideoCapture(0)  # open the default camera
    font_path = 'simsun.ttc'  # loop-invariant; hoisted out of the loop
    try:
        with mp_holistic.Holistic(min_detection_confidence=0.5,
                                  min_tracking_confidence=0.5) as holistic:
            while cap.isOpened():
                ret, frame = cap.read()  # read one frame
                if not ret:
                    # Fix: the original ignored `ret` and crashed in
                    # cvtColor with frame=None when the camera stalled.
                    break

                # MediaPipe expects RGB; cv2 delivers BGR.
                image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                results = holistic.process(image)

                # Face-mesh drawing (FACEMESH_CONTOURS) is intentionally
                # disabled; only pose and hands are visualized.

                # Pose skeleton
                mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,
                                          mp_drawing.DrawingSpec(color=(80, 22, 10), thickness=2, circle_radius=4),
                                          mp_drawing.DrawingSpec(color=(80, 44, 121), thickness=2, circle_radius=2))
                # Right hand
                mp_drawing.draw_landmarks(frame, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
                                          mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=2, circle_radius=4),
                                          mp_drawing.DrawingSpec(color=(121, 44, 250), thickness=2, circle_radius=2))
                # Left hand
                mp_drawing.draw_landmarks(frame, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
                                          mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=4),
                                          mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2))

                # Build the fixed-length feature vector, zero-filling any
                # undetected part. (Fix: replaces three bare `except:`
                # clauses that swallowed every error with explicit
                # None checks.)
                all_feature = (_flatten_landmarks(results.pose_landmarks, 33)
                               + _flatten_landmarks(results.right_hand_landmarks, 21)
                               + _flatten_landmarks(results.left_hand_landmarks, 21))

                # Shape (1, 225, 1): one sample, 225 steps, 1 channel —
                # the layout the 1D-conv TFLite model was exported with.
                input_data = np.array(all_feature, dtype=np.float32).reshape(1, 225, 1)
                interpreter.set_tensor(input_details[0]['index'], input_data)
                interpreter.invoke()
                output_data = interpreter.get_tensor(output_details[0]['index'])

                predict_num = np.argmax(output_data)  # predicted class index
                predict = pose_predict(predict_num)  # index -> label text
                print(output_data, predict)

                # Overlay the prediction on the frame (PIL path handles
                # the Chinese label text).
                frame = putText(frame, predict, (0, 10), font_path, (0, 0, 255), 32)
                cv2.imshow('Holistic Model Detection', frame)
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
    finally:
        # Fix: the original skipped cleanup if anything in the loop
        # raised; always release the camera and close the windows.
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    run()  # start the webcam recognition loop when executed as a script
