import json

import mediapipe as mp
import cv2
import numpy as np
from PIL import ImageFont, ImageDraw, Image
from tensorflow import keras, lite
from marshmallow import Schema, fields, EXCLUDE


class Holistic(object):
    """Thin wrapper around MediaPipe Holistic: runs detection and draws landmarks."""

    def __init__(self):
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_holistic = mp.solutions.holistic
        self.holistic = self.mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5)

    def process(self, frame):
        """Run the holistic model on a BGR frame and return the raw MediaPipe results."""
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB input
        return self.holistic.process(rgb)

    def draw(self, frame, results):
        """Overlay face / pose / hand landmarks on the frame and return it."""
        spec = self.mp_drawing.DrawingSpec
        # (landmarks, connections, landmark spec, connection spec) per component
        overlays = (
            (results.face_landmarks, self.mp_holistic.FACEMESH_CONTOURS,
             spec(color=(80, 110, 10), thickness=1, circle_radius=1),
             spec(color=(80, 256, 121), thickness=1, circle_radius=1)),
            (results.pose_landmarks, self.mp_holistic.POSE_CONNECTIONS,
             spec(color=(80, 22, 10), thickness=2, circle_radius=4),
             spec(color=(80, 44, 121), thickness=2, circle_radius=2)),
            (results.right_hand_landmarks, self.mp_holistic.HAND_CONNECTIONS,
             spec(color=(121, 22, 76), thickness=2, circle_radius=4),
             spec(color=(121, 44, 250), thickness=2, circle_radius=2)),
            (results.left_hand_landmarks, self.mp_holistic.HAND_CONNECTIONS,
             spec(color=(245, 117, 66), thickness=2, circle_radius=4),
             spec(color=(245, 66, 230), thickness=2, circle_radius=2)),
        )
        for landmarks, connections, point_spec, line_spec in overlays:
            self.mp_drawing.draw_landmarks(frame, landmarks, connections, point_spec, line_spec)
        return frame

    def process_frame(self, frame):
        """Detect landmarks on a video frame and return the annotated frame."""
        return self.draw(frame, self.process(frame))

    @staticmethod
    def show(frame, windows_name: str = "Holistic"):
        """Display the frame in a local window; pressing 'q' terminates the process."""
        cv2.imshow(windows_name, frame)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            exit()


class ResultEntity:
    """Body-language classification result parsed from a model score array."""

    def __init__(self, array):
        # Class index -> human-readable label (Chinese class names used by the UI).
        self.label = {0: '保持安静', 1: '打招呼', 2: '敬佩', 3: '思考', 4: '拒绝', 5: '无奈', 6: '无动作'}

        self.predict, self.detail = self.parse_array(array)

    def parse_array(self, array):
        """Split a (1, n_classes) score array into (top-1 label, per-label scores).

        Scores are cast to plain Python ``float`` (and the argmax to ``int``)
        so the result serializes cleanly (json / marshmallow) instead of
        carrying numpy scalar types.
        """
        index = int(np.argmax(array))
        predict = self.label.get(index)
        detail = {label: float(array[0][i]) for i, label in self.label.items()}
        return predict, detail


class ResultSchema(Schema):
    """Marshmallow schema for serializing a ResultEntity (predict + detail)."""
    predict = fields.Str(data_key='predict')  # top-1 predicted label
    detail = fields.Dict(data_key='detail')  # label -> score mapping

    class Meta:
        # Silently drop any attributes on the source object not declared above.
        unknown = EXCLUDE


class BodyLanguage(object):
    """Recognize body language from video frames.

    Pipeline: MediaPipe Holistic landmarks -> flattened 225-value feature
    vector (33 pose + 21 right-hand + 21 left-hand landmarks, x/y/z each)
    -> 1D-conv classifier, served either as a Keras model or its TFLite
    export depending on *mode*.
    """

    def __init__(self, mode: str = 'lite'):
        """Initialize the recognizer; *mode* is 'lite' (TFLite) or 'tf' (Keras)."""
        self.holistic = Holistic()
        self.model = None
        self.mode = mode
        if self.mode == 'lite':
            self.init_tflite()
        else:
            self.init_weights()

    def init_weights(self):
        """Load the Keras model from disk (no-op if already loaded)."""
        if self.model is None:
            model_name = 'bestmodel_all_features'
            self.model = keras.models.load_model(model_name)
            print(f'successfully load {model_name}')

    def init_tflite(self):
        """Load the TFLite model and cache its input/output tensor details."""
        self.interpreter = lite.Interpreter(model_path='body_recognition_1dconv.tflite')
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

    @staticmethod
    def _flatten_landmarks(landmarks, count):
        """Flatten a MediaPipe landmark list to [x0, y0, z0, x1, y1, z1, ...].

        Returns ``count * 3`` zeros when the component was not detected
        (``landmarks`` is None), keeping the feature-vector length fixed.
        Replaces the original bare ``except:`` zero-fill with an explicit
        None check so real errors are no longer swallowed.
        """
        if landmarks is None:
            return [0] * count * 3
        return [value
                for point in landmarks.landmark
                for value in (point.x, point.y, point.z)]

    def preprocess(self, results):
        """Build the (1, 225, 1) feature tensor from MediaPipe results."""
        data = (self._flatten_landmarks(results.pose_landmarks, 33)
                + self._flatten_landmarks(results.right_hand_landmarks, 21)
                + self._flatten_landmarks(results.left_hand_landmarks, 21))
        data = np.array(data).reshape(1, 225)
        data = np.expand_dims(data, axis=2)  # model expects a trailing channel axis
        return data

    def keras_inference(self, data):
        """Run the Keras model and return the raw score array."""
        return self.model.predict(data)

    def lite_inference(self, data):
        """Run the TFLite interpreter and return the raw score array."""
        input_data = np.array(data, dtype=np.float32)
        self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
        self.interpreter.invoke()
        return self.interpreter.get_tensor(self.output_details[0]['index'])

    def process(self, frame):
        """Run the full pipeline on a BGR frame and return a ResultEntity."""
        # Kept on self so process_frame can reuse the landmarks for drawing.
        self.holistic_results = self.holistic.process(frame)
        data = self.preprocess(self.holistic_results)
        if self.mode == 'lite':
            array = self.lite_inference(data)
        else:
            array = self.keras_inference(data)
        return ResultEntity(array)

    def process_frame(self, frame):
        """Process a video frame; return it annotated with the prediction and landmarks."""
        result = self.process(frame)
        frame = self.draw(frame, result.predict)
        self.holistic.draw(frame, self.holistic_results)  # overlay detected landmarks
        return frame

    def draw(self, frame, results):
        """Render the predicted label text onto the frame."""
        # A CJK-capable TrueType font is required for the Chinese labels.
        font_path = 'simsun.ttc'
        frame = putText(frame, results, (0, 10), font_path, (0, 0, 255), 32)
        return frame

    def show(self, frame, windows_name: str = "Body Language"):
        """Display the frame in a local window (delegates to Holistic.show)."""
        self.holistic.show(frame, windows_name)


def putText(img, text, org, font_path, color=(0, 0, 255), font_size=20):
    """Render text onto an OpenCV image using a TrueType font (CJK capable).

    :param img: image as a numpy array (as read by cv2)
    :param text: string to render
    :param org: (x, y) coordinates of the text's top-left corner
    :param font_path: path to a .ttf/.ttc font file
    :param color: font color as (B, G, R)
    :param font_size: font size in points
    :return: a new image array with the text drawn
    """
    canvas = Image.fromarray(img)
    pen = ImageDraw.Draw(canvas)
    font = ImageFont.truetype(font_path, font_size)
    blue, green, red = color
    # NOTE(review): the fourth component (alpha 0) is kept verbatim from the
    # original implementation — confirm PIL's handling of it on this image mode.
    pen.text(org, text, font=font, fill=(blue, green, red, 0))
    return np.array(canvas)


def run():
    """Open the default camera and run live body-language recognition.

    Runs until the camera closes, a frame read fails, or the user presses
    'q' in the display window (which exits the process via Holistic.show).
    """
    cap = cv2.VideoCapture(0)  # open the default webcam
    body_language = BodyLanguage()
    try:
        while cap.isOpened():
            ret, frame = cap.read()  # grab one frame
            if not ret:
                # Camera disconnected or stream ended: frame would be None.
                break

            frame = body_language.process_frame(frame)
            body_language.show(frame)
    finally:
        # Release the camera and windows even if processing raises.
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    # Entry point: start the live-camera demo when executed as a script.
    run()
