"""
Mediapipe入门——搭建姿态检测模型并实时输出人体关节点3d坐标（2024.1.4更新）
https://blog.csdn.net/kalakalabala/article/details/121530651
"""


import cv2
import numpy as np
import os
os.environ["DISPLAY"] = ":0"
from threading import Thread
import mediapipe as mp
from unitree_sdk2py.core.channel import ChannelFactoryInitialize
from unitree_sdk2py.go2.video.video_client import VideoClient


from rknnWorkflow import GestureWorkflow

from ui import OpenCVUI

# Set the DISPLAY environment variable at program start (equivalent to `export DISPLAY=:0`);
# the actual assignment is done right after the os import above.


# Frame counter shared with the capture thread: gesture inference runs on
# every 5th frame only, to keep the UI responsive.
frame_cnt = 0

def _draw_hand_landmarks(image, results):
    """Draw MediaPipe hand landmarks/connections onto *image* (RGB) in place.

    No-op when *results* contains no detected hands.
    """
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(
                image,
                hand_landmarks,
                mp_hands.HAND_CONNECTIONS,
                mp_drawing_styles.get_default_hand_landmarks_style(),
                mp_drawing_styles.get_default_hand_connections_style())


def _classify_gesture(results):
    """Classify the first detected hand with the RKNN model and show the result.

    Uses the *world* landmarks (metric 3D coordinates) of the first hand,
    flattens them to a (1, 63) float32 vector, runs RKNN-Lite inference and
    writes the label + confidence into the UI text label. No-op when no hand
    was detected in *results*.
    """
    if not results.multi_hand_landmarks:
        return

    # Only the first detected hand is classified.
    landmarks = results.multi_hand_world_landmarks[0]

    # Flatten 21 landmarks x (x, y, z) into a 63-element vector.
    keypoints = np.array(
        [coord for lm in landmarks.landmark for coord in (lm.x, lm.y, lm.z)],
        dtype=np.float32)
    normalized_kps = model.normalize_keypoints(keypoints)
    input_data = normalized_kps.reshape(1, -1).astype(np.float32)

    outputs = model.rknn_lite.inference(inputs=[input_data])
    predictions = outputs[0][0]

    class_id = np.argmax(predictions)
    confidence = float(predictions[class_id])

    # Guard against a model emitting more classes than labels are configured.
    label = (model.class_labels[class_id]
             if class_id < len(model.class_labels)
             else f"未知类别 {class_id}")

    text_label.set_text(f"识别结果: {label} ({confidence*100:.2f}%)")


def get_image():
    """Capture-thread body: grab frames from the Go2 camera, overlay hand
    landmarks, display them in the UI, and run gesture inference on every
    5th frame.

    Runs until ``ui.running`` becomes False, or exits early if a frame
    cannot be fetched from the robot.
    """
    global frame_cnt
    while ui.running:
        code, data = client.GetImageSample()
        if code != 0 or data is None:
            print("Get image sample error. code:", code)
            return None

        image_data = np.frombuffer(bytes(data), dtype=np.uint8)
        image = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
        image = cv2.resize(image, (470, 390), interpolation=cv2.INTER_LINEAR)

        # MediaPipe expects RGB; OpenCV decodes as BGR.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = hands.process(image)
        _draw_hand_landmarks(image, results)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        image_label.set_image(image)

        frame_cnt += 1
        if frame_cnt % 5 == 0:
            frame_cnt = 0
            _classify_gesture(results)



def exit_click():
    """Exit-button callback: log the shutdown and tear down the UI.

    Destroying the UI clears ``ui.running``, which also terminates the
    background capture thread's loop.
    """
    print("退出程序")
    ui.destroy()




# --- UI layout -----------------------------------------------------------
ui = OpenCVUI("Hand", 480, 800, fullscreen=True)

# Live camera view (top) ...
image_label = ui.add_label(5, 5, 470, 390,  align="center")

# ... and the gesture-recognition result text (middle).
ui.add_frame(5, 405, 470, 300)
text_label = ui.add_label(5, 500, 470, 300, text="这是一个标签", align="center")

ui.add_button(180, 740, 120, 40, "退出", exit_click)


# --- Go2 robot connection ------------------------------------------------
# NOTE(review): the network interface name is hard-coded — confirm it matches
# the deployment machine.
ChannelFactoryInitialize(0, "enx00e0986113a6")

client = VideoClient()
client.SetTimeout(3.0)
client.Init()

# --- Gesture classification model ---------------------------------------
# Resolve the model file relative to this script so the current working
# directory does not matter.
script_dir = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(script_dir, "model.rknn")
model = GestureWorkflow(model_path)

# MediaPipe Hands pipeline used by the capture thread.
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
hands = mp_hands.Hands(min_detection_confidence=0.5, min_tracking_confidence=0.5)


# Run capture + inference off the UI thread so the UI stays responsive.
thread = Thread(target=get_image)
thread.start()

ui.mainloop()

# mainloop() returns once the UI has been destroyed; ui.running is then
# False, so the capture loop exits. Join the thread and release MediaPipe
# resources for a clean shutdown.
thread.join()
hands.close()





