import cv2
import mediapipe as mp
import numpy as np
import pyrealsense2 as rs
import math
import time
import ctypes
import os

# --- C++ shared-library (shared-memory bridge) setup ---------------------
# Resolve the library path relative to this script's own directory so the
# load works regardless of the current working directory.
script_path = os.path.abspath(__file__)
script_dir = os.path.dirname(script_path)

# Load the C++ shared library that exposes setdata()/getdata().
libLoad = ctypes.cdll.LoadLibrary
libPath = f"{script_dir}/../lib/libsharedTest.so"
print(libPath)
sharelib = libLoad(libPath)

class SharedData(ctypes.Structure):
    # ctypes mirror of the C struct exchanged with libsharedTest.so via
    # setdata()/getdata().  Field order and types MUST match the C-side
    # definition exactly, or the data will be misinterpreted across the
    # language boundary.
    _fields_ = [
        ('type', ctypes.c_int),        # message type (move_robot_to sends 5)
        ('data', ctypes.c_double * 6)  # pose payload: x, y, z, roll, pitch, yaw
    ]

# Reusable struct instance that move_robot_to() fills in before each send.
shareddata = SharedData()
# Declare the C signatures for ctypes.
# BUG FIX: the original assigned to `argtype` (singular), which is not a
# ctypes attribute and is silently ignored; the correct attribute is
# `argtypes` and it takes a sequence of parameter types.
sharelib.setdata.argtypes = [SharedData]
sharelib.getdata.restype = SharedData

# Initialize the MediaPipe hand-landmark model: video mode (not static
# images), track at most one hand, detection threshold 0.7.
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(static_image_mode=False, max_num_hands=1, min_detection_confidence=0.7)
mp_drawing = mp.solutions.drawing_utils

# Configure the RealSense camera: 640x480 depth (z16) and color (bgr8)
# streams at 30 fps.  NOTE: this resolution is assumed again in
# wrist_to_camera_coordinates() — keep the two in sync.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming.
pipeline.start(config)

# Homogeneous 4x4 camera-to-robot extrinsic transform.  Placeholder:
# identity (no rotation, no translation) — replace with the calibrated
# transform for the actual setup.
camera_to_robot_transform = np.identity(4, dtype=int)

def wrist_to_camera_coordinates(wrist_landmark, depth_frame,
                                width=640, height=480, focal_length=525.0):
    """De-project a normalized MediaPipe landmark into camera-frame meters.

    Uses a pinhole model with the principal point assumed at the image
    center: the landmark's normalized (x, y) is scaled to pixels, depth is
    read from the depth frame at that pixel, and the offset from the
    center is scaled by depth / focal_length.

    Args:
        wrist_landmark: object with normalized .x/.y coordinates in [0, 1].
        depth_frame: RealSense depth frame providing get_distance(px, py)
            in meters.
        width, height: image resolution in pixels; defaults match the
            640x480 streams configured above (generalized from the
            previously hard-coded constants).
        focal_length: focal length in pixels (fx == fy assumed);
            525.0 is a placeholder — use the calibrated camera value.

    Returns:
        np.ndarray [x, y, z] in meters, or None when the depth at that
        pixel is invalid (RealSense reports 0 for missing depth).
    """
    depth_value = depth_frame.get_distance(int(wrist_landmark.x * width),
                                           int(wrist_landmark.y * height))
    if depth_value == 0:
        return None  # no valid depth measurement at this pixel

    # Pinhole de-projection relative to the assumed image center.
    x = (wrist_landmark.x * width - width / 2) * depth_value / focal_length
    y = (wrist_landmark.y * height - height / 2) * depth_value / focal_length
    z = depth_value
    return np.array([x, y, z])

def calculate_pose_in_camera(hand_landmarks, depth_frame):
    """Estimate the hand pose (position + orientation) in the camera frame.

    The wrist landmark gives the position (via depth de-projection) and
    the wrist -> index-finger-tip vector gives a rough orientation.

    Returns:
        np.ndarray [x, y, z, roll, pitch, yaw] with x/y/z in meters and
        angles in radians, or None when no valid pose can be computed
        (invalid depth, or wrist and finger tip coincide).
    """
    wrist_landmark = hand_landmarks.landmark[mp_hands.HandLandmark.WRIST]
    index_finger_tip = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP]

    # Hand direction in normalized landmark coordinates.
    wrist_pos = np.array([wrist_landmark.x, wrist_landmark.y, wrist_landmark.z])
    finger_tip_pos = np.array([index_finger_tip.x, index_finger_tip.y, index_finger_tip.z])
    direction_vector = finger_tip_pos - wrist_pos

    # BUG FIX: guard the normalization — if the two landmarks coincide the
    # original divided by zero and propagated NaNs into the robot command.
    norm = np.linalg.norm(direction_vector)
    if norm == 0:
        return None
    direction_vector /= norm

    # Convert the unit direction vector to Euler angles (hand assumed
    # pointing up).  Clamp the asin argument against float rounding that
    # could push |v[2]| marginally above 1 and raise ValueError.
    # NOTE(review): roll derived from a single direction vector is
    # under-determined — verify against the robot's angle convention.
    yaw = math.atan2(direction_vector[1], direction_vector[0])
    pitch = math.asin(max(-1.0, min(1.0, direction_vector[2])))
    roll = math.atan2(direction_vector[2], direction_vector[1])

    # Wrist position in the camera frame (meters).
    wrist_coordinates = wrist_to_camera_coordinates(wrist_landmark, depth_frame)
    if wrist_coordinates is not None:
        return np.array([wrist_coordinates[0], wrist_coordinates[1], wrist_coordinates[2], roll, pitch, yaw])
    return None
# Robot control function (example); a real deployment would run inverse
# kinematics on the C++ side.
def move_robot_to(hand_pose_in_camera):
    """Send a target end-effector pose to the robot via shared memory.

    Args:
        hand_pose_in_camera: array-like [x, y, z, roll, pitch, yaw], angles
            in radians.  Modified in place: the angles are converted to
            degrees before sending.
    """
    # Convert the Euler angles from radians to degrees.
    # BUG FIX: the original converted only roll and pitch, leaving yaw in
    # radians (inconsistent units sent to the robot), and used the
    # truncated constant 57.3 instead of the exact factor 180/pi.
    for i in (3, 4, 5):
        hand_pose_in_camera[i] = math.degrees(hand_pose_in_camera[i])
    # Disable scientific notation for the pose printout below.
    np.set_printoptions(suppress=True)
    # Pack the pose into the shared struct and hand it to the C++ side.
    shareddata.type = 5  # message type 5: end-effector pose command
    for i in range(0, 6):
        shareddata.data[i] = hand_pose_in_camera[i]
    sharelib.setdata(shareddata)  # write the data into shared memory
    print("移动机器人末端到位姿: ", hand_pose_in_camera)

def main():
    """Capture loop: detect a hand per frame and stream its pose to the robot."""
    while True:
        # Pull the next synchronized frame pair from the camera.
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        depth_frame = frames.get_depth_frame()
        if not color_frame or not depth_frame:
            continue

        # RealSense delivers BGR; MediaPipe expects RGB.  Mark the buffer
        # read-only during inference (MediaPipe performance hint), then
        # restore writeability for annotation.
        frame_rgb = cv2.cvtColor(np.asanyarray(color_frame.get_data()), cv2.COLOR_BGR2RGB)
        frame_rgb.flags.writeable = False
        results = hands.process(frame_rgb)
        frame_rgb.flags.writeable = True

        # Draw landmarks on a BGR copy for display.
        annotated_image = cv2.cvtColor(frame_rgb, cv2.COLOR_RGB2BGR)
        for hand_landmarks in (results.multi_hand_landmarks or []):
            mp_drawing.draw_landmarks(annotated_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            pose_in_camera = calculate_pose_in_camera(hand_landmarks, depth_frame)
            if pose_in_camera is not None:
                # Mapping into the robot frame would apply
                # camera_to_robot_transform here; currently the camera-frame
                # pose is sent directly.
                move_robot_to(pose_in_camera)

        cv2.imshow('Hand Detection', annotated_image)
        if cv2.waitKey(1) & 0xFF == 27:  # Esc quits
            break

    # Shut down the camera stream and close the display window.
    pipeline.stop()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
