#!/home/aixiaoxiao/miniconda3/envs/yolov8/bin/python
# -*- coding: utf-8 -*-

# 此程序用于实现视频分帧识别物体,并为所识别的物品添加矩形框，显示置信度、标签等，更新于2024/6/24
# 更新程序，用于显示实时三维坐标2024/6/24
# PWMFeedback: ['#021P1461!', '#022P0995!', '#023P2222!', '#024P0818!', '#025P1500!']

# [0.09189158511750145, -1.1898782175471343, 1.7011724219188729, 1.6069246423111792, 0.0]


import rospy
import cv2
import tf
import numpy as np
import pyrealsense2 as rs
from ultralytics import YOLO  # 将YOLOv8导入到该py文件中
from geometry_msgs.msg import PoseStamped
from scipy.spatial.transform import Rotation as R
 
# Load the detection model (custom weights "best.pt", YOLOv8 API)
model = YOLO("best.pt")  # weights file must be present in the working directory
 
# RealSense depth-camera configuration
pipeline = rs.pipeline()  # streaming pipeline (frame source for the whole script)
config = rs.config()  # stream configuration object
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)  # depth stream: 640x480, 16-bit depth, 30 fps
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)  # color stream: 640x480, BGR8, 30 fps
pipe_profile = pipeline.start(config)  # start streaming with the configuration above
align = rs.align(rs.stream.color)  # aligns depth frames into the color camera's viewport


# Park hand-eye calibration result (end_link -> camera)
translation = np.array([-0.0214793, -0.0486306, -0.0450709])  # x, y, z (m)
quaternion = [0.9956458782, -0.0894632558, 0.0188356071, 0.0181887597]  # w, x, y, z (scalar-first!)

# base_link -> end_link transform (hard-coded snapshot of one robot pose)
translation_base_to_end = np.array([-0.073, 0.007, 0.072])  # [x, y, z] (m)
quaternion_base_to_end = [0.655, 0.590, -0.316, -0.349]       # [x, y, z, w] (ROS quaternion order)

# Build the homogeneous hand-eye transform T_hand_eye (end_link -> camera).
# BUG FIX: `quaternion` above is stored scalar-FIRST (w, x, y, z), but
# scipy's R.from_quat expects scalar-LAST (x, y, z, w) by default.  The
# original code passed the w-first list straight through, which produced a
# wrong rotation matrix.  Reorder to (x, y, z, w) before converting.
quaternion_xyzw = [quaternion[1], quaternion[2], quaternion[3], quaternion[0]]
rotation = R.from_quat(quaternion_xyzw).as_matrix()
T_hand_eye = np.eye(4)
T_hand_eye[:3, :3] = rotation
T_hand_eye[:3, 3] = translation
# NOTE: the pipeline actually needs camera -> end_link, hence the inverse.
camera_link_to_TCP_link = np.linalg.inv(T_hand_eye)  # camera_link -> end_link (TCP)
print(f"T_hand_eye: \n {T_hand_eye}")

# Rotation from the ROS-ordered quaternion ([x, y, z, w] — already scipy's order).
rotation_base_to_end = R.from_quat(quaternion_base_to_end).as_matrix()
# Homogeneous transform T_base_to_end (base_link -> end_link).
T_base_to_end = np.eye(4)
T_base_to_end[:3, :3] = rotation_base_to_end
T_base_to_end[:3, 3] = translation_base_to_end
# Inverse: end_link -> base_link.
T_end_to_base = np.linalg.inv(T_base_to_end)
print(f"T_base_to_end: \n {T_base_to_end}")
print(f"T_end_to_base: \n {T_end_to_base}")

 
def get_aligned_images():
    """Grab one frameset from the RealSense pipeline, aligned to the color stream.

    Returns
    -------
    tuple (depth_intri, depth_frame, color_image)
        depth_intri : rs.intrinsics of the aligned depth stream (for deprojection)
        depth_frame : aligned depth frame (for per-pixel get_distance lookups)
        color_image : HxWx3 BGR numpy array of the color frame

    Note: the original version also built a JET-colormapped depth image and
    the color intrinsics on every call and then discarded them; that dead
    per-frame work has been removed.
    """
    frames = pipeline.wait_for_frames()  # block until a frameset is available
    aligned_frames = align.process(frames)  # re-project depth into the color viewport
    depth_frame = aligned_frames.get_depth_frame()  # aligned depth frame
    color_frame = aligned_frames.get_color_frame()  # color frame from the aligned set
    color_image = np.asanyarray(color_frame.get_data())  # color frame -> numpy array

    # Depth intrinsics of the aligned stream, needed by the deprojection step.
    depth_intri = depth_frame.profile.as_video_stream_profile().intrinsics

    return depth_intri, depth_frame, color_image
 
def compute_target_pose_in_base_link(ux, uy, distance, depth_intri, T_base_to_end, T_hand_eye):
    """
    Given a pixel (ux, uy) and the distance at that pixel, compute the 3D pose
    of the target in the base_link frame.

    Parameters:
    -----------
    ux : int
        The pixel's x-coordinate (column index).
    uy : int
        The pixel's y-coordinate (row index).
    distance : float
        The distance in meters at pixel (ux, uy).
    depth_intri : rs.intrinsics
        The RealSense intrinsics for the depth frame.
    T_base_to_end : np.ndarray
        A 4x4 homogeneous transform from base_link to end effector (end_link).
    T_hand_eye : np.ndarray
        A 4x4 homogeneous transform from end_link to camera_link (the hand-eye
        transform).

    Returns:
    --------
    PoseStamped
        The pose of the target in base_link coordinates, stamped with the
        current ROS time.
    """
    # 1) Deproject the pixel (ux, uy) to 3D camera coordinates (meters).
    camera_xyz = rs.rs2_deproject_pixel_to_point(depth_intri, (ux, uy), distance)

    # 2) Sign flip required by this camera mounting.
    #    NOTE(review): negating all three axes assumes the camera frame is
    #    rotated 180 deg relative to the expected one — confirm on the robot.
    camera_xyz = np.array(camera_xyz) * -1

    # 3) Translation-only transform camera_link -> target_link
    #    (the target's orientation is taken as the camera's orientation).
    camera_link_to_target_link = np.eye(4)
    camera_link_to_target_link[:3, 3] = camera_xyz

    # 4) Chain the transforms: base <- end <- camera <- target.
    target_link_to_base_link = T_base_to_end @ T_hand_eye @ camera_link_to_target_link

    # 5) Fill in the PoseStamped translation.
    target_pose = PoseStamped()
    target_pose.header.frame_id = "base_link"
    # BUG FIX: the message previously carried no timestamp, which breaks any
    # downstream consumer doing TF/time lookups.  Stamp it with current time
    # (valid here: rospy.init_node runs before the first call).
    target_pose.header.stamp = rospy.Time.now()
    target_pose.pose.position.x = target_link_to_base_link[0, 3]
    target_pose.pose.position.y = target_link_to_base_link[1, 3]
    target_pose.pose.position.z = target_link_to_base_link[2, 3]

    # 6) Convert the rotation part to a quaternion.
    #    tf.transformations.quaternion_from_matrix expects a 4x4 transform.
    quaternion = tf.transformations.quaternion_from_matrix(target_link_to_base_link)
    target_pose.pose.orientation.x = quaternion[0]
    target_pose.pose.orientation.y = quaternion[1]
    target_pose.pose.orientation.z = quaternion[2]
    target_pose.pose.orientation.w = quaternion[3]

    return target_pose

if __name__ == '__main__':
    rospy.init_node('predict_node', anonymous=True)
    # Publisher for the detected target pose in base_link coordinates.
    pub = rospy.Publisher("/camera_target_pose", PoseStamped, queue_size=10)
    # Loop/publish pacing: 15 Hz.
    rate = rospy.Rate(15)

    # Create the display window once, not on every frame (original re-created
    # it inside the loop each iteration).
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)

    try:
        while not rospy.is_shutdown():
            # Grab an aligned depth/color frame pair.
            depth_intri, depth_frame, color_image = get_aligned_images()
            # Run YOLO detection on the current color frame.
            # (Tracking variant: model.track(..., persist=True))
            results = model.predict([color_image], save=False)

            for result in results:
                # Boxes as [center_x, center_y, width, height] per detection.
                boxes = result.boxes.xywh.tolist()
                # Frame rendered with YOLO's own box/label/confidence overlay.
                im_array = result.plot()

                for box in boxes:
                    # Integer pixel coordinates of the box center.
                    ux, uy = int(box[0]), int(box[1])
                    # Depth (meters) at the box center.
                    dis = depth_frame.get_distance(ux, uy)
                    # Camera-frame 3D point, rounded and sign-flipped, for the
                    # on-screen annotation only (the published pose recomputes
                    # this inside compute_target_pose_in_base_link).
                    camera_xyz = rs.rs2_deproject_pixel_to_point(
                        depth_intri, (ux, uy), dis)
                    camera_xyz = np.round(np.array(camera_xyz), 6)
                    camera_xyz *= -1
                    camera_xyz = list(camera_xyz)

                    # Mark the center and print its camera-frame coordinates.
                    cv2.circle(im_array, (ux, uy), 4, (255, 255, 255), 5)
                    cv2.putText(im_array, str(camera_xyz), (ux + 20, uy + 10), 0, 0.5,
                                [225, 255, 255], thickness=1, lineType=cv2.LINE_AA)

                    # Pose of this detection in base_link, then publish it.
                    target_pose = compute_target_pose_in_base_link(
                        ux, uy, dis,
                        depth_intri,
                        T_base_to_end,
                        T_hand_eye
                    )
                    print(f"target_link_pose: \n{target_pose}")
                    pub.publish(target_pose)

                    # Throttle per published pose (preserves original pacing).
                    rate.sleep()

                # Show the annotated frame (inside this loop so im_array is
                # always defined when displayed).
                cv2.imshow('RealSense', im_array)

            key = cv2.waitKey(1)
            # Press ESC or 'q' to quit.
            if key & 0xFF == ord('q') or key == 27:
                break
    finally:
        # BUG FIX: the original called pipeline.stop() both inside the loop on
        # 'q'/ESC and again after the loop — the second call raises a
        # RuntimeError.  Stop exactly once, and always clean up the window.
        cv2.destroyAllWindows()
        pipeline.stop()