import os

import cv2
import numpy as np
import rclpy
from ament_index_python.packages import get_package_share_directory
from geometry_msgs.msg import PoseStamped, Point
from rclpy.executors import ExternalShutdownException
from rclpy.node import Node
from ultralytics import YOLO
from visualization_msgs.msg import Marker

# A helper function to convert rotation vector to quaternion
# This is often needed for Pose messages in ROS
def rvec_to_quaternion(rvec):
    """
    Convert a rotation vector (Rodrigues / axis-angle vector) to a quaternion.

    The direction of ``rvec`` is the rotation axis and its Euclidean norm is
    the rotation angle in radians — exactly the representation returned by
    ``cv2.solvePnP``. Converting directly avoids the round-trip through a
    rotation matrix (and the cv2 dependency) used by the previous version.

    Args:
        rvec: Array-like of 3 floats in any shape, e.g. the (3, 1) array
            produced by ``cv2.solvePnP`` or a flat 3-vector.

    Returns:
        np.ndarray of shape (4,), the unit quaternion in ``[x, y, z, w]``
        order (matching the previous implementation's layout).
    """
    r = np.asarray(rvec, dtype=np.float64).reshape(3)
    angle = np.linalg.norm(r)

    # A (numerically) zero rotation has no well-defined axis; return the
    # identity quaternion explicitly instead of dividing by ~0.
    if angle < 1e-12:
        return np.array([0.0, 0.0, 0.0, 1.0])

    axis = r / angle
    half = 0.5 * angle
    s = np.sin(half)
    return np.array([axis[0] * s, axis[1] * s, axis[2] * s, np.cos(half)])


class BasketballDetectorNode(Node):
    """ROS 2 node that detects basketballs in a video file with YOLO,
    estimates each ball's 3D pose via PnP, and publishes the results.

    Publishes:
        /basketball_pose (geometry_msgs/PoseStamped): latest ball pose in
            the camera frame.
        /basketball_trajectory (visualization_msgs/Marker): LINE_STRIP of
            every ball position seen so far, for RViz visualization.
    """

    def __init__(self):
        super().__init__('basketball_detector_node')

        # --- Parameter declarations ---
        self.declare_parameter('video_path', r'/home/hitcrt/ujiufile/hitcrtvision/camp/test4/rgb.mp4')
        self.declare_parameter('conf_threshold', 0.6)
        self.declare_parameter('basketball_radius_m', 0.123)  # standard size-7 basketball radius (m)
        self.declare_parameter('camera_frame_id', 'camera_link')

        # Resolve the YOLO weights bundled in this package's share directory
        # so the node works regardless of the working directory.
        package_share_directory = get_package_share_directory('python_basketball_detector')
        yolo_model_path = os.path.join(package_share_directory, 'models', 'yolov11npost.pt')

        # --- Read parameter values ---
        video_path = self.get_parameter('video_path').get_parameter_value().string_value
        self.conf_threshold = self.get_parameter('conf_threshold').get_parameter_value().double_value
        self.basketball_radius = self.get_parameter('basketball_radius_m').get_parameter_value().double_value
        self.camera_frame_id = self.get_parameter('camera_frame_id').get_parameter_value().string_value

        # --- Camera intrinsics and ball 3D model points ---
        # NOTE(review): intrinsics/distortion are hard-coded for one specific
        # camera; consider loading them from a calibration file or parameters.
        self.camera_matrix = np.array([[1.02676141e+03, 0.0, 6.40053118e+02],
                                       [0.0, 1.02660214e+03, 3.42002843e+02],
                                       [0.0, 0.0, 1.0]], dtype=np.float32)
        self.dist_coeffs = np.array([0.10857543, -0.2257256, 0.00053048, -0.00089479, 0.09388436], dtype=np.float32)

        # Five reference points on the ball (center plus four points at one
        # radius along ±X/±Y) used as the PnP object model.
        self.object_points = np.array([
            [0, 0, 0],  # ball center
            [self.basketball_radius, 0, 0], [-self.basketball_radius, 0, 0],
            [0, self.basketball_radius, 0], [0, -self.basketball_radius, 0]
        ], dtype=np.float32)

        # --- Load the detector and open the video ---
        self.get_logger().info(f"正在从以下路径加载模型: {yolo_model_path}")
        self.model = YOLO(yolo_model_path)
        self.cap = cv2.VideoCapture(video_path)
        if not self.cap.isOpened():
            self.get_logger().error(f"无法打开视频文件: {video_path}")
            raise SystemExit

        # --- ROS 2 publishers ---
        self.pose_publisher = self.create_publisher(PoseStamped, '/basketball_pose', 10)
        self.trajectory_publisher = self.create_publisher(Marker, '/basketball_trajectory', 10)

        # --- State ---
        self.frame_count = 0
        # NOTE(review): grows without bound for long videos; cap its length
        # if the node is expected to run for extended periods.
        self.trajectory_points = []

        # --- Timer that paces frame processing at the video's FPS ---
        video_fps = self.cap.get(cv2.CAP_PROP_FPS)
        if video_fps <= 0:
            # Some containers report 0 FPS; guard against division by zero.
            self.get_logger().warn('Video reported non-positive FPS; falling back to 30.')
            video_fps = 30.0
        timer_period = 1.0 / video_fps
        self.timer = self.create_timer(timer_period, self.process_frame)

        self.get_logger().info('篮球检测节点已初始化完成.')

    def process_frame(self):
        """Timer callback: read one frame, detect balls, publish results.

        On end-of-video (or read error) it cancels the timer and shuts rclpy
        down so that ``rclpy.spin()`` in ``main`` returns; the node itself is
        destroyed by ``main``, not from inside its own callback (destroying a
        node while the executor is iterating it is unsafe).
        """
        ret, frame = self.cap.read()
        if not ret:
            self.get_logger().info('视频处理完成或读取出错，节点将关闭。')
            self.timer.cancel()
            rclpy.shutdown()
            return

        self.frame_count += 1

        # 1. Find all basketballs in this frame.
        detections = self.find_basketballs(frame)
        self.get_logger().info(f"--- 帧 {self.frame_count}: 检测到 {len(detections)} 个篮球 ---")

        # 2. For each detection: solve PnP, then publish pose and trajectory.
        for detection in detections:
            success, rvec, tvec = self.solve_pnp_for_ball(detection)
            if success:
                self.publish_pose(tvec, rvec)
                self.publish_trajectory(tvec)
                # Optional on-frame overlay for debugging.
                self.draw_debug_info(frame, tvec, detection['box'])

        # Optional: display the annotated frame.
        # cv2.imshow("Basketball Detection", frame)
        cv2.waitKey(1)

    def find_basketballs(self, frame):
        """Run YOLO on one frame and return 2D info for each basketball.

        Returns:
            list[dict]: one entry per detection with keys ``center_x``,
            ``center_y``, ``pixel_radius`` (mean of half-width/half-height)
            and ``box`` (x1, y1, x2, y2).
        """
        results = self.model(frame, conf=self.conf_threshold, verbose=False)
        detections = []

        for result in results:
            for box in result.boxes:
                # Keep only the 'basketball' class.
                if self.model.names[int(box.cls[0])] == 'basketball':
                    x1, y1, x2, y2 = [int(coord) for coord in box.xyxy[0]]
                    center_x = (x1 + x2) / 2
                    center_y = (y1 + y2) / 2
                    # Average of the box half-extents as the pixel radius.
                    pixel_radius = ((x2 - x1) + (y2 - y1)) / 4.0

                    detections.append({
                        'center_x': center_x,
                        'center_y': center_y,
                        'pixel_radius': pixel_radius,
                        'box': (x1, y1, x2, y2)
                    })
        return detections

    def solve_pnp_for_ball(self, detection):
        """Solve PnP for a single detection.

        Maps the five model points (center plus ±radius along the image
        axes) to the detected circle's center and extremes.

        Returns:
            (success, rvec, tvec) exactly as returned by ``cv2.solvePnP``.
        """
        cx, cy, radius = detection['center_x'], detection['center_y'], detection['pixel_radius']

        image_points = np.array([
            [cx, cy], [cx + radius, cy], [cx - radius, cy],
            [cx, cy - radius], [cx, cy + radius]
        ], dtype=np.float32)

        success, rvec, tvec = cv2.solvePnP(self.object_points, image_points,
                                           self.camera_matrix, self.dist_coeffs)
        return success, rvec, tvec

    def publish_pose(self, tvec, rvec):
        """Publish the ball pose as a PoseStamped in the camera frame."""
        # Flatten first: float() on a (3, 1) sub-array relies on size-1
        # array->scalar conversion, deprecated since NumPy 1.25.
        x, y, z = (float(v) for v in np.asarray(tvec).reshape(3))

        pose_msg = PoseStamped()
        pose_msg.header.stamp = self.get_clock().now().to_msg()
        pose_msg.header.frame_id = self.camera_frame_id

        # Position.
        pose_msg.pose.position.x = x
        pose_msg.pose.position.y = y
        pose_msg.pose.position.z = z

        # Orientation (rotation vector -> quaternion).
        quaternion = rvec_to_quaternion(rvec)
        pose_msg.pose.orientation.x = float(quaternion[0])
        pose_msg.pose.orientation.y = float(quaternion[1])
        pose_msg.pose.orientation.z = float(quaternion[2])
        pose_msg.pose.orientation.w = float(quaternion[3])

        self.pose_publisher.publish(pose_msg)
        self.get_logger().info(f"发布篮球位置: X={x:.2f}, Y={y:.2f}, Z={z:.2f}")

    def publish_trajectory(self, tvec):
        """Append the position to the trajectory and publish it as a Marker."""
        x, y, z = (float(v) for v in np.asarray(tvec).reshape(3))

        p = Point()
        p.x, p.y, p.z = x, y, z
        self.trajectory_points.append(p)

        marker_msg = Marker()
        marker_msg.header.stamp = self.get_clock().now().to_msg()
        marker_msg.header.frame_id = self.camera_frame_id
        marker_msg.ns = "basketball_trajectory"
        marker_msg.id = 0
        marker_msg.type = Marker.LINE_STRIP
        marker_msg.action = Marker.ADD
        marker_msg.points = self.trajectory_points

        # Line appearance (orange, 2 cm wide, fully opaque).
        marker_msg.scale.x = 0.02
        marker_msg.color.a = 1.0
        marker_msg.color.r = 1.0
        marker_msg.color.g = 0.5
        marker_msg.color.b = 0.0

        self.trajectory_publisher.publish(marker_msg)

    def draw_debug_info(self, frame, tvec, box):
        """Draw the bounding box and estimated depth onto the frame."""
        x1, y1, x2, y2 = box
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

        depth = float(np.asarray(tvec).reshape(3)[2])
        text = f"Z: {depth:.2f} m"
        cv2.putText(frame, text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

    def __del__(self):
        # Release resources on destruction. Guard the attribute: __init__
        # may have raised before self.cap was assigned.
        cap = getattr(self, 'cap', None)
        if cap is not None:
            cap.release()
        cv2.destroyAllWindows()


def main(args=None):
    """Entry point: create the detector node and spin it until shutdown.

    Robustness fixes over the previous version:
    - Node construction happens inside the try block, so a failure (e.g. the
      SystemExit raised when the video cannot be opened) still reaches the
      finally clause and the rclpy context is shut down instead of leaked.
    - ExternalShutdownException (raised by spin when the node shuts rclpy
      down at end-of-video) is handled like Ctrl-C.
    - rclpy.shutdown() is only called if the context is still up, avoiding
      the RuntimeError from a double shutdown.
    """
    rclpy.init(args=args)
    node = None
    try:
        node = BasketballDetectorNode()
        rclpy.spin(node)
    except (KeyboardInterrupt, ExternalShutdownException):
        pass
    finally:
        if node is not None:
            node.destroy_node()
        if rclpy.ok():
            rclpy.shutdown()


if __name__ == '__main__':
    main()
