#!/usr/bin/env python3
import rospy
import cv2
import threading
import queue
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge
from ultralytics import YOLO
import numpy as np
import torch
import message_filters
from geometry_msgs.msg import Point


class YoloRosDepthDetector:
    """ROS node that runs YOLO detection on an RGB stream, looks up the depth
    at each detection's center pixel, back-projects it to camera coordinates
    using the depth camera intrinsics, and publishes the result on /target_xyz.

    Inference runs in a dedicated daemon thread fed by a 1-slot queue so that
    slow model inference never blocks the ROS callback and only the newest
    synchronized RGB/depth pair is processed.
    """

    def __init__(self):
        rospy.init_node('yolo_ros_depth_detector', anonymous=True)
        self.target_pub = rospy.Publisher("/target_xyz", Point, queue_size=10)

        # Load the YOLO model, moving it to the GPU when CUDA is available.
        self.model = YOLO("bestcolor.pt")
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model.to(device)
        rospy.loginfo("是否启用CUDA: %s", torch.cuda.is_available())

        self.bridge = CvBridge()

        # Camera intrinsics (filled in by the first CameraInfo message).
        # None until received; infer_loop checks for readiness before use.
        self.fx = self.fy = self.cx = self.cy = None
        rospy.Subscriber("/camera/depth/camera_info", CameraInfo, self.caminfo_cb)

        # 1-slot queue: keep only the newest frame pair, never accumulate.
        self.frame_queue = queue.Queue(maxsize=1)
        self.stop_event = threading.Event()

        # Start the inference worker thread (daemon so it never blocks exit).
        self.infer_thread = threading.Thread(target=self.infer_loop)
        self.infer_thread.daemon = True
        self.infer_thread.start()

        # Approximately time-synchronized subscription to RGB + depth.
        rgb_sub = message_filters.Subscriber("/camera/color/image_raw", Image)
        depth_sub = message_filters.Subscriber("/camera/depth/image_raw", Image)
        ts = message_filters.ApproximateTimeSynchronizer([rgb_sub, depth_sub], queue_size=5, slop=0.1)
        ts.registerCallback(self.image_depth_callback)

    def caminfo_cb(self, msg):
        """Cache the depth camera pinhole intrinsics from CameraInfo.K
        (row-major 3x3: K[0]=fx, K[4]=fy, K[2]=cx, K[5]=cy)."""
        self.fx = msg.K[0]
        self.fy = msg.K[4]
        self.cx = msg.K[2]
        self.cy = msg.K[5]

    def image_depth_callback(self, rgb_msg, depth_msg):
        """Synchronized RGB+depth callback: convert both messages and hand the
        newest pair to the inference thread, dropping stale frames."""
        # Convert RGB.
        try:
            frame = self.bridge.imgmsg_to_cv2(rgb_msg, desired_encoding='bgr8')
        except Exception as e:
            rospy.logerr("RGB图像转换失败: %s", str(e))
            return

        # Convert depth (passthrough keeps the native encoding/dtype).
        try:
            depth_frame = self.bridge.imgmsg_to_cv2(depth_msg, desired_encoding='passthrough')
        except Exception as e:
            rospy.logerr("深度图转换失败: %s", str(e))
            return

        # Drop-oldest enqueue. Using put_nowait avoids the check-then-act race
        # of full()/put(): even if the slot refills between drain and put, we
        # simply drop this frame instead of blocking the ROS callback thread.
        if self.frame_queue.full():
            try:
                self.frame_queue.get_nowait()
            except queue.Empty:
                pass
        try:
            self.frame_queue.put_nowait((frame, depth_frame))
        except queue.Full:
            pass

    def infer_loop(self):
        """Worker thread: pull the latest frame pair, run YOLO, look up depth
        at each detection center, back-project, and publish Point messages."""
        while not rospy.is_shutdown() and not self.stop_event.is_set():
            try:
                frame, depth_frame = self.frame_queue.get(timeout=0.1)
            except queue.Empty:
                continue

            # YOLO inference (on GPU when available).
            results = self.model.predict(source=frame, imgsz=640, conf=0.7, verbose=False)
            annotated_frame = results[0].plot()

            # Iterate over detection boxes (xyxy pixel coordinates).
            for box in results[0].boxes.xyxy.cpu().numpy():
                x1, y1, x2, y2 = box.astype(int)
                # Box center in pixel coordinates. Named px/py to avoid
                # confusion with the intrinsics self.cx/self.cy.
                px = int((x1 + x2) / 2.0)
                py = int((y1 + y2) / 2.0)

                # Depth lookup at the center pixel (bounds-checked).
                if 0 <= px < depth_frame.shape[1] and 0 <= py < depth_frame.shape[0]:
                    depth_raw = int(depth_frame[py, px])
                    if depth_raw > 0:
                        # NOTE(review): assumes 16UC1 depth in millimetres
                        # (/1000 -> metres). If the camera publishes 32FC1 in
                        # metres this conversion is wrong — confirm encoding.
                        Z = depth_raw / 1000.0
                        # Explicit None checks: a valid intrinsic could be 0.0,
                        # which the previous truthiness test misclassified as
                        # "intrinsics not ready".
                        if (self.fx is not None and self.fy is not None
                                and self.cx is not None and self.cy is not None):
                            # Pinhole back-projection to camera-frame metres.
                            X = (px - self.cx) * Z / self.fx
                            Y = (py - self.cy) * Z / self.fy
                            rospy.loginfo("目标相机坐标: X=%.3f, Y=%.3f, Z=%.3f (m)", X, Y, Z)
                            point_msg = Point()
                            point_msg.x = X
                            point_msg.y = Y
                            point_msg.z = Z
                            self.target_pub.publish(point_msg)
                        else:
                            rospy.logwarn("相机内参未准备好")
                    else:
                        rospy.logwarn("深度无效 at (%d,%d)", px, py)

            # Visualization (comment out if a display is not needed/available).
            cv2.imshow("YOLOv11 + Depth", annotated_frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                rospy.signal_shutdown("手动退出")

    def shutdown(self):
        """Stop the inference thread and tear down OpenCV windows.

        join() is bounded so shutdown cannot hang forever if the worker is
        blocked; the thread is a daemon, so the process still exits cleanly.
        """
        self.stop_event.set()
        self.infer_thread.join(timeout=2.0)
        cv2.destroyAllWindows()

if __name__ == "__main__":
    try:
        detector = YoloRosDepthDetector()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
    finally:
        detector.shutdown()
