#yolo track model=best.pt source=0 tracker=bytetrack.yaml persist=True show conf=0.4
from ultralytics import YOLO
import cv2

model = YOLO("best.pt")                        # load trained weights
stream = model.track(
        source=0,                              # 0 = default webcam; a video path or RTSP URL also works
        tracker="bytetrack.yaml",              # or botsort.yaml
        persist=True,                          # reuse the internal tracker state across frames
        show=True,                             # live preview window
        stream=True)                           # return a lazy generator of per-frame Results

for frame_res in stream:                       # one Results object per frame
    for box in frame_res.boxes:
        # box.id is None until ByteTrack has confirmed/assigned a track ID
        # (typically the first few frames) — guard it so the loop doesn't
        # crash with "TypeError: 'NoneType' object is not subscriptable".
        tid = int(box.id[0]) if box.id is not None else -1
        cls = int(box.cls[0])
        conf = float(box.conf[0])
        xyxy = box.xyxy[0].cpu().numpy()       # [x1, y1, x2, y2] in pixels
        # TODO: business logic, e.g. publish to a ROS topic


#!/usr/bin/env python3
import numpy as np
import rospkg
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from ultralytics import YOLO
from vision_msgs.msg import (BoundingBox2D, Detection2D, Detection2DArray,
                             ObjectHypothesisWithPose)

class YoloTrackNode:
    """ROS node: run YOLO + ByteTrack on an image topic, publish detections.

    Subscribes to the image topic given by ``~image`` (sensor_msgs/Image,
    bgr8) and publishes ``detections`` (vision_msgs/Detection2DArray), one
    Detection2D per tracked box with its class hypothesis and track id.
    """

    def __init__(self):
        # --- parameters ---------------------------------------------------
        model_path   = rospy.get_param("~weights", "best.pt")
        tracker_yaml = rospy.get_param("~tracker_cfg", "bytetrack.yaml")
        conf_thres   = rospy.get_param("~conf", 0.4)
        self.bridge  = CvBridge()

        # --- load YOLO model ---------------------------------------------
        self.yolo = YOLO(model_path)
        self.yolo.fuse()                              # small speedup (fuses conv+bn layers)
        self.tracker_cfg = tracker_yaml
        self.conf = conf_thres

        # --- ROS I/O ------------------------------------------------------
        image_topic = rospy.get_param("~image", "/camera/image_raw")
        self.pub = rospy.Publisher("detections", Detection2DArray, queue_size=3)
        # buff_size bumped so large images don't back up in the TCP buffer
        rospy.Subscriber(image_topic, Image, self.cb, queue_size=1, buff_size=2**24)

        rospy.loginfo("🟢 YOLOv11 Tracker Node ready.")

    def cb(self, msg):
        """Image callback: run single-frame tracking and publish results.

        msg: sensor_msgs/Image — converted to a bgr8 OpenCV frame.
        """
        # ROS -> OpenCV
        frame = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        # Single-frame track() call: stream=False -> returns a results list;
        # persist=True keeps tracker state alive between callbacks.
        results = self.yolo.track(
            source=frame,
            tracker=self.tracker_cfg,
            persist=True,
            conf=self.conf,
            iou=0.5,
            stream=False)

        det_array = Detection2DArray()
        det_array.header = msg.header             # stamp detections with the source image header

        for box in results[0].boxes:
            x1, y1, x2, y2 = (float(v) for v in box.xyxy[0])

            det = Detection2D()
            # Fill the bbox for real: center + size derived from xyxy.
            # (center is a geometry_msgs/Pose2D in ROS1 vision_msgs —
            # NOTE(review): confirm against the installed vision_msgs version.)
            det.bbox = BoundingBox2D()
            det.bbox.center.x = (x1 + x2) / 2.0
            det.bbox.center.y = (y1 + y2) / 2.0
            det.bbox.size_x = x2 - x1
            det.bbox.size_y = y2 - y1

            # Class hypothesis: the previous code did
            # `Detection2D().results[0]`, which indexes an empty list and
            # raised IndexError on every detection.
            hyp = ObjectHypothesisWithPose()
            hyp.id = int(box.cls[0])
            hyp.score = float(box.conf[0])
            det.results.append(hyp)

            # box.id is None until the tracker has assigned a track ID.
            # NOTE(review): Detection2D.id exists only in some vision_msgs
            # releases — verify the message definition on the target distro.
            det.id = int(box.id[0]) if box.id is not None else -1
            det_array.detections.append(det)

        self.pub.publish(det_array)
    rospy.spin()
# catkin build file (CMakeLists.txt), kept here for reference:
# cmake_minimum_required(VERSION 3.0)
# project(yolov11_tracker)

# find_package(catkin REQUIRED COMPONENTS
#   rospy sensor_msgs vision_msgs cv_bridge)

# catkin_package()

# catkin_install_python(
#   PROGRAMS scripts/yolov11_tracker_node.py
#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION})

# install(DIRECTORY launch DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION})


# launch file, kept here for reference:
# <launch>
#   <arg name="weights" default="$(find yolov11_tracker)/weights/best.engine"/>
#   <node pkg="yolov11_tracker" type="yolov11_tracker_node.py" name="yolo_track"
#         output="screen">
#     <param name="weights" value="$(arg weights)"/>
#     <param name="tracker_cfg" value="bytetrack.yaml"/>
#     <param name="image"  value="/usb_cam/image_raw"/>
#     <param name="conf"   value="0.4"/>
#   </node>
# </launch>
