import multiprocessing
import queue
import time

import cv2
from ultralytics import YOLO


# Inter-process command channel: another process (presumably a socket
# listener — not visible in this file) pushes {'cmd': ..., 'data': ...}
# messages that the detect loop polls non-blockingly each frame.
queue_socket_to_detect = multiprocessing.Queue()

def start_detect():
    """Capture frames from the camera, run YOLO inference on each one,
    display the annotated result, and print the first detection's top-left
    corner. Polls ``queue_socket_to_detect`` once per frame for external
    commands. Runs until 'q' or ESC is pressed.
    """
    # Open the default camera (device 0).
    cap = cv2.VideoCapture(0)

    # Alternative input: a video file instead of the camera.
    video_path = 'video/2.mp4'
    # cap = cv2.VideoCapture(video_path)

    # Load the trained model weights.
    model = YOLO('/home/uds/code/yolov8/runs/detect/train6/weights/best.pt')

    # Capture-and-infer loop.
    while True:
        # Grab one frame; ret is False when no frame is available.
        ret, frame = cap.read()

        if ret:
            cmd = None
            data = None
            # Non-blocking poll of the command queue; queue.Empty simply
            # means no message this frame. KeyError is tolerated so a
            # malformed message can't crash the loop (matches the original
            # best-effort behavior). cmd/data are currently unused —
            # presumably reserved for future command handling.
            try:
                socket_data = queue_socket_to_detect.get(block=False)
                cmd = socket_data['cmd']
                data = socket_data['data']
            except (queue.Empty, KeyError):
                pass

            results = model(frame, device="cuda")
            res = results[0]
            # Draw boxes/labels onto a copy of the frame and show it.
            annotated_image = res.plot()
            cv2.imshow('YoloV8 Inference', annotated_image)

            names = res.names
            indexes = res.boxes.cls
            # xyxy boxes as integer pixel coordinates.
            posPoint = res.boxes.xyxy.cpu().numpy().astype('uint32')
            # Report the first detection's top-left corner, or (-1, -1)
            # when nothing was detected.
            x = -1
            y = -1
            if len(posPoint) > 0:
                x = posPoint[0][0]
                y = posPoint[0][1]

            print("X=", x, "Y=", y)

        key = cv2.waitKey(1)
        if key == ord('q') or key == 27:  # 'q' or ESC exits the loop
            break

    cap.release()
    # BUGFIX: destroyAllWindows was referenced without calling it, and the
    # preceding waitKey(0) blocked forever on exit waiting for a keypress.
    cv2.destroyAllWindows()


# Script entry point: start the capture/inference loop only when executed
# directly, not when imported (also required for multiprocessing safety).
if __name__ == '__main__':
    start_detect()