from ultralytics import YOLO

import cv2
import numpy as np

# Run YOLO detection on a video file and display the original and
# annotated frames side by side until the video ends or 'q' is pressed.

# Initialize the video capture object
cap = cv2.VideoCapture("/media/starkpid/Document/flyList/project/03yumaoqiu/yumaoqiu.mp4")  # replace with your video path
if not cap.isOpened():
    # Fail loudly instead of silently running a zero-iteration loop on a bad path.
    raise RuntimeError("Failed to open video source")

# Create a resizable display window (widened for side-by-side display)
cv2.namedWindow("Detection Result", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Detection Result", 1600, 600)

# Load the trained detection model
model = YOLO(model='/home/starkpid/pro/yolov12/runs/train/exp/weights/best.pt')

try:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # End of stream (or read error) — stop processing.
            break

        # Run inference on the single frame; keep saving/showing disabled
        # because we compose and display the output ourselves below.
        results = model.predict(source=frame,
                        save=False,
                        show=False,
                        )

        # Get the annotated frame (BGR numpy array)
        annotated_frame = results[0].plot()

        # Resize both frames to the same height, preserving aspect ratio
        target_height = 600
        frame_resized = cv2.resize(frame, (int(frame.shape[1] * target_height / frame.shape[0]), target_height))
        annotated_resized = cv2.resize(annotated_frame, (frame_resized.shape[1], target_height))

        # Horizontally stack the original and annotated frames
        combined = np.hstack((frame_resized, annotated_resized))

        # Show the combined result
        cv2.imshow("Detection Result", combined)

        # ~30 ms per frame; 'q' quits early.
        if cv2.waitKey(30) & 0xFF == ord("q"):
            break
finally:
    # Release the capture device and close GUI windows even if the loop
    # exits via an exception — the original script leaked both.
    cap.release()
    cv2.destroyAllWindows()