from ultralytics import YOLO

import cv2


if __name__ == '__main__':

    # Load the trained YOLO model and open the source video.
    model = YOLO('result/train10/weights/best.pt')
    video_path = 'dataset/video1737331921197.mp4'  # supports MP4/AVI/MOV and other common formats

    cap = cv2.VideoCapture(video_path)

    # Target processing/output resolution.
    # NOTE: CAP_PROP_FRAME_WIDTH/HEIGHT (used previously) only affect live
    # cameras, not video files — frames from a file keep their native size.
    # VideoWriter silently drops frames whose size differs from the size it
    # was opened with, so each frame is resized explicitly below instead.
    height, width = 360, 640

    # Define the codec and create the VideoWriter object.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video_name = 'result.mp4'  # output video file name
    out = cv2.VideoWriter(video_name, fourcc, 10.0, (width, height))

    recording = False  # toggled on with 's', off with 'd'
    try:
        # Loop through the video frames.
        while cap.isOpened():
            # Read a frame from the video.
            success, frame = cap.read()
            if not success:
                # End of the video (or a decode error) — stop processing.
                break

            # Downscale to the intended 640x360 pipeline resolution so the
            # written frames match the VideoWriter's declared size.
            frame = cv2.resize(frame, (width, height))

            # Run YOLO inference on the frame.
            results = model(frame, conf=0.8, iou=0.45, device='0')

            # Visualize the results on the frame.
            annotated_frame = results[0].plot()

            # Display the annotated frame.
            cv2.imshow("YOLO Inference", annotated_frame)

            if recording:
                out.write(annotated_frame)

            # Poll the keyboard exactly ONCE per frame: multiple waitKey
            # calls each consume pending key presses, so a key pressed by
            # the user could be swallowed by one check and missed by the
            # others.
            key = cv2.waitKey(1) & 0xFF
            if key == ord('s'):
                recording = True
            elif key == ord('d'):
                recording = False
            elif key == ord('q'):
                # Break the loop if 'q' is pressed.
                break
    finally:
        # Release the capture and writer and close the display window even
        # if inference raises mid-loop.
        cap.release()
        out.release()
        cv2.destroyAllWindows()

    # Offline (batch) prediction — kept for reference, currently unused.
    # predict_args = {
    #     'source': video_path,
    #     'stream': True,    # stream results lazily to avoid exhausting memory
    #     'conf': 0.25,      # detection confidence threshold (filters low-confidence boxes)
    #     'iou': 0.45,       # IoU threshold for non-maximum suppression
    #     'imgsz': 640,      # input resolution (should match the training resolution)
    #     # 'save': True,    # automatically save the annotated result video
    #     'device': '0'      # target GPU index (use 'cpu' when no GPU is available)
    # }
    #
    # # Optional parameters (add as needed)
    # predict_args.update({
    #     'show': True,    # display the detection window in real time
    #     'max_det': 300,  # maximum detections per frame
    #     'vid_stride': 1  # frame stride (skip frames to speed up processing)
    # })
    #
    # results = model.predict(**predict_args)
    # # Iterate over the generator to actually run (and save) the predictions
    # for r in results:
    #     boxes = r.boxes  # Boxes object for bbox outputs
    #     masks = r.masks  # Masks object for segment masks outputs
    #     probs = r.probs  # Class probabilities for classification outputs