import cv2
from ultralytics import YOLO

# Load the trained YOLOv8 model (path to your best.pt weights).
model = YOLO(r"E:\项目\松山湖公安分局无人机自动巡检项目\事故检测\标注数据\OBB\250423\train18\weights\best.pt")

# Input / output video paths.
input_video_path = r"E:\项目\松山湖公安分局无人机自动巡检项目\事故检测\无人机事故视频-云遥\0507\1816437285179953152181735212_20250429165916-00.08.20.401-00.11.49.237-seg2.mp4"
output_video_path = 'output_video.mp4'

# Open the input video and fail fast if the path is wrong — otherwise the
# read loop below would silently exit and write an empty output file.
cap = cv2.VideoCapture(input_video_path)
if not cap.isOpened():
    raise IOError(f"Cannot open input video: {input_video_path}")

# Basic stream properties. FPS metadata can be missing (reported as 0),
# which would make VideoWriter produce an unplayable file — fall back to 30.
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # frame width in pixels
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # frame height in pixels
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # total frames (may be 0 for streams)

# Video writer with the same frame rate and size as the source.
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # MPEG-4 codec
out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))

frame_idx = 0  # number of frames processed so far
try:
    while True:
        ret, frame = cap.read()  # read one frame
        if not ret:
            break  # end of stream

        frame_idx += 1
        print(f'Processing frame {frame_idx}/{frame_count}')

        # Run YOLO inference on the single frame.
        # NOTE(review): imgsz is normally a multiple of 32; Ultralytics will
        # auto-adjust (height, width) with a warning — confirm this is intended.
        results = model(frame, imgsz=(height, width))

        # Draw detection results onto the frame.
        for result in results:
            if result.boxes is None:
                continue
            det_count = len(result.boxes)  # detections in this frame
            for box in result.boxes:
                x1, y1, x2, y2 = map(int, box.xyxy[0])  # bounding-box corners
                conf = box.conf[0].item()  # confidence score
                cls = int(box.cls[0])      # class index

                color = (0, 255, 0)  # green (BGR)
                cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                label = f"ID: {cls} ({conf:.2f})"
                cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            # Overlay the per-frame detection count (was previously left blank).
            cv2.putText(
                frame,
                f"Detect Count: {det_count}",
                (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (0, 255, 0),
                2,
            )
        # Write the annotated frame to the output video.
        out.write(frame)
finally:
    # Always release capture/writer handles, even if inference raises.
    cap.release()
    out.release()
    cv2.destroyAllWindows()

print("推理完成，视频已保存！")
