import cv2
import supervision as sv
from supervision import Color
from ultralytics import YOLO
import numpy as np
import matplotlib.pyplot as plt

# Load the YOLOv8 nano model (weights are downloaded on first use).
model = YOLO('yolov8n.pt')

input_path = 'videos/line.mp4'

# Grab the first frame of the video to help choose the counting-line position.
generator = sv.get_video_frames_generator(input_path)
iterator = iter(generator)
frame = next(iterator)

# Print the frame dimensions (a bare `frame.shape` expression only displays
# its value in a notebook; in a script it is a no-op).
print(frame.shape)

# Preview the frame (reverse the channel axis: OpenCV BGR -> matplotlib RGB).
plt.imshow(frame[:, :, ::-1])
plt.show()

# Endpoints of the line-crossing counter, in pixel coordinates.
LINE_START = sv.Point(0, 500)
LINE_END = sv.Point(1200, 500)
line_counter = sv.LineZone(start=LINE_START, end=LINE_END)

# Visual style of the counting line.
line_color = Color(r=238, g=255, b=0)
line_annotator = sv.LineZoneAnnotator(thickness=5, text_thickness=2, text_scale=1, color=line_color)

# Visual style of the detection bounding boxes and their labels.
box_annotator = sv.BoxAnnotator(thickness=2, text_thickness=1, text_scale=0.5)

# 封装检测函数
# Wrap detection + tracking + annotation for a single frame.
def process_frame(input_frame):
    """Run YOLOv8 detection with ByteTrack tracking on one BGR frame,
    draw labelled boxes and the line-crossing counter on it, and return
    the annotated frame.
    """
    results = model.track(input_frame, persist=True)
    # Fall back to the raw frame so the function always returns something,
    # even if `results` is empty (the original could hit an unbound local).
    out_frame = input_frame
    for result in results:
        # The original image this result was computed on.
        out_frame = result.orig_img

        # Parse the ultralytics result with supervision.
        detections = sv.Detections.from_yolov8(result)

        # `result.boxes.id` is None until the tracker has assigned IDs
        # (e.g. the very first frames, or frames without detections);
        # skip annotation/counting then to avoid an AttributeError.
        if result.boxes.id is None:
            continue

        # Attach the multi-object tracking IDs.
        detections.tracker_id = result.boxes.id.cpu().numpy().astype(int)

        # For each target: tracking ID, class name, confidence.
        class_ids = detections.class_id
        confidences = detections.confidence
        tracker_ids = detections.tracker_id
        # Label format: "#<track id> <class name> <confidence %>".
        labels = ['#{} {} {:.1f}'.format(tracker_ids[i], model.names[class_ids[i]], confidences[i] * 100) for i in
                  range(len(class_ids))]

        # Draw the detection boxes and labels.
        box_annotator.annotate(scene=out_frame, detections=detections, labels=labels)

        # Update the crossing counts and draw the line with its totals.
        line_counter.trigger(detections=detections)
        line_annotator.annotate(frame=out_frame, line_counter=line_counter)
    return out_frame


cap = cv2.VideoCapture(input_path)

# Derive the output path from the input file name.
filehead = input_path.split('/')[-1]
output_path = "videos/out-" + filehead

# Mirror the input's geometry and frame rate in the output writer.
# cap.get() returns floats; cast once here instead of at the call site.
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = cap.get(cv2.CAP_PROP_FPS)
out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

try:
    while cap.isOpened():
        success, frame = cap.read()
        # Guard clause: stop on end of stream or read error.
        if not success:
            break
        annotated_frame = process_frame(frame)
        cv2.imshow("YOLOv8 Tracking", annotated_frame)
        out.write(annotated_frame)
        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    # Release the capture and finalize the writer even if processing raised;
    # otherwise the output file's container index is never written.
    cv2.destroyAllWindows()
    out.release()
    cap.release()

print('视频已保存', output_path)

print('共跨线进入 ', line_counter.in_count)
print('共跨线离开 ', line_counter.out_count)
