# Plot object trajectories as they change over time
from collections import defaultdict

import cv2
import numpy as np
import torch

from ultralytics import YOLO

# Load the YOLOv8 model (nano weights; downloaded automatically if absent).
model = YOLO('yolov8n.pt')

# Open the video source. 0 selects the default webcam; a file path can be
# substituted instead (example kept below for reference).
# video_path = "C:\\Users\\Administrator\\Desktop\\1.ts"
video_path = 0
cap = cv2.VideoCapture(video_path)

# Track history: maps track_id -> list of (x, y) center points.
# defaultdict(list) is the idiomatic form of defaultdict(lambda: []).
track_history = defaultdict(list)

# For saving the annotated frames to a video file (currently disabled).
# fourcc = cv2.VideoWriter_fourcc(*'mp4v')
# out_cat = cv2.VideoWriter("C:\\Users\\Administrator\\Desktop\\save.mp4", fourcc, 24, (352, 288), True)  # output path / format

# Main processing loop: read frames, run tracking, annotate, and display
# until the stream ends or the user presses 'q'.
while cap.isOpened():
    # Read a frame from the video source.
    success, frame = cap.read()
    if not success:
        # End of the video (or camera disconnected) — stop processing.
        break

    # Run YOLOv8 tracking on the frame, persisting track IDs between frames.
    results = model.track(frame, persist=True)

    # Class ids of all detections in this frame (class 0 == "person" in COCO).
    target = results[0].boxes.cls
    print(target)
    if 0 in target:
        print("you 人")
    else:
        print("没人")

    # Draw the detection boxes/labels once; only the overlay text below
    # differs between the two branches (previously plot() was duplicated).
    annotated_frame = results[0].plot()

    if results[0].boxes.id is not None and 0 in target:
        # Tracking IDs are only available once tracks have been established.
        track_ids = results[0].boxes.id.int().cpu().tolist()
        print(track_ids, "当前id")
        # Count how many of the detections are persons (class 0).
        num_zeros = torch.sum(target == 0).item()
        print(num_zeros, "人的数量")
        print(results[0].boxes.cls[0].item() == 0, "当前的类型")
        annotated_frame = cv2.putText(
            annotated_frame, str(num_zeros) + "person", (250, 50),
            cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)
    else:
        annotated_frame = cv2.putText(
            annotated_frame, "no person", (250, 50),
            cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)

    # Display the annotated frame.
    cv2.imshow("YOLOv8 Tracking", annotated_frame)

    # out_cat.write(annotated_frame)  # save video (disabled)

    # Break the loop if 'q' is pressed.
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# Release the video capture object and close the display window.
cap.release()
cv2.destroyAllWindows()
