import random
import cv2
import time
import numpy as np
import torch
from ultralytics import YOLO
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import DeepSort
# Video source (a file path here; cv2.VideoCapture also accepts stream URLs).
url = 'person1.mp4'
# DeepSORT tracker instance; created by init_tracker() before the main loop.
deepsort = None
# Per-track history buffer (currently unused in this chunk).
data_deque = {}

# Initialize the detection model.
model = YOLO('model/yolov8n.pt')  # YOLOv8 nano model
def compute_color_for_labels(label):
    """Return a fixed BGR color tuple for a class id.

    Known COCO classes (person, car, motorbike, bus) get hand-picked
    colors; any other class gets a pseudo-random color that is
    *deterministic* for that label, so the same class keeps the same
    color across frames (the original picked a fresh random color on
    every call, making unknown classes flicker).

    :param label: integer class id
    :return: (B, G, R) tuple, each component in 0..255
    """
    fixed = {
        0: (85, 45, 255),    # person
        2: (222, 82, 175),   # car
        3: (0, 204, 255),    # motorbike
        5: (0, 149, 255),    # bus
    }
    if label in fixed:
        return fixed[label]
    # Seeding a private RNG on the label makes the fallback color stable
    # without touching the global random state.
    rng = random.Random(label)
    return (rng.randint(0, 255), rng.randint(0, 255), rng.randint(0, 255))
def draw_boxes(labels, outputs, frame):
    """Draw one labelled tracking box per DeepSORT output row onto *frame*.

    :param labels: mapping from class id to class name (e.g. results.names)
    :param outputs: iterable of rows [x1, y1, x2, y2, track_id, class_id]
    :param frame: BGR image to draw on (modified in place)
    :return: the same frame, with boxes and labels drawn
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    for row in outputs:
        # Corner coordinates as plain ints for the cv2 drawing calls.
        x1, y1, x2, y2 = (int(v) for v in row[:4])
        track_id = int(row[-2])
        cls_id = int(row[-1])

        # One color per class so boxes are visually groupable.
        box_color = compute_color_for_labels(cls_id)

        # Bounding box outline.
        cv2.rectangle(frame, (x1, y1), (x2, y2), box_color, 2)

        # Filled background strip above the box, then the caption text.
        text = f"ID: {track_id} - Class: {cls_id} - Label: {labels[cls_id]}"
        (tw, th), _ = cv2.getTextSize(text, font, 0.6, 1)
        cv2.rectangle(frame, (x1, y1 - 20), (x1 + tw, y1), box_color, -1)
        cv2.putText(frame, text, (x1, y1 - 5), font, 0.6, (255, 255, 255), 1)

    return frame

def init_tracker():
    """Build the module-global DeepSORT tracker from its YAML config file."""
    global deepsort

    cfg = get_config()
    cfg.merge_from_file("deep_sort_pytorch/configs/deep_sort.yaml")

    ds = cfg.DEEPSORT  # shorthand for the DEEPSORT config node
    deepsort = DeepSort(
        ds.REID_CKPT,
        max_dist=ds.MAX_DIST,
        min_confidence=ds.MIN_CONFIDENCE,
        nms_max_overlap=ds.NMS_MAX_OVERLAP,
        max_iou_distance=ds.MAX_IOU_DISTANCE,
        max_age=ds.MAX_AGE,
        n_init=ds.N_INIT,
        nn_budget=ds.NN_BUDGET,
        use_cuda=True,
    )
##########################################################################################
init_tracker()

# Open the video source.
cap = cv2.VideoCapture(url)
if not cap.isOpened():
    print("无法打开视频流")
    exit()

# Source frame rate, used to pace the display loop.
frame_rate = cap.get(cv2.CAP_PROP_FPS)
print("视频流的帧率:", frame_rate)
# CAP_PROP_FPS can legitimately be 0 (e.g. some live streams); fall back to a
# 1 ms wait so int(1000 / frame_rate) below cannot divide by zero.
frame_delay = int(1000 / frame_rate) if frame_rate > 0 else 1

# Only every `skip_frames`-th frame is detected/tracked/displayed.
skip_frames = 5

# Display window.
cv2.namedWindow('video', cv2.WINDOW_NORMAL)
cv2.resizeWindow("video", 1280, 720)

frame_count = 0
while True:
    ret, frame = cap.read()
    if not ret:
        print("无法读取视频帧")
        break

    frame_count += 1
    if frame_count % skip_frames != 0:
        continue  # skip this frame entirely

    # Run detection on this frame.
    results = model(source=frame, verbose=False, conf=0.3)[0]

    # Default to showing the raw frame; replace with the annotated one when
    # the tracker produces confirmed tracks.
    display = frame
    if len(results):
        # DeepSORT wants center-x, center-y, width, height boxes plus
        # confidences and class ids; it returns rows of
        # [x1, y1, x2, y2, track_id, class_id].
        xywhs = results.boxes.xywh.cpu()  # avoids the tensor->list->tensor round trip
        confss = results.boxes.conf.tolist()
        oids = results.boxes.cls.tolist()
        outputs = deepsort.update(xywhs, confss, oids, frame)
        if len(outputs) > 0:
            display = draw_boxes(results.names, outputs, frame)
    cv2.imshow("video", display)

    # Pace playback to the source frame rate; any key press quits.
    key = cv2.waitKey(frame_delay)
    if key != -1:
        break

# Release the capture and close all windows.
cap.release()
cv2.destroyAllWindows()
