import cv2
import numpy as np

# --- Configuration -----------------------------------------------------------
# Class-name list and trained YOLOv4-tiny network files.
CLASSES_PATH = "classes.names"
CFG_PATH = r'D:\xunlian\yolo_opencv\222\yolov4-tiny.cfg'
WEIGHTS_PATH = r'D:\xunlian\yolo_opencv\222\trained_models\yolov4-tiny_final.weights'

VIDEO_SOURCE = 0         # 0 = default webcam; may also be a video file path
CONF_THRESHOLD = 0.01    # minimum class score to keep a detection (tune as needed)
NMS_THRESHOLD = 0.8      # non-maximum-suppression overlap threshold
FPS_LIMIT = 30           # cap the display loop at roughly 30 FPS
INPUT_SIZE = (416, 416)  # network input resolution expected by yolov4-tiny


def parse_detections(layer_outputs, frame_w, frame_h, conf_threshold=CONF_THRESHOLD):
    """Convert raw YOLO layer outputs into pixel-space boxes.

    Each detection row is [cx, cy, w, h, objectness, class scores...] with
    coordinates normalized to [0, 1] relative to the frame size.

    Args:
        layer_outputs: iterable of 2-D arrays, one per YOLO output layer.
        frame_w: frame width in pixels.
        frame_h: frame height in pixels.
        conf_threshold: keep only detections whose best class score exceeds this.

    Returns:
        (boxes, confidences, class_ids) — parallel lists where each box is
        [x, y, w, h] in pixels with (x, y) the top-left corner, the layout
        expected by cv2.dnn.NMSBoxes.
    """
    boxes, confidences, class_ids = [], [], []
    scale = np.array([frame_w, frame_h, frame_w, frame_h])
    for output in layer_outputs:
        for detection in output:
            scores = detection[5:]
            class_id = int(np.argmax(scores))
            confidence = float(scores[class_id])
            if confidence > conf_threshold:
                # Scale the normalized center/size to pixel coordinates and
                # convert from center-based to top-left-based box.
                cx, cy, w, h = detection[0:4] * scale
                x = int(cx - w / 2)
                y = int(cy - h / 2)
                boxes.append([x, y, int(w), int(h)])
                confidences.append(confidence)
                class_ids.append(class_id)
    return boxes, confidences, class_ids


def main():
    """Run YOLOv4-tiny detection on the video source and display annotated frames."""
    # Load class labels; `with` closes the file handle promptly.
    with open(CLASSES_PATH) as f:
        labels = f.read().strip().split("\n")

    net = cv2.dnn.readNetFromDarknet(CFG_PATH, WEIGHTS_PATH)
    out_layers = net.getUnconnectedOutLayersNames()

    # One stable color per class, generated once. Drawing with a fresh random
    # color on every frame (as before) makes the boxes flicker.
    rng = np.random.default_rng(42)
    colors = rng.integers(0, 255, size=(len(labels), 3)).tolist()

    cap = cv2.VideoCapture(VIDEO_SOURCE)
    if not cap.isOpened():
        print("无法打开视频源")
        return

    frame_count = 0
    # Seed with the current tick so the first frame's FPS is not computed
    # against 0 (which yielded a meaningless value).
    prev_tick = cv2.getTickCount()

    while True:
        ret, frame = cap.read()
        if not ret:
            print("无法获取帧，可能是视频结束。退出...")
            break

        frame_count += 1
        if frame_count % 10 == 0:  # progress heartbeat every 10 frames
            print(f"Processing frame {frame_count}")

        frame_h, frame_w = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, INPUT_SIZE, swapRB=True, crop=False)
        net.setInput(blob)
        layer_outputs = net.forward(out_layers)

        boxes, confidences, class_ids = parse_detections(layer_outputs, frame_w, frame_h)

        # Guard: NMSBoxes raises on some OpenCV builds when given no boxes.
        if boxes:
            idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONF_THRESHOLD, NMS_THRESHOLD)
            # np.array(...).flatten() normalizes both the Nx1 (OpenCV 3/4.x)
            # and flat (newer) return shapes.
            for i in np.array(idxs).flatten():
                x, y, bw, bh = boxes[i]
                color = [int(c) for c in colors[class_ids[i]]]
                cv2.rectangle(frame, (x, y), (x + bw, y + bh), color, 2)
                text = "{}: {:.4f}".format(labels[class_ids[i]], confidences[i])
                cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)

        # Instantaneous FPS from the tick counter; guard against a zero delta.
        tick = cv2.getTickCount()
        delta = tick - prev_tick
        fps = cv2.getTickFrequency() / delta if delta > 0 else 0.0
        prev_tick = tick
        cv2.putText(frame, f"FPS: {int(fps)}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        cv2.imshow('Video Detection', frame)

        # Cap the display rate; waitKey also services the HighGUI event loop.
        key = cv2.waitKey(max(1, int(1000 / FPS_LIMIT))) & 0xFF
        if key == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()