from ultralytics import YOLO
import cv2
import numpy as np
import torch
from torchvision.ops import nms
from collections import deque, defaultdict

# --- Model setup ---
# Load the custom-trained YOLO weights and move the model to GPU when available.
model = YOLO("D:/Project/sightiq-ai/video/train/wider_train/train6/weights/best.pt")
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)

# --- Video setup ---
video_path = "demo1.mp4"
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
    # Fail fast: an unopened capture reports 0x0 geometry and the main loop
    # would otherwise exit silently after zero frames with no diagnostic.
    raise RuntimeError(f"Cannot open video source: {video_path}")
fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30  # fall back to 30 when the container reports 0
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# --- Detection parameters ---
conf_threshold = 0.25  # balance precision vs. recall
iou_threshold = 0.45   # relaxed NMS overlap threshold
min_movement = 5       # avg per-frame center movement (px) below which a track counts as static

# --- Grid tiling (run detection per sub-block to help with small/distant people) ---
grid_rows, grid_cols = 6, 6
block_h, block_w = height // grid_rows, width // grid_cols

# --- Counting & tracking state ---
person_count_queue = deque(maxlen=10)  # rolling window for count smoothing
track_memory = defaultdict(list)       # detection index -> list of (cx, cy) centers

while True:
    ret, frame = cap.read()
    if not ret:
        break

    total_person_count = 0
    all_boxes = []
    all_scores = []
    all_classes = []

    # --- Tiled detection over the grid ---
    for i in range(grid_rows):
        for j in range(grid_cols):
            x1, y1 = j * block_w, i * block_h
            # BUGFIX: extend the last row/column tile to the frame edge so the
            # remainder pixels (width % grid_cols, height % grid_rows) left by
            # the integer division are still scanned.
            x2 = width if j == grid_cols - 1 else (j + 1) * block_w
            y2 = height if i == grid_rows - 1 else (i + 1) * block_h
            block = frame[y1:y2, x1:x2]

            # Run inference on the current tile.
            results = model(block, conf=conf_threshold, iou=iou_threshold, imgsz=640)[0]
            if results.boxes is None or len(results.boxes) == 0:
                continue

            # Shift tile-local box coordinates into full-frame coordinates.
            boxes = results.boxes.xyxy.cpu().numpy()
            boxes[:, [0, 2]] += x1  # x offset
            boxes[:, [1, 3]] += y1  # y offset
            all_boxes.extend(boxes)
            all_scores.extend(results.boxes.conf.cpu().numpy())
            all_classes.extend(results.boxes.cls.cpu().numpy())

    # --- Global NMS across all tiles (merges duplicates along tile borders) ---
    if all_boxes:
        # np.array(...) first: building a tensor from a list of ndarrays
        # element-by-element is slow and emits a warning on recent torch.
        keep = nms(
            torch.tensor(np.array(all_boxes), dtype=torch.float32),
            torch.tensor(np.array(all_scores), dtype=torch.float32),
            iou_threshold=iou_threshold
        )
        keep_idx = keep.cpu().numpy()  # hoisted: was computed twice
        final_boxes = np.array(all_boxes)[keep_idx]
        final_classes = np.array(all_classes)[keep_idx]

        # Draw surviving person boxes and count them.
        for idx, (x1, y1, x2, y2) in enumerate(final_boxes):
            if final_classes[idx] != 0:  # assumes class id 0 is "person" — TODO confirm against model
                continue

            total_person_count += 1
            cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)

            # NOTE(review): keying history by the per-frame detection index is
            # NOT a stable identity — index 0 may be a different person each
            # frame, so this "track" mixes people. A real tracker (e.g. an
            # ID-assigning association step) is needed; kept as-is and flagged.
            cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
            track_memory[idx].append((cx, cy))

    # --- Discard "static" pseudo-tracks (needs >= 3 history points) ---
    still_track_ids = 0
    for track_id in list(track_memory.keys()):
        if len(track_memory[track_id]) >= 3:
            movements = [
                np.linalg.norm(np.array(track_memory[track_id][i]) - np.array(track_memory[track_id][i-1]))
                for i in range(1, len(track_memory[track_id]))
            ]
            avg_move = sum(movements) / len(movements)
            if avg_move < min_movement:
                still_track_ids += 1
                del track_memory[track_id]

    total_person_count = max(0, total_person_count - still_track_ids)

    # --- Smooth the count over the last 10 frames ---
    person_count_queue.append(total_person_count)
    smoothed_count = round(sum(person_count_queue) / len(person_count_queue))

    # --- Display ---
    cv2.putText(frame, f"Persons: {smoothed_count}", (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("Person Detection", frame)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()