# Standard library
import itertools  # pairwise combinations of tracked people

# Third-party
import cv2  # OpenCV: video I/O, drawing, and display
import numpy as np  # numerical helpers (distance computation)
from ultralytics import YOLO  # YOLO model for person detection
from deep_sort_realtime.deepsort_tracker import DeepSort  # DeepSORT multi-object tracker

# 加载预训练的 YOLOv11 模型
model = YOLO('../ultralytics/assets/yolo11n.pt')

# 初始化 DeepSORT 跟踪器
# max_age: 如果一个目标连续 max_age 帧未被检测到，则认为该目标消失
# n_init: 需要连续 n_init 帧检测到同一个目标，才确认其存在
tracker = DeepSort(max_age=5, n_init=2)

def detect_fight_in_video(video_path):
    """
    检测视频中的打架行为。
    
    参数:
        video_path (str): 视频文件路径
    """
    cap = cv2.VideoCapture(video_path)  # 打开视频文件
    prev_frame_tracks = {}  # 存储上一帧的目标跟踪信息

    while cap.isOpened():  # 循环读取视频帧
        ret, frame = cap.read()  # 读取当前帧
        if not ret:  # 如果读取失败，退出循环
            break

        # 使用 YOLOv8 进行目标检测
        results = model(frame, classes=0)  # 只检测人（类别 0）
        detections = []  # 存储检测结果
        for result in results:
            boxes = result.boxes.cpu().numpy()  # 获取检测框
            for box in boxes:
                xyxy = box.xyxy[0].astype(int)  # 获取边界框坐标
                x1, y1, x2, y2 = xyxy
                w = x2 - x1  # 计算宽度
                h = y2 - y1  # 计算高度
                conf = box.conf[0]  # 置信度
                cls = box.cls[0]  # 类别
                detections.append(([x1, y1, w, h], conf, cls))  # 添加到检测结果列表

        # 使用 DeepSORT 进行目标跟踪
        tracks = tracker.update_tracks(detections, frame=frame)  # 更新跟踪信息
        current_frame_tracks = {}  # 存储当前帧的目标跟踪信息
        for track in tracks:
            if not track.is_confirmed():  # 如果目标未被确认，跳过
                continue
            track_id = track.track_id  # 获取目标 ID
            ltrb = track.to_ltrb()  # 获取目标边界框
            x1, y1, x2, y2 = map(int, ltrb)
            center = ((x1 + x2) // 2, (y1 + y2) // 2)  # 计算目标中心点
            current_frame_tracks[track_id] = center  # 存储目标中心点

            # 在图像上绘制目标框和 ID
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(frame, str(track_id), (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

        # 简单的打架判断逻辑
        fighting_pairs = []  # 存储可能的打架对
        for id1, center1 in current_frame_tracks.items():
            for id2, center2 in current_frame_tracks.items():
                if id1 < id2:  # 避免重复比较
                    distance = np.linalg.norm(np.array(center1) - np.array(center2))  # 计算两目标之间的距离
                    if distance < 100:  # 如果距离小于阈值
                        if id1 in prev_frame_tracks and id2 in prev_frame_tracks:  # 如果上一帧中也存在这两个目标
                            prev_distance = np.linalg.norm(
                                np.array(prev_frame_tracks[id1]) - np.array(prev_frame_tracks[id2]))
                            distance_change = abs(prev_distance - distance)  # 计算距离变化
                            if distance_change > 20:  # 如果距离变化大于阈值，认为可能存在打架行为
                                fighting_pairs.append((id1, id2))

        # 在图像上标记打架行为
        for pair in fighting_pairs:
            id1, id2 = pair
            x1_1, y1_1 = current_frame_tracks[id1]
            x1_2, y1_2 = current_frame_tracks[id2]
            cv2.line(frame, (x1_1, y1_1), (x1_2, y1_2), (0, 0, 255), 2)  # 绘制连线
            cv2.putText(frame, "Fighting", (x1_1, y1_1 - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)  # 标记文字

        prev_frame_tracks = current_frame_tracks  # 更新上一帧的目标跟踪信息

        # 显示结果
        cv2.imshow('Fight Detection', frame)

        # 按下 'q' 键退出
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # 释放资源
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    """
    主程序入口
    """
    video_path = "./dajia.mp4"  # 替换为你的视频文件路径
    detect_fight_in_video(video_path)  # 调用函数开始检测