import cv2
import numpy as np
from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction
from boxmot import ByteTrack

# Initialize the SAHI-wrapped YOLOv8 detection model (sliced/tiled inference).
detection_model = AutoDetectionModel.from_pretrained(
    model_type='ultralytics',
    model_path='weights/visdrone+mydata100+crowdhuman200.pt',
    confidence_threshold=0.65,  # detections scored below this are discarded
    device='cuda:0'  # NOTE(review): assumes a CUDA GPU is present — confirm
)

# Initialize the ByteTrack multi-object tracker with default parameters.
tracker = ByteTrack()

# Input and output video paths.
input_video_path = 'E:/EFY/002无人机视频/DJI_0812.MP4'
output_video_path = 'E:/EFY/003无人机数据/DJI_0812.MP4'

# Open the input video and fail fast if it cannot be read. Without this
# check, cap.get(...) silently returns 0 and the VideoWriter below is
# created with a broken size/fps, producing an unplayable output file.
cap = cv2.VideoCapture(input_video_path)
if not cap.isOpened():
    raise RuntimeError(f'Cannot open input video: {input_video_path}')
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)

# Create the writer with the same resolution and frame rate as the input.
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))

# Every track ID ever seen — its size is the cumulative person count.
total_ids = set()

# Per-frame pipeline: sliced detection -> person filtering -> tracking ->
# annotation -> counting -> write to the output video.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # Run SAHI sliced (tiled) inference so small/distant people are detected.
    result = get_sliced_prediction(
        frame,
        detection_model,
        slice_height=800,  # tune to the video resolution
        slice_width=800,
        overlap_height_ratio=0.1,
        overlap_width_ratio=0.1,
    )

    # Keep only 'person' detections: [x1, y1, x2, y2] boxes plus scores.
    person_bboxes = []
    person_scores = []
    for pred in result.object_prediction_list:
        if pred.category.name == 'person':
            person_bboxes.append(pred.bbox.to_xyxy())
            person_scores.append(pred.score.value)

    # Pack detections as the (N, 6) float32 array ByteTrack expects:
    # [x1, y1, x2, y2, score, class_id] — class 0 is 'person' here.
    if person_bboxes:
        bboxes = np.array(person_bboxes, dtype=np.float32)
        scores = np.array(person_scores, dtype=np.float32).reshape(-1, 1)
        classes = np.zeros((scores.shape[0], 1), dtype=np.float32)
        detections = np.concatenate([bboxes, scores, classes], axis=1)
    else:
        detections = np.empty((0, 6), dtype=np.float32)  # keep 6 columns

    # Update the tracker; each returned row is
    # [x1, y1, x2, y2, track_id, conf, cls, det_ind] — TODO confirm against
    # the installed boxmot version.
    tracks = tracker.update(detections, frame)

    current_ids = []
    for track in tracks:
        # Convert only coordinates and the ID to int; keep the confidence
        # as a float (a blanket astype(int) would truncate it to 0).
        x1, y1, x2, y2 = (int(v) for v in track[:4])
        track_id = int(track[4])
        score = track[5]
        current_ids.append(track_id)

        # Draw the bounding box, track ID, and confidence on the frame.
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(frame, f'ID: {track_id}', (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.putText(frame, f'Conf: {score }', (x1, y2 + 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)

    # Update the cumulative unique-ID set and derive both counts.
    total_ids.update(current_ids)
    current_count = len(current_ids)
    total_count = len(total_ids)

    # Overlay per-frame and cumulative counts in the top-right corner.
    cv2.putText(frame, f'Current: {current_count}', (frame_width - 250, 40),
               cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)
    cv2.putText(frame, f'Total: {total_count}', (frame_width - 250, 80),
               cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)

    # Write the annotated frame and log the counts.
    out.write(frame)
    print(f'Current: {current_count}')
    print(f'Total: {total_count}')

# Release video resources and close any OpenCV windows.
cap.release()
out.release()
cv2.destroyAllWindows()