import cv2
import torch
import numpy as np
from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors
from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction
from ultralytics.engine.results import Results, Boxes
from pathlib import Path

# SAHI + ByteTrack pedestrian-counting pipeline for drone footage.
#
# Strategy: SAHI sliced inference finds small/distant pedestrians; YOLOv8's
# built-in ByteTrack assigns persistent IDs across frames.

# Load the YOLOv8 model (used for ByteTrack tracking).
yolo_model = YOLO("weights/visdrone+mydata100+crowdhuman200.pt")

# Load the same weights through SAHI for sliced (tiled) detection.
sahi_model = AutoDetectionModel.from_pretrained(
    model_type='ultralytics',
    model_path='weights/visdrone+mydata100+crowdhuman200.pt',
    confidence_threshold=0.65,
    device='cuda:0' if torch.cuda.is_available() else 'cpu'
)

# Video I/O settings.
input_video_path = 'E:/EFY/002无人机视频/DJI_0826.MP4'
output_video_path = 'E:/EFY/003无人机数据/DJI_0826_sahi_bytetrack.mp4'
cap = cv2.VideoCapture(input_video_path)
if not cap.isOpened():
    raise IOError(f"Cannot open input video: {input_video_path}")

frame_width = 1280
frame_height = 720
fps = cap.get(cv2.CAP_PROP_FPS)
# Some containers report 0 (or NaN) FPS; a zero-FPS writer produces an
# unplayable file, so fall back to a sane default.
if not fps or fps != fps:
    fps = 30.0
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

out = cv2.VideoWriter(
    output_video_path,
    cv2.VideoWriter_fourcc(*'mp4v'),
    fps,
    (frame_width, frame_height)
)
if not out.isOpened():
    cap.release()
    raise IOError(f"Cannot open output video for writing: {output_video_path}")

# Counters: current frame index and every track ID ever seen (for the
# cumulative unique-pedestrian count).
frame_number = 0
seen_ids = set()

# Main per-frame loop: SAHI sliced detection gates tracking; YOLOv8+ByteTrack
# produces IDs; counts are overlaid and the frame is written to the output.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    frame = cv2.resize(frame, (frame_width, frame_height))

    # === SAHI sliced detection ===
    # Tiles the frame into 400x400 slices with 5% overlap so small, distant
    # pedestrians are detected at a usable resolution, then merges via NMS.
    prediction_result = get_sliced_prediction(
        image=frame,
        detection_model=sahi_model,
        slice_height=400,
        slice_width=400,
        overlap_height_ratio=0.05,
        overlap_width_ratio=0.05,
        postprocess_type="NMS"
    )

    # The SAHI output is used ONLY as a gate: frames with at least one
    # pedestrian (category id 1) go through tracking; others are written as-is.
    #
    # NOTE(review): the SAHI boxes are NOT fed into ByteTrack —
    # yolo_model.track() below re-runs detection on the full frame. The
    # previous version built torch tensors plus Boxes/Results objects from the
    # SAHI detections every frame and then discarded them; that dead
    # construction has been removed. Feeding SAHI boxes to the tracker would
    # require driving ultralytics' BYTETracker directly with these detections.
    pedestrian_found = any(
        obj.category.id == 1
        for obj in prediction_result.object_prediction_list
    )

    if not pedestrian_found:
        # NOTE(review): skipping track() here interrupts ByteTrack's temporal
        # state (IDs may be reassigned after a gap) — confirm this is intended.
        out.write(frame)
        frame_number += 1
        print(f"Frame {frame_number}/{frame_count} - No detection")
        continue

    # === ByteTrack tracking on the full frame ===
    track_results = yolo_model.track(
        persist=True,           # keep tracker state across successive calls
        stream=False,
        conf=0.65,
        classes=[1],            # track pedestrians only
        tracker="bytetrack.yaml",
        save=False,
        verbose=False,
        imgsz=(720, 1280),      # optional
        batch=1,
        source=frame            # pass the frame (tracker state is retained)
    )

    # Visualization plus per-frame and cumulative counts.
    plotted_frame = track_results[0].plot()
    current_ids = track_results[0].boxes.id  # None when nothing was tracked

    if current_ids is not None:
        ids = current_ids.tolist()
        current_count = len(ids)
        seen_ids.update(ids)    # grow the unique-ID set for the total count
    else:
        current_count = 0

    total_count = len(seen_ids)

    # Top-left: frame index.
    cv2.putText(plotted_frame, f"Frame {frame_number + 1}/{frame_count}", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    # Top-right: current-frame count and cumulative unique-ID count.
    cv2.putText(plotted_frame, f"Current: {current_count}", (frame_width - 250, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
    cv2.putText(plotted_frame, f"Total: {total_count}", (frame_width - 250, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)

    out.write(plotted_frame)
    frame_number += 1
    print(f"Processed frame {frame_number}/{frame_count} - Current: {current_count}, Total: {total_count}")

# Release the capture and writer handles (finalizes/flushes the output file).
cap.release()
out.release()