import cv2
import threading
import queue
from collections import OrderedDict
from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction

# Configuration parameters
INPUT_VIDEO_PATH = 'E:/EFY/002无人机视频/DJI_0813.MP4'
OUTPUT_VIDEO_PATH = 'E:/EFY/003无人机数据/DJI_0813-2.MP4'
MODEL_PATH = 'weights/visdrone+mydata100+crowdhuman200.pt'
CLASS_ID = 1  # category id that is boxed/counted as a person — TODO confirm against the model's label map
SAHI_SLICE_SIZE = 1000  # square slice edge length (px) for SAHI tiled inference
CONFIDENCE_THRESHOLD = 0.5
BATCH_SIZE = 8  # frames a worker thread collects per processing batch
NUM_WORKERS = 4  # number of worker threads
QUEUE_SIZE = 16  # max pending frames in the input queue (bounds reader memory)

# Initialize the SAHI detection model once; it is shared read-only by all
# worker threads (NOTE(review): assumes the underlying ultralytics model is
# safe for concurrent inference calls — verify).
detection_model = AutoDetectionModel.from_pretrained(
    model_type='ultralytics',
    model_path=MODEL_PATH,
    confidence_threshold=CONFIDENCE_THRESHOLD,
    device="cuda:0",
)

# Inter-thread queues:
# - input_queue: bounded; carries (frame_num, frame) pairs plus None sentinels.
# - output_queue: unbounded; PriorityQueue orders results by frame number
#   (frame numbers are unique, so the tuple's second element never compares).
input_queue = queue.Queue(maxsize=QUEUE_SIZE)
output_queue = queue.PriorityQueue()


# Worker thread processing function
def batch_processor():
    """Worker loop: pull (frame_num, frame) pairs from input_queue in batches,
    run SAHI sliced detection, draw person boxes and a count overlay on the
    frame, and push (frame_num, frame) onto output_queue.

    Exits when it dequeues a None sentinel. Frames collected before the
    sentinel are still processed — returning immediately would drop them and
    leave the writer loop waiting forever for the missing frame numbers.
    """
    while True:
        frame_batch = []
        stop = False
        # Collect up to BATCH_SIZE frames for this batch.
        for _ in range(BATCH_SIZE):
            item = input_queue.get()
            if item is None:
                stop = True
                break
            frame_batch.append(item)

        # Process whatever was collected (possibly a partial final batch).
        for frame_num, frame in frame_batch:
            # Run SAHI sliced inference over the full frame.
            result = get_sliced_prediction(
                frame,
                detection_model,
                slice_height=SAHI_SLICE_SIZE,
                slice_width=SAHI_SLICE_SIZE,
                overlap_height_ratio=0.1,
                overlap_width_ratio=0.1
            )

            # Draw a box around every detection of the target class.
            current_count = 0
            for pred in result.object_prediction_list:
                if pred.category.id == CLASS_ID:
                    current_count += 1
                    x1, y1, x2, y2 = map(int, pred.bbox.to_xyxy())
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

            # Overlay the per-frame person count.
            cv2.putText(frame, f"People: {current_count}", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            output_queue.put((frame_num, frame))

        if stop:
            return


# Spin up the worker pool; every thread runs the same batch_processor loop.
thread_pool = [
    threading.Thread(target=batch_processor) for _ in range(NUM_WORKERS)
]
for worker in thread_pool:
    worker.start()

# Open the input stream and create a writer that mirrors its
# frame rate and geometry.
cap = cv2.VideoCapture(INPUT_VIDEO_PATH)
fps = cap.get(cv2.CAP_PROP_FPS)
width, height = (
    int(cap.get(prop))
    for prop in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT)
)

fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(OUTPUT_VIDEO_PATH, fourcc, fps, (width, height))

# Main thread: read video frames and feed them to the workers.
frame_counter = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # copy() so a worker never shares a buffer with the capture loop.
    input_queue.put((frame_counter, frame.copy()))
    frame_counter += 1

# Send shutdown sentinels. One per worker is enough: each worker exits on
# the first None it dequeues, consuming exactly one sentinel — queueing
# NUM_WORKERS * BATCH_SIZE of them just leaves garbage in the queue.
for _ in range(NUM_WORKERS):
    input_queue.put(None)

# Wait for every worker thread to finish.
for t in thread_pool:
    t.join()

# Main thread: write results in frame order. All workers have exited, so
# every processed frame already sits in output_queue — a blocking get() is
# safe and avoids the previous busy-wait polling of output_queue.empty(),
# which spun the CPU whenever the queue was momentarily empty.
next_frame = 0
buffer = {}  # out-of-order frames parked until their turn; keyed pops, so a plain dict suffices

while next_frame < frame_counter:
    frame_num, frame = output_queue.get()
    buffer[frame_num] = frame

    # Flush every frame that is now contiguous with the write cursor.
    while next_frame in buffer:
        out.write(buffer.pop(next_frame))
        next_frame += 1

# Release resources
cap.release()
out.release()
print(f"处理完成！输出视频已保存至: {OUTPUT_VIDEO_PATH}")