import cv2

from parse_srt import draw_chinese_text, parse_srt
from ultralytics import YOLO
import datetime

# 自定义绘制函数（不显示ID，细框，显示置信度）
# Custom drawing function (no track IDs, thin boxes, confidence labels)
def custom_plot(results, color=(0, 255, 0), thickness=2):
    """Draw detection boxes and confidence scores onto a copy of the frame.

    Unlike results[0].plot(), this omits track IDs and class names and only
    renders the bounding box plus the confidence value.

    Args:
        results: Ultralytics inference results; results[0] provides
            ``orig_img`` and ``boxes``.  # assumes ultralytics Results — confirm
        color: BGR color used for boxes and labels (default green).
        thickness: line and text thickness in pixels (default 2, matching
            the original hard-coded value).

    Returns:
        A copy of the original frame with the annotations drawn on it.
    """
    plot_frame = results[0].orig_img.copy()
    for box in results[0].boxes:
        # Box corners and detection confidence
        x1, y1, x2, y2 = (int(v) for v in box.xyxy[0].tolist())
        conf = box.conf[0].item()

        cv2.rectangle(plot_frame, (x1, y1), (x2, y2), color, thickness)

        # Confidence rounded to 2 decimals, drawn just above the box
        cv2.putText(plot_frame, f"{conf:.2f}", (x1, y1 - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness)
    return plot_frame


# --- ByteTrack video processing ---
# Load the detection model
model = YOLO('weights/red_best.pt')

# Input/output paths
input_video_path = 'C:/Users/WUTLQJ/Desktop/红衣服/DJI_0020.MP4'
output_video_path = 'C:/Users/WUTLQJ/Desktop/红衣服out/DJI_0020_process.MP4'
SRT_PATH = 'C:/Users/WUTLQJ/Desktop/红衣服/DJI_0020.SRT'  # DJI subtitle track with telemetry

cap = cv2.VideoCapture(input_video_path)
if not cap.isOpened():
    # Fail fast instead of silently writing an empty output video
    raise IOError(f"Cannot open input video: {input_video_path}")

# Output frame size — every frame is resized to this before processing
frame_width = 1920
frame_height = 1080
fps = cap.get(cv2.CAP_PROP_FPS)
if not fps or fps <= 0:
    # Some containers report 0 FPS; fall back so the timestamp math
    # below does not divide by zero and the writer gets a valid rate.
    fps = 30.0
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

# Output video writer
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))

frame_number = 0
seen_ids = set()  # every track ID ever observed (cumulative person count)

# Parse telemetry subtitles once up front
srt_data = parse_srt(SRT_PATH)

try:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.resize(frame, (frame_width, frame_height))

        # Current frame timestamp in milliseconds
        curr_time_ms = int((frame_number / fps) * 1000)
        # Overlay the subtitle entry covering this timestamp, if any
        for entry in srt_data:
            if entry['start'] <= curr_time_ms <= entry['end']:
                text = f"变焦: {int(entry['dzoom_ratio'])/10000}倍  高度: {entry['altitude']}m"
                print(text)
                frame = draw_chinese_text(frame, text, (10, 30))
                break  # first matching entry wins; no need to keep scanning

        # Detection + tracking on the current frame
        results = model.track(
            source=frame,
            persist=True,
            conf=0.3,
            iou=0.5,
            classes=[0],  # pedestrians only
            verbose=False,
            tracker="bytetrack.yaml"
        )

        # Custom drawing (no IDs, confidence labels only)
        result_frame = custom_plot(results)

        # Accumulate track IDs; boxes.id is None when nothing is tracked
        current_ids = results[0].boxes.id
        if current_ids is not None:
            seen_ids.update(current_ids.tolist())

        # Write the annotated frame
        out.write(result_frame)

        frame_number += 1
        print(f'Processing frame {frame_number}/{frame_count}')
finally:
    # Release capture and writer even if an exception interrupts the loop,
    # so the output file is properly finalized.
    cap.release()
    out.release()
