import time

import cv2
import numpy as np
from PIL.Image import Image
from sahi import AutoDetectionModel
from sahi.predict import get_prediction, get_sliced_prediction, predict
import supervision as sv

from parse_srt import parse_srt, draw_chinese_text
from ultralytics import YOLO
import datetime
from collections import defaultdict

# Input/output paths for the video pipeline
input_video_path = 'E:/EFY/001人群/DCIM/100MEDIA/DJI_0862.MP4'  # replace with your input video path
SRT_PATH = 'E:/EFY/001人群/DCIM/100MEDIA/DJI_0862.SRT'  # DJI flight-log SRT subtitle file that accompanies the video
output_video_path = 'E:/EFY/001人群/OUTPUT/DJI_0862-800-1920,1080.MP4'  # replace with your output video path
# input_video_path = 'E:/EFY/002无人机视频/DJI_0827.MP4'  # alternate input video path (disabled)
# output_video_path = 'E:/EFY/002无人机视频/DJI_0827-800-1920,1080.MP4'  # alternate output video path (disabled)
# Candidate output resolutions (the script below uses 1920x1080):
# 1920,1080
# 1280,720
# 3840,2160


# ------------------------- Configuration -------------------------
VIDEO_PATH = input_video_path          # input video path
OUTPUT_VIDEO_PATH = output_video_path  # output video path
YOLO_MODEL_PATH = 'weights/yanxuegu_web_400+web100.pt'    # YOLO model weights path
CLASS_IDS = [1]                # only detections with these category IDs are kept (e.g. 0: person, 2: car)
SAHI_SLICE_SIZE = 800             # SAHI slice size in pixels (tune to the size of the targets)
TRACKER_CONFIG = "bytetrack.yaml" # ByteTrack tracker config  # NOTE(review): never used in this script — confirm before relying on it
TRACKER_TYPE = "bytetrack"        # tracking algorithm (bytetrack/botsort)  # NOTE(review): never used in this script

# ------------------------------------------------------------

# SAHI detection model used for sliced inference on each frame.
detection_model = AutoDetectionModel.from_pretrained(
    model_type='ultralytics',
    model_path=YOLO_MODEL_PATH,
    confidence_threshold=0.45,  # detections below this score are discarded
    device="cuda:0",  # set to "cpu" if no GPU is available
)
start_time = time.time()  # wall-clock start for the timing summary at the end

# Plain YOLO model intended for tracking.
# NOTE(review): this model is never invoked below — only SAHI sliced
# detection runs; keep it only if tracker integration is still planned.
# (Removed the duplicate `from ultralytics import YOLO` — already imported
# at the top of the file.)
yolo_model = YOLO(YOLO_MODEL_PATH)
yolo_model.fuse()  # fuse conv+bn layers to speed up inference


# 打开视频流
cap = cv2.VideoCapture(VIDEO_PATH)
fps = cap.get(cv2.CAP_PROP_FPS)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

# 创建输出视频写入器
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(OUTPUT_VIDEO_PATH, fourcc, fps, (1920,1080))

# 加载字幕
srt_data = parse_srt(SRT_PATH)


# Main per-frame loop: overlay flight-log text, run SAHI sliced detection,
# draw the kept detections, and write the annotated frame out.
frame_count = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # Resize to the writer's resolution — must match the VideoWriter size
    frame = cv2.resize(frame, (1920, 1080))

    # Timestamp of the current frame in milliseconds
    curr_time_ms = int((frame_count / fps) * 1000)
    # Overlay the subtitle entry whose time range covers this frame
    for entry in srt_data:
        if entry['start'] <= curr_time_ms <= entry['end']:
            text = f"变焦: {int(entry['dzoom_ratio'])/10000}倍  高度: {entry['altitude']}m"
            frame = draw_chinese_text(frame, text, (10, 30))
            break  # only the first matching entry is drawn

    # -------------------- SAHI sliced detection --------------------
    sahi_result = get_sliced_prediction(
        frame,
        detection_model,
        slice_height=SAHI_SLICE_SIZE,
        slice_width=SAHI_SLICE_SIZE,
        overlap_height_ratio=0.1,
        overlap_width_ratio=0.1
    )

    # Collect detections as (x1, y1, x2, y2, conf, cls) and draw them.
    # NOTE(review): `detections` is never consumed — presumably intended to
    # feed the (unused) tracker; confirm before deleting entirely.
    detections = []
    for pred in sahi_result.object_prediction_list:
        if pred.category.id not in CLASS_IDS:
            continue
        x1, y1, x2, y2 = map(int, pred.bbox.to_xyxy())
        detections.append([x1, y1, x2, y2, pred.score.value, pred.category.id])
        # Draw the box once (the original drew the identical box twice)
        # plus a "class conf" label clamped to stay inside the frame
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        label = f'{pred.category.id} {pred.score.value:.2f}'
        cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

    # Write the annotated frame to the output video
    out.write(frame)
    print(f"处理进度: {frame_count} 帧/{frame_count_total} 帧")
    frame_count += 1

# 记录结束时间
end_time = time.time()

# 计算总耗时（秒）
total_time = end_time - start_time

# 格式化输出（包含小数秒）
print(f"\n程序开始时间：{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))}")
print(f"程序结束时间：{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))}")
print(f"总运行时间：{total_time:.6f} 秒")
print(f"完成！视频已保存至: {OUTPUT_VIDEO_PATH}")
# 释放资源
cap.release()
out.release()






