import os

import time

import cv2
import numpy as np
from PIL.Image import Image
from sahi import AutoDetectionModel
from sahi.predict import get_prediction, get_sliced_prediction, predict
import supervision as sv

from parse_srt import parse_srt, draw_chinese_text
from ultralytics import YOLO
import datetime
from collections import defaultdict

def mp4_only_jiaoben(input_video_path, str_path, output_video_path, output_size=(1920, 1080)):
    """Run SAHI sliced detection on a video, overlay SRT telemetry text, and save the result.

    Each frame is resized to ``output_size``, annotated with the matching SRT
    entry (zoom ratio and altitude) and with boxes for detections whose
    category id is in ``CLASS_IDS``, then written to ``output_video_path``.

    Args:
        input_video_path: Path to the source .mp4 file.
        str_path: Path to the matching .SRT subtitle/telemetry file.
        output_video_path: Path where the annotated video is written.
        output_size: (width, height) the frames are resized to before detection
            and writing. Defaults to 1920x1080 (the original hard-coded size).

    Raises:
        IOError: If the input video cannot be opened.
    """
    # ------------------------- 配置参数 -------------------------
    YOLO_MODEL_PATH = 'weights/yanxuegu_web_400+web100.pt'  # detection weights
    CLASS_IDS = [1]            # category ids to keep (model-specific)
    SAHI_SLICE_SIZE = 800      # SAHI slice size; tune to expected target size
    # ------------------------------------------------------------

    detection_model = AutoDetectionModel.from_pretrained(
        model_type='ultralytics',
        model_path=YOLO_MODEL_PATH,
        confidence_threshold=0.45,
        device="cuda:0",
    )
    start_time = time.time()

    # 打开视频流
    cap = cv2.VideoCapture(input_video_path)
    if not cap.isOpened():
        raise IOError(f"无法打开视频: {input_video_path}")
    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # 创建输出视频写入器
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video_path, fourcc, fps, output_size)

    # 加载字幕
    srt_data = parse_srt(str_path)

    frame_count = 0
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            frame = cv2.resize(frame, output_size)

            # 获取当前帧的时间（毫秒）
            curr_time_ms = int((frame_count / fps) * 1000)
            # 匹配字幕 — first entry whose time span covers this frame
            for entry in srt_data:
                if entry['start'] <= curr_time_ms <= entry['end']:
                    text = f"变焦: {int(entry['dzoom_ratio'])/10000}倍  高度: {entry['altitude']}m"
                    frame = draw_chinese_text(frame, text, (10, 30))
                    break  # 找到就不用继续找了

            # -------------------- SAHI切片检测 --------------------
            sahi_result = get_sliced_prediction(
                frame,
                detection_model,
                slice_height=SAHI_SLICE_SIZE,
                slice_width=SAHI_SLICE_SIZE,
                overlap_height_ratio=0.1,
                overlap_width_ratio=0.1,
            )

            # 绘制检测框和标签
            for pred in sahi_result.object_prediction_list:
                if pred.category.id not in CLASS_IDS:
                    continue
                x1, y1, x2, y2 = map(int, pred.bbox.to_xyxy())
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                label = f'{pred.category.id} {pred.score.value:.2f}'
                cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

            # 写入输出视频
            out.write(frame)
            frame_count += 1  # increment first so the progress count is 1-based
            print(f"处理进度: {frame_count} 帧/{frame_count_total} 帧")
    finally:
        # 释放资源 — even if an exception interrupts processing
        cap.release()
        out.release()

    # 记录结束时间
    end_time = time.time()

    # 计算总耗时（秒）
    total_time = end_time - start_time

    # 格式化输出（包含小数秒）
    print(f"\n程序开始时间：{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))}")
    print(f"程序结束时间：{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))}")
    print(f"总运行时间：{total_time:.6f} 秒")
    print(f"完成！视频已保存至: {output_video_path}")







def process_videos(input_dir, output_dir):
    """Batch-process every .mp4 in *input_dir* that has a matching subtitle file.

    For each video, looks for a same-named ``.SRT`` (or ``.srt``) file in
    *input_dir* and writes the annotated result to *output_dir* under the same
    filename. Videos whose output already exists, or that lack a subtitle
    file, are skipped with a console message.

    Args:
        input_dir: Directory containing .mp4 videos and their .SRT files.
        output_dir: Directory for annotated videos; created if missing.
    """
    # 确保输出目录存在
    os.makedirs(output_dir, exist_ok=True)

    for filename in os.listdir(input_dir):
        if not filename.lower().endswith(".mp4"):
            continue
        video_path = os.path.join(input_dir, filename)
        base_name = os.path.splitext(filename)[0]
        output_path = os.path.join(output_dir, filename)

        # 如果输出文件已存在，则跳过
        if os.path.exists(output_path):
            print(f"已存在，跳过：{output_path}")
            continue

        # 找对应的 .srt 文件 — accept both extension cases, since lowercase
        # .srt files would otherwise be missed on case-sensitive filesystems
        str_path = None
        for ext in (".SRT", ".srt"):
            candidate = os.path.join(input_dir, f"{base_name}{ext}")
            if os.path.exists(candidate):
                str_path = candidate
                break

        # 检查字幕文件是否存在
        if str_path is None:
            missing = os.path.join(input_dir, f"{base_name}.SRT")
            print(f"未找到字幕文件，跳过：{missing}")
            continue

        # 调用处理方法
        mp4_only_jiaoben(video_path, str_path, output_path)


# Script entry point: annotate every video in the DJI media folder.
if __name__ == "__main__":
    process_videos(
        input_dir='E:/EFY/001人群/DCIM/101MEDIA',
        output_dir='E:/EFY/001人群/OUTPUT',
    )
