import cv2
from ultralytics import YOLO
import os

# ---- Tunables ----
INPUT_VIDEO = "anime_fight.mp4"   # source anime video
OUTPUT_DIR = "highlights"         # directory where extracted clips are written
# Detected classes that mark a frame as part of a highlight.
# NOTE(review): stock yolov8n weights are COCO-trained and COCO has no
# "explosion" class — confirm custom weights are intended here.
TARGET_CLASSES = {"person", "knife", "explosion"}
MIN_INTERVAL = 3                  # largest tolerated gap (frames) before a run is closed
MIN_DURATION = 2                  # shortest clip worth keeping, in seconds

# Make sure the clip directory exists before any writing happens.
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Detection model (YOLOv8 nano weights).
model = YOLO("yolov8n.pt")

# Open the source video for the detection pass.
cap = cv2.VideoCapture(INPUT_VIDEO)
if not cap.isOpened():
    raise ValueError(f"无法打开视频：{INPUT_VIDEO}")

# Basic stream properties used for filtering and for the writer later.
fps = cap.get(cv2.CAP_PROP_FPS)
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Some containers report 0 fps; without this guard min_frames becomes 0 and
# every later "duration = frames / fps" print raises ZeroDivisionError.
if fps <= 0:
    raise ValueError(f"无法读取视频帧率：{INPUT_VIDEO}")
min_frames = int(MIN_DURATION * fps)  # minimum segment length in frames

# Segment-tracking state (updated by the per-frame loop below).
active_segment = False  # currently inside a matching run?
segment_start = 0       # first frame index of the current run
last_matched_frame = 0  # most recent frame index that matched a target class
segments = []           # collected valid runs: [(start_frame, end_frame), ...]

# ---- Frame-by-frame detection loop ----
frame_idx = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break  # end of video

    # 1. Run YOLO on the current frame (verbose=False suppresses per-frame logs).
    results = model(frame, verbose=False)
    det = results[0]
    # .cpu() is required before .numpy(): when inference runs on a GPU the
    # class tensor lives on the device and a direct .numpy() call raises.
    detected_names = {det.names[int(cls_id)] for cls_id in det.boxes.cls.cpu().numpy()}

    # 2. The frame matches when any detected class is one of the targets.
    current_matched = bool(detected_names & TARGET_CLASSES)

    # 3. Track contiguous runs of matching frames.
    if current_matched:
        if not active_segment:
            # Open a new segment at this frame.
            active_segment = True
            segment_start = frame_idx
        # Matched frames always refresh the last-hit marker.
        last_matched_frame = frame_idx
    elif active_segment:
        # Tolerate up to MIN_INTERVAL non-matching frames inside a segment;
        # beyond that, close the segment at the last matching frame.
        if frame_idx - last_matched_frame > MIN_INTERVAL:
            active_segment = False
            segment_end = last_matched_frame
            # Keep only segments that are long enough.
            if (segment_end - segment_start) >= min_frames:
                segments.append((segment_start, segment_end))
                print(f"发现高光区间：帧{segment_start}到{segment_end}（时长：{(segment_end-segment_start)/fps:.1f}秒）")

    frame_idx += 1

# Flush the segment that is still open when the video runs out of frames.
if active_segment:
    final_end = last_matched_frame
    if final_end - segment_start >= min_frames:
        segments.append((segment_start, final_end))

# Detection pass is done; free the capture handle.
cap.release()

# 4. Cut each detected segment out of the source video.
if not segments:
    print("未发现符合条件的高光片段")
else:
    # Re-open the source so we can seek freely while cutting.
    cap = cv2.VideoCapture(INPUT_VIDEO)
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # MP4 codec

    for idx, (start_frame, end_frame) in enumerate(segments, start=1):
        clip_seconds = (end_frame - start_frame) / fps
        print(f"裁剪片段 {idx}/{len(segments)}：{clip_seconds:.1f}秒")

        # Seek to the first frame of this segment.
        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

        # One writer per clip, same geometry and frame rate as the source.
        output_path = f"{OUTPUT_DIR}/highlight_{idx}.mp4"
        writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

        # Copy every frame in [start_frame, end_frame] into the clip.
        remaining = end_frame - start_frame + 1
        while remaining > 0:
            ok, frame = cap.read()
            if not ok:
                break
            writer.write(frame)
            remaining -= 1

        writer.release()

    cap.release()
    print(f"所有片段已保存到 {OUTPUT_DIR} 目录")