import cv2
import numpy as np
from ultralytics import YOLO
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.compositing.CompositeVideoClip import concatenate_videoclips
import os

"""
To solve the problem that "dialog segments (non-fight frames) interleaved within a
fight sequence must be kept to preserve narrative continuity", the key is to upgrade
from "per-frame detection of fight objects" to "recognition of complete fight scenes"
-- i.e. treat a fight scene as one continuous narrative unit, so that even when
dialog (non-fight frames) is interleaved, the whole span is kept as long as it
belongs to the same fight scene.
"""

# Configuration parameters
INPUT_VIDEO = "anime_fight.mp4"
OUTPUT_DIR = "fight_scenes_with_dialog"
# Scene-change threshold (larger => harder to trigger a scene cut).
# NOTE(review): 3000 is very low for a sum-of-absolute-differences over a
# 320x240 grayscale frame (max ~19.6M) — most frame pairs will exceed it;
# confirm the intended scale of this value.
SCENE_THRESHOLD = 3000  # 场景切换阈值（值越大，越不容易判定为切换）
MIN_SCENE_DURATION = 5  # Minimum scene length (seconds); shorter scenes are filtered
TARGET_CLASSES = {"person", "knife", "sword", "explosion"}  # Fight-related object classes

# Create the output directory (no error if it already exists)
os.makedirs(OUTPUT_DIR, exist_ok=True)

def detect_scene_changes(video_path, threshold=SCENE_THRESHOLD):
    """Detect scene-change points in a video (returned as timestamps in seconds).

    Consecutive frames are converted to grayscale and downscaled to 320x240;
    when the sum of absolute pixel differences between two consecutive frames
    exceeds ``threshold``, that frame's time is recorded as a scene boundary.
    Boundaries closer than MIN_SCENE_DURATION to the previous kept boundary
    are dropped (short scenes are merged into the preceding one).

    Args:
        video_path: Path to the input video file.
        threshold: Frame-difference cutoff; larger values make a scene
            change harder to trigger.

    Returns:
        A sorted list of boundary timestamps in seconds, starting at 0.0 and
        ending at the video's total duration, so consecutive pairs always
        cover the entire video.

    Raises:
        IOError: If the video cannot be opened or reports an invalid FPS.
    """
    cap = cv2.VideoCapture(video_path)
    # Guard: an unreadable file or FPS of 0 would otherwise crash with
    # ZeroDivisionError below when converting frame indices to seconds.
    if not cap.isOpened():
        raise IOError(f"Cannot open video: {video_path}")
    fps = cap.get(cv2.CAP_PROP_FPS)
    if not fps or fps <= 0:
        cap.release()
        raise IOError(f"Invalid FPS ({fps}) reported for video: {video_path}")

    prev_frame = None
    scene_changes = [0.0]  # The first scene starts at 0 seconds
    frame_idx = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Grayscale + downscale to reduce the per-frame comparison cost
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.resize(gray, (320, 240))

        if prev_frame is not None:
            # Sum of absolute per-pixel differences vs. the previous frame
            frame_diff = cv2.absdiff(gray, prev_frame)
            diff_sum = np.sum(frame_diff)

            # A large enough difference is treated as a scene cut
            if diff_sum > threshold:
                scene_changes.append(frame_idx / fps)  # frame index -> seconds

        prev_frame = gray
        frame_idx += 1

    total_duration = frame_idx / fps
    scene_changes.append(total_duration)
    cap.release()

    # Drop boundaries that would create scenes shorter than MIN_SCENE_DURATION
    filtered = [scene_changes[0]]
    for t in scene_changes[1:]:
        if t - filtered[-1] >= MIN_SCENE_DURATION:
            filtered.append(t)

    # Bug fix: if the final segment was shorter than MIN_SCENE_DURATION, the
    # loop above dropped ``total_duration`` and the video's tail was silently
    # lost. Merge the short tail into the last kept scene so the returned
    # boundaries always cover the whole video.
    if filtered[-1] < total_duration:
        if len(filtered) > 1:
            filtered[-1] = total_duration
        else:
            filtered.append(total_duration)
    return filtered

def is_fight_scene(video_path, start_sec, end_sec, model):
    """Return True if the scene [start_sec, end_sec) contains fight content.

    Samples up to 10 frames from the scene (one frame, then skip 9) and runs
    the YOLO model on each sample; the scene is flagged as a fight scene as
    soon as any detection's class name is in TARGET_CLASSES.

    Args:
        video_path: Path to the input video file.
        start_sec: Scene start time in seconds.
        end_sec: Scene end time in seconds.
        model: A loaded ultralytics YOLO model (callable on a frame).

    Returns:
        True if any sampled frame contains a fight-related detection,
        False otherwise (including when the video/FPS is unreadable).
    """
    cap = cv2.VideoCapture(video_path)
    try:
        fps = cap.get(cv2.CAP_PROP_FPS)
        if not fps or fps <= 0:
            # Unreadable video: treat conservatively as "not a fight scene"
            return False
        start_frame = int(start_sec * fps)
        end_frame = int(end_sec * fps)
        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

        # Sampled detection (inspect roughly every 10th frame for efficiency)
        sample_interval = 10
        frame_count = max(0, end_frame - start_frame)
        # Bug fix: the original ``min(10, frame_count // sample_interval)``
        # evaluated to 0 for scenes shorter than 10 frames, so those scenes
        # were never inspected and always classified as non-fight. Always
        # check at least one frame.
        samples = max(1, min(10, frame_count // sample_interval))

        for _ in range(samples):
            ret, frame = cap.read()
            if not ret:
                break
            # Skip the rest of this sampling window
            for _ in range(sample_interval - 1):
                cap.read()

            # Run YOLO and map detected class ids to human-readable names
            results = model(frame, verbose=False)
            id_to_name = results[0].names
            detected = [id_to_name[int(c)] for c in results[0].boxes.cls.numpy()]
            if any(name in TARGET_CLASSES for name in detected):
                return True  # One fight-related detection is enough
        return False
    finally:
        # Release the capture even if YOLO inference raises
        cap.release()

def extract_fight_scenes():
    """Detect, extract, and merge fight scenes from INPUT_VIDEO.

    Pipeline:
      1. Split the video at scene-change boundaries (frame-difference based).
      2. Run YOLO on sampled frames of each scene; a scene with any
         fight-related detection is kept whole, so dialog cuts inside a
         fight sequence are preserved for continuity.
      3. Write each fight scene (with its audio) to OUTPUT_DIR, then write
         one merged file with all fight scenes in original order.

    Side effects: reads INPUT_VIDEO, writes .mp4 files into OUTPUT_DIR,
    prints progress to stdout.
    """
    # 1. Detect scene boundaries
    scene_changes = detect_scene_changes(INPUT_VIDEO)
    print(f"检测到{len(scene_changes)-1}个场景")

    # 2. Load the YOLO model (nano weights: fast, adequate for sampling)
    model = YOLO("yolov8n.pt")

    # 3. Keep only scenes containing fight-related detections
    fight_scenes = []
    for i in range(len(scene_changes) - 1):
        start = scene_changes[i]
        end = scene_changes[i + 1]
        duration = end - start
        print(f"检测场景 {i+1}: {start:.1f}~{end:.1f}秒（时长{duration:.1f}秒）")

        if is_fight_scene(INPUT_VIDEO, start, end, model):
            fight_scenes.append((start, end))
            print(f"→ 标记为打斗场景（含对话）")

    if not fight_scenes:
        print("未检测到打斗场景")
        return

    # 4. Cut and save each fight scene (audio included)
    video = VideoFileClip(INPUT_VIDEO)
    clips = []
    try:
        for i, (start, end) in enumerate(fight_scenes):
            clip = video.subclipped(start, end)
            # Register the clip before encoding so it is closed in the
            # finally block even if write_videofile fails part-way.
            clips.append(clip)
            clip_path = os.path.join(OUTPUT_DIR, f"fight_scene_{i+1}.mp4")
            clip.write_videofile(
                clip_path,
                codec="libx264",
                audio_codec="aac",
                fps=video.fps
            )
            print(f"已保存打斗场景 {i+1}: {clip_path}")

        # 5. Concatenate all fight scenes in their original order
        final_clip = concatenate_videoclips(clips, method="compose")
        final_path = os.path.join(OUTPUT_DIR, "all_fight_scenes_merged.mp4")
        final_clip.write_videofile(
            final_path,
            codec="libx264",
            audio_codec="aac",
            fps=video.fps
        )
        print(f"所有打斗场景已拼接：{final_path}")
    finally:
        # Bug fix: the original released MoviePy resources only on the
        # success path; a failed encode leaked the reader processes.
        for clip in clips:
            clip.close()
        video.close()

# Script entry point: run the full detect -> classify -> extract pipeline.
if __name__ == "__main__":
    extract_fight_scenes()