import hashlib
import json
import logging
import os
import time
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Tuple, Optional, Callable, Literal, Dict, Any

import cv2
import numpy as np

import moviepy
from moviepy.video.io.VideoFileClip import VideoFileClip

class AlgorithmType(Enum):
    """Identifiers for the available scene-detection algorithms."""

    FRAME_DIFF = "diff"       # mean absolute difference between frames
    HIST_DIFF = "hist"        # colour-histogram comparison
    CONTENT_DIFF = "content"  # content statistics (mean / variance)
    HYBRID = "hybrid"         # weighted combination of the above
    SMART = "smart"           # multi-feature (visual / audio / motion) detection

@dataclass
class SceneInfo:
    """Metadata for one detected scene (shot) of a video."""
    index: int                            # zero-based position of the scene
    start_frame: int                      # first frame of the scene (inclusive)
    end_frame: int                        # frame at which the scene ends
    duration: float                       # scene length in seconds
    preview_path: Optional[str] = None    # path of a saved preview image, if any
    video_path: Optional[str] = None      # path of the source/exported video, if any

    def to_dict(self):
        """Serialize this scene to a plain, JSON-friendly dict."""
        keys = ('index', 'start_frame', 'end_frame', 'duration',
                'preview_path', 'video_path')
        return {key: getattr(self, key) for key in keys}

    @staticmethod
    def from_dict(data):
        """Build a SceneInfo from a dict, tolerating missing keys."""
        defaults = {
            'index': 0,
            'start_frame': 0,
            'end_frame': 0,
            'duration': 0.0,
            'preview_path': None,
            'video_path': None,
        }
        return SceneInfo(**{key: data.get(key, default)
                            for key, default in defaults.items()})

class SceneDetectionStrategy:
    """Abstract base for scene-detection algorithms (strategy pattern)."""

    def detect(self, cap: "cv2.VideoCapture", fps: float, total_frames: int,
               threshold: float, min_scene_length: int,
               progress_callback: Optional[Callable[[float], None]] = None,
               **kwargs) -> "List[SceneInfo]":
        """Run detection over *cap* and return the list of scenes.

        Subclasses must override this method.
        """
        raise NotImplementedError

    def get_default_params(self) -> Dict[str, Any]:
        """Return the strategy's default tuning parameters (none by default)."""
        return {}

class FrameDiffStrategy(SceneDetectionStrategy):
    """Frame-difference detection.

    Declares a scene cut when the mean absolute difference between
    consecutive frames (in one or more colour spaces) exceeds the
    threshold, optionally adapted to the recent difference history.
    """

    def get_default_params(self) -> Dict[str, Any]:
        """Default tuning parameters for this strategy."""
        return {
            "threshold": 30.0,
            "min_scene_length": 15,
            "use_adaptive": False,
            "adaptive_window": 30,
            "use_channels": ["gray"]  # options: ["gray", "hsv", "lab"]
        }

    def detect(self, cap, fps, total_frames, threshold, min_scene_length, progress_callback=None, **kwargs):
        """Scan the capture frame by frame and return detected scenes.

        Args:
            cap: opened cv2.VideoCapture (rewound to frame 0 on entry).
            fps: frames per second, used to compute scene durations.
            total_frames: frame count, used only for progress reporting.
            threshold: base mean-absolute-difference cut threshold.
            min_scene_length: minimum scene length in frames.
            progress_callback: optional callable receiving progress in [0, 1].
            **kwargs: strategy options, see get_default_params().

        Returns:
            List of SceneInfo covering the detected scenes.
        """
        # Colour conversions applied before differencing, keyed by option name;
        # this replaces three near-identical copy/pasted per-channel blocks.
        conversions = {
            "gray": cv2.COLOR_BGR2GRAY,
            "hsv": cv2.COLOR_BGR2HSV,
            "lab": cv2.COLOR_BGR2LAB,
        }
        requested = kwargs.get("use_channels", ["gray"])
        # Unknown channel names are silently ignored, as before.
        active = [name for name in conversions if name in requested]

        use_adaptive = kwargs.get("use_adaptive", False)
        adaptive_window = kwargs.get("adaptive_window", 30)
        video_path = kwargs.get("video_path")

        scenes = []
        prev_frames = {}
        recent_diffs = []
        frame_count = 0
        scene_start = 0

        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Difference against the previous frame in every active colour
            # space; the largest per-space mean difference drives the decision.
            current_frames = {}
            max_diff = 0
            for name in active:
                converted = cv2.cvtColor(frame, conversions[name])
                current_frames[name] = converted
                if name in prev_frames:
                    diff = cv2.absdiff(converted, prev_frames[name])
                    max_diff = max(max_diff, np.mean(diff))

            # Adaptive threshold: 1.5x the mean of the recent diff window,
            # but never below the caller-supplied base threshold.
            if use_adaptive:
                recent_diffs.append(max_diff)
                if len(recent_diffs) > adaptive_window:
                    recent_diffs.pop(0)
                current_threshold = max(threshold, np.mean(recent_diffs) * 1.5)
            else:
                current_threshold = threshold

            if prev_frames and max_diff > current_threshold and (frame_count - scene_start) >= min_scene_length:
                scenes.append(SceneInfo(
                    index=len(scenes),
                    start_frame=scene_start,
                    end_frame=frame_count,
                    duration=(frame_count - scene_start) / fps,
                    video_path=video_path
                ))
                scene_start = frame_count

            prev_frames = current_frames
            frame_count += 1

            if progress_callback and total_frames > 0:
                progress_callback(frame_count / total_frames)

        # Close out the trailing scene if it is long enough.
        if frame_count - scene_start >= min_scene_length:
            scenes.append(SceneInfo(
                index=len(scenes),
                start_frame=scene_start,
                end_frame=frame_count,
                duration=(frame_count - scene_start) / fps,
                video_path=video_path
            ))

        return scenes

class HistDiffStrategy(SceneDetectionStrategy):
    """Histogram-comparison detection.

    A cut is declared when the Bhattacharyya distance between the colour
    histograms of consecutive frames exceeds the threshold.
    """

    # Per-channel weights used in weighted mode (B, G, R order).
    CHANNEL_WEIGHTS = [0.5, 0.3, 0.2]

    def get_default_params(self) -> Dict[str, Any]:
        """Default tuning parameters for this strategy."""
        return {
            "threshold": 0.3,
            "min_scene_length": 15,
            "hist_size": 256,
            "channels": [0, 1, 2],  # BGR channels
            "use_weighted": True
        }

    def _frame_hists(self, frame, channels, hist_size, use_weighted):
        """Return the histogram representation of one frame as a list.

        Weighted mode uses one normalized 1-D histogram per channel. Joint
        mode keeps the original single multi-dimensional histogram (note that
        for 3 channels that is hist_size**3 bins — very large at the default
        hist_size of 256; weighted mode avoids this).
        """
        if use_weighted:
            hists = []
            for ch in channels:
                h = cv2.calcHist([frame], [ch], None, [hist_size], [0, 256])
                hists.append(cv2.normalize(h, h).flatten())
            return hists
        h = cv2.calcHist([frame], channels, None, [hist_size] * len(channels),
                         [0, 256] * len(channels))
        return [cv2.normalize(h, h).flatten()]

    def detect(self, cap, fps, total_frames, threshold, min_scene_length, progress_callback=None, **kwargs):
        """Scan the capture and return scenes split at histogram changes.

        Bug fix: the previous weighted mode computed a single joint-histogram
        score and multiplied it by the *sum* of the channel weights — a no-op
        for 3 channels (0.5 + 0.3 + 0.2 == 1.0) and an arbitrary scaling for
        fewer. Weighted mode now compares one histogram per channel and
        combines the per-channel distances with the weights, as the original
        "BGR channel weights" comment intended.
        """
        scenes = []
        prev_hists = None
        frame_count = 0
        scene_start = 0
        hist_size = kwargs.get("hist_size", 256)
        channels = kwargs.get("channels", [0, 1, 2])
        use_weighted = kwargs.get("use_weighted", True)
        video_path = kwargs.get("video_path")
        weights = self.CHANNEL_WEIGHTS[:len(channels)]

        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            hists = self._frame_hists(frame, channels, hist_size, use_weighted)

            if prev_hists is not None:
                if use_weighted:
                    # Weighted sum of per-channel Bhattacharyya distances.
                    score = sum(
                        w * cv2.compareHist(p, h, cv2.HISTCMP_BHATTACHARYYA)
                        for w, p, h in zip(weights, prev_hists, hists)
                    )
                else:
                    score = cv2.compareHist(prev_hists[0], hists[0],
                                            cv2.HISTCMP_BHATTACHARYYA)

                if score > threshold and (frame_count - scene_start) >= min_scene_length:
                    scenes.append(SceneInfo(
                        index=len(scenes),
                        start_frame=scene_start,
                        end_frame=frame_count,
                        duration=(frame_count - scene_start) / fps,
                        video_path=video_path
                    ))
                    scene_start = frame_count

            prev_hists = hists
            frame_count += 1

            if progress_callback and total_frames > 0:
                progress_callback(frame_count / total_frames)

        # Close out the trailing scene if it is long enough.
        if frame_count - scene_start >= min_scene_length:
            scenes.append(SceneInfo(
                index=len(scenes),
                start_frame=scene_start,
                end_frame=frame_count,
                duration=(frame_count - scene_start) / fps,
                video_path=video_path
            ))

        return scenes

class ContentDiffStrategy(SceneDetectionStrategy):
    """Content-change detection based on luminance mean/variance statistics,
    optionally combined with dense optical-flow motion magnitude."""

    def get_default_params(self) -> Dict[str, Any]:
        """Default tuning parameters for this strategy."""
        return {
            "threshold": 50.0,
            "min_scene_length": 15,
            "use_blocks": True,
            "block_size": 8,
            "use_motion": True
        }

    def _frame_stats(self, gray, use_blocks, block_size):
        """Return (mean, stddev) statistics for one grayscale frame.

        In block mode the per-block means/stddevs are averaged, which is more
        sensitive to local changes than whole-frame statistics. The block
        computation is fully vectorized; the original per-block Python loop
        made one cv2.meanStdDev call per block per frame.
        """
        if use_blocks:
            h, w = gray.shape
            bh, bw = h // block_size, w // block_size
            if bh > 0 and bw > 0:
                blocks = (gray[:bh * block_size, :bw * block_size]
                          .astype(np.float64)
                          .reshape(bh, block_size, bw, block_size))
                # np.std is the population std, matching cv2.meanStdDev.
                return (float(blocks.mean(axis=(1, 3)).mean()),
                        float(blocks.std(axis=(1, 3)).mean()))
            # Frame smaller than a single block: fall back to whole-frame
            # stats (the original code produced NaN here via np.mean([])).
        mean, stddev = cv2.meanStdDev(gray)
        return (mean[0][0], stddev[0][0])

    def detect(self, cap, fps, total_frames, threshold, min_scene_length, progress_callback=None, **kwargs):
        """Scan the capture and return scenes split at content changes.

        Args:
            cap: opened cv2.VideoCapture (rewound to frame 0 on entry).
            fps: frames per second, used to compute scene durations.
            total_frames: frame count, used only for progress reporting.
            threshold: cut threshold on the combined statistics/motion diff.
            min_scene_length: minimum scene length in frames.
            progress_callback: optional callable receiving progress in [0, 1].
            **kwargs: strategy options, see get_default_params().
        """
        scenes = []
        prev_stats = None
        prev_gray = None
        frame_count = 0
        scene_start = 0
        use_blocks = kwargs.get("use_blocks", True)
        block_size = kwargs.get("block_size", 8)
        use_motion = kwargs.get("use_motion", True)
        video_path = kwargs.get("video_path")

        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            current_stats = self._frame_stats(gray, use_blocks, block_size)

            if prev_stats is not None:
                # Change in mean plus change in stddev.
                diff = abs(current_stats[0] - prev_stats[0]) + abs(current_stats[1] - prev_stats[1])

                # Add dense optical-flow motion magnitude, scaled by 10 to
                # bring it to the same order of magnitude as the statistics.
                if use_motion and prev_gray is not None:
                    flow = cv2.calcOpticalFlowFarneback(
                        prev_gray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0
                    )
                    diff += np.mean(np.abs(flow)) * 10

                if diff > threshold and (frame_count - scene_start) >= min_scene_length:
                    scenes.append(SceneInfo(
                        index=len(scenes),
                        start_frame=scene_start,
                        end_frame=frame_count,
                        duration=(frame_count - scene_start) / fps,
                        video_path=video_path
                    ))
                    scene_start = frame_count

            prev_stats = current_stats
            prev_gray = gray
            frame_count += 1

            if progress_callback and total_frames > 0:
                progress_callback(frame_count / total_frames)

        # Close out the trailing scene if it is long enough.
        if frame_count - scene_start >= min_scene_length:
            scenes.append(SceneInfo(
                index=len(scenes),
                start_frame=scene_start,
                end_frame=frame_count,
                duration=(frame_count - scene_start) / fps,
                video_path=video_path
            ))

        return scenes

class HybridStrategy(SceneDetectionStrategy):
    """Hybrid strategy: runs several base strategies and merges their cuts."""

    def get_default_params(self) -> Dict[str, Any]:
        """Default tuning parameters for this strategy."""
        return {
            "threshold": 0.5,
            "min_scene_length": 15,
            "weights": {
                "diff": 0.4,
                "hist": 0.3,
                "content": 0.3
            }
        }

    def detect(self, cap, fps, total_frames, threshold, min_scene_length, progress_callback=None, **kwargs):
        """Run every weighted sub-strategy over the capture and merge results.

        NOTE(review): the same *threshold* is forwarded to every sub-strategy
        although their natural scales differ widely (frame diff ~30,
        histogram ~0.3, content ~50) — confirm whether per-strategy
        thresholds were intended.
        """
        weights = kwargs.get("weights", {"diff": 0.4, "hist": 0.3, "content": 0.3})
        strategies = {
            "diff": FrameDiffStrategy(),
            "hist": HistDiffStrategy(),
            "content": ContentDiffStrategy()
        }

        all_scenes = {}
        for name, strategy in strategies.items():
            if name in weights:
                all_scenes[name] = strategy.detect(
                    cap, fps, total_frames, threshold, min_scene_length,
                    progress_callback, **kwargs
                )
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  # rewind for the next pass

        return self._merge_scenes(all_scenes, weights, threshold, fps,
                                  kwargs.get("video_path"))

    def _merge_scenes(self, all_scenes: Dict[str, List[SceneInfo]],
                      weights: Dict[str, float], threshold: float,
                      fps: float, video_path: Optional[str] = None) -> List[SceneInfo]:
        """Merge the detection results of all sub-strategies.

        Bug fix: the merged SceneInfo.duration previously stored the weighted
        vote score instead of the actual segment length; it now stores
        (end - start) / fps and carries the video path.
        """
        if not all_scenes:
            return []

        # Collect every scene boundary proposed by any strategy.
        boundaries = set()
        for scenes in all_scenes.values():
            for scene in scenes:
                boundaries.add(scene.start_frame)
                boundaries.add(scene.end_frame)
        boundaries = sorted(boundaries)

        merged = []
        for start, end in zip(boundaries, boundaries[1:]):
            # Weighted vote: a strategy supports this segment when one of its
            # scenes fully covers it.
            scores = []
            for name, scenes in all_scenes.items():
                weight = weights.get(name, 0)
                if weight > 0:
                    for scene in scenes:
                        if scene.start_frame <= start and scene.end_frame >= end:
                            scores.append(weight * scene.duration)
                            break

            if scores and sum(scores) > threshold:
                merged.append(SceneInfo(
                    index=len(merged),
                    start_frame=start,
                    end_frame=end,
                    duration=(end - start) / fps if fps > 0 else 0.0,
                    video_path=video_path
                ))

        return merged

class SmartSceneStrategy(SceneDetectionStrategy):
    """Smart scene detection fusing visual, audio, and motion features."""

    def get_default_params(self) -> Dict[str, Any]:
        """Default tuning parameters for this strategy."""
        return {
            "threshold": 0.5,
            "min_scene_length": 15,
            "weights": {
                "visual": 0.4,    # weight of visual features
                "audio": 0.3,     # weight of audio features
                "motion": 0.3     # weight of motion features
            },
            "visual_params": {
                "hist_threshold": 0.3,
                "ssim_threshold": 0.8,
                "edge_threshold": 0.2
            },
            "audio_params": {
                "energy_threshold": 0.3,
                "silence_threshold": 0.1,
                "window_size": 30
            },
            "motion_params": {
                "flow_threshold": 0.5,
                "block_size": 16
            }
        }

    def detect(self, cap, fps, total_frames, threshold, min_scene_length, progress_callback=None, **kwargs):
        """Scan the capture scoring visual + motion + audio change per frame.

        Fixes relative to the original implementation:
        - histograms and edge maps are computed for every frame, so
          consecutive-frame comparisons no longer skip the first frame;
        - the edge difference uses cv2.absdiff (plain uint8 subtraction
          wrapped around, e.g. 0 - 255 == 1);
        - each frame is converted to grayscale once instead of three times.
        """
        scenes = []
        frame_count = 0
        scene_start = 0
        video_path = kwargs.get("video_path")

        # Merge caller parameters over the defaults so every key exists.
        defaults = self.get_default_params()
        weights = kwargs.get("weights", defaults["weights"])
        visual_params = {**defaults["visual_params"], **kwargs.get("visual_params", {})}
        audio_params = {**defaults["audio_params"], **kwargs.get("audio_params", {})}
        motion_params = {**defaults["motion_params"], **kwargs.get("motion_params", {})}
        # NOTE(review): the *_threshold entries of visual_params and
        # motion_params are not used by the scoring below — confirm intent.

        prev_gray = None
        prev_hist = None
        prev_edges = None
        audio_energy = []
        motion_flow = []

        # Optionally load the audio track for audio-energy scoring.
        audio_data = None
        try:
            if video_path:
                video = VideoFileClip(video_path)
                if video.audio is not None:
                    audio_data = video.audio.to_soundarray()
                    if len(audio_data.shape) > 1:  # stereo -> mono
                        audio_data = audio_data.mean(axis=1)
                video.close()
        except Exception as e:
            logging.warning(f"无法加载音频数据: {str(e)}")

        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Per-frame features, computed once and reused for all comparisons.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            hist = cv2.calcHist([frame], [0, 1, 2], None, [8, 8, 8],
                                [0, 256, 0, 256, 0, 256])
            hist = cv2.normalize(hist, hist).flatten()
            edges = cv2.Canny(gray, 100, 200)

            # 1. Visual score: histogram distance + (1 - SSIM) + edge change.
            visual_score = 0
            motion_score = 0
            if prev_gray is not None:
                hist_diff = cv2.compareHist(prev_hist, hist, cv2.HISTCMP_BHATTACHARYYA)
                visual_score += hist_diff * weights["visual"]

                ssim_score = self._calculate_ssim(prev_gray, gray)
                visual_score += (1 - ssim_score) * weights["visual"]

                # cv2.absdiff avoids uint8 wrap-around of plain subtraction.
                edge_diff = np.mean(cv2.absdiff(edges, prev_edges))
                visual_score += edge_diff * weights["visual"]

                # 2. Motion score: mean dense optical-flow magnitude.
                flow = cv2.calcOpticalFlowFarneback(
                    prev_gray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0
                )
                motion_magnitude = np.mean(np.abs(flow))
                motion_score = motion_magnitude * weights["motion"]
                motion_flow.append(motion_magnitude)

                # Keep a bounded history window of motion magnitudes.
                if len(motion_flow) > motion_params.get("window_size", 30):
                    motion_flow.pop(0)

            # 3. Audio score: change of short-term energy between frames.
            audio_score = 0
            if audio_data is not None:
                # Map this frame to its slice of audio samples.
                frame_time = frame_count / fps
                audio_sample_rate = len(audio_data) / (total_frames / fps)
                start_sample = int(frame_time * audio_sample_rate)
                end_sample = int((frame_time + 1 / fps) * audio_sample_rate)

                if start_sample < len(audio_data) and end_sample <= len(audio_data):
                    frame_audio = audio_data[start_sample:end_sample]
                    audio_energy.append(np.mean(np.abs(frame_audio)))

                    # Keep a bounded history window of energies.
                    if len(audio_energy) > audio_params.get("window_size", 30):
                        audio_energy.pop(0)

                    if len(audio_energy) > 1:
                        audio_diff = abs(audio_energy[-1] - audio_energy[-2])
                        audio_score = audio_diff * weights["audio"]

            total_score = visual_score + motion_score + audio_score

            # Scene-cut decision, filtered by a plausibility check.
            if total_score > threshold and (frame_count - scene_start) >= min_scene_length:
                if self._is_real_scene_change(
                    frame_count, scene_start, motion_flow,
                    visual_score, motion_score, audio_score
                ):
                    scenes.append(SceneInfo(
                        index=len(scenes),
                        start_frame=scene_start,
                        end_frame=frame_count,
                        duration=(frame_count - scene_start) / fps,
                        video_path=video_path
                    ))
                    scene_start = frame_count

            prev_gray = gray
            prev_hist = hist
            prev_edges = edges
            frame_count += 1

            if progress_callback and total_frames > 0:
                progress_callback(frame_count / total_frames)

        # Close out the trailing scene if it is long enough.
        if frame_count - scene_start >= min_scene_length:
            scenes.append(SceneInfo(
                index=len(scenes),
                start_frame=scene_start,
                end_frame=frame_count,
                duration=(frame_count - scene_start) / fps,
                video_path=video_path
            ))

        return scenes

    def _calculate_ssim(self, img1, img2):
        """Return the mean SSIM between two grayscale images.

        Standard SSIM with an 11x11 Gaussian window (sigma 1.5) and the
        usual stabilizing constants C1/C2 for 8-bit dynamic range.
        """
        C1 = (0.01 * 255) ** 2
        C2 = (0.03 * 255) ** 2

        img1 = img1.astype(np.float64)
        img2 = img2.astype(np.float64)
        kernel = cv2.getGaussianKernel(11, 1.5)
        window = np.outer(kernel, kernel.transpose())

        # Crop 5 pixels on every side to drop filter border effects.
        mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
        mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
        mu1_sq = mu1 ** 2
        mu2_sq = mu2 ** 2
        mu1_mu2 = mu1 * mu2
        sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
        sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
        sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

        ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
        return ssim_map.mean()

    def _is_real_scene_change(self, frame_count, scene_start, motion_flow,
                            visual_score, motion_score, audio_score):
        """Heuristic filter rejecting likely false-positive cuts."""
        # 1. Motion check: too little motion relative to the recent average
        #    suggests a false detection.
        if motion_flow:
            avg_motion = np.mean(motion_flow)
            if motion_score < avg_motion * 0.5:
                return False

        # 2. Visual check: too little visual change suggests a false positive.
        if visual_score < 0.3:
            return False

        # 3. Length check: reject scenes shorter than the hard minimum.
        min_scene_frames = 15  # minimum scene length in frames
        if frame_count - scene_start < min_scene_frames:
            return False

        return True

class VideoProcessor:
    """High-level facade: loads a video, runs scene detection (with JSON
    result caching), and exports previews / per-scene clips."""

    def __init__(self):
        """Initialize the processor with no video loaded."""
        self.cap = None              # cv2.VideoCapture once a video is loaded
        self.video_path = None       # path of the currently loaded video
        self.fps = 0
        self.total_frames = 0
        self.width = 0
        self.height = 0
        self._cache_dir = Path("cache")
        self._cache_dir.mkdir(exist_ok=True)

        # One strategy instance per algorithm type.
        self.strategies = {
            AlgorithmType.FRAME_DIFF: FrameDiffStrategy(),
            AlgorithmType.HIST_DIFF: HistDiffStrategy(),
            AlgorithmType.CONTENT_DIFF: ContentDiffStrategy(),
            AlgorithmType.HYBRID: HybridStrategy(),
            AlgorithmType.SMART: SmartSceneStrategy()
        }
        self.logger = logging.getLogger(__name__)
        self._last_video_path = None

    def load_video(self, video_path: str) -> bool:
        """Open a video file and read its basic properties.

        Args:
            video_path: path of the video file.

        Returns:
            bool: True when the video was opened successfully.
        """
        try:
            self.cap = cv2.VideoCapture(video_path)
            if not self.cap.isOpened():
                self.logger.error(f"无法打开视频文件: {video_path}")
                return False

            self.video_path = video_path
            self.fps = self.cap.get(cv2.CAP_PROP_FPS)
            self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
            self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            self.logger.info(f"成功加载视频: {video_path}")
            self.logger.info(f"视频信息: {self.fps}fps, {self.total_frames}帧, "
                           f"分辨率: {self.width}x{self.height}")
            self._last_video_path = video_path
            return True

        except Exception as e:
            self.logger.error(f"加载视频时出错: {str(e)}")
            return False

    def get_frame(self, frame_number: int) -> Optional[np.ndarray]:
        """Return the frame at *frame_number* as an RGB array, or None."""
        if not self.cap or not self.cap.isOpened():
            return None

        try:
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
            ret, frame = self.cap.read()
            if not ret:
                return None

            # Convert BGR (OpenCV native) to RGB for display libraries.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            return frame
        except Exception as e:
            self.logger.error(f"获取视频帧失败: {str(e)}")
            return None

    def get_video_info(self) -> Dict:
        """Return fps, frame count, resolution and duration of the video.

        Returns:
            Dict: video property dictionary (duration is 0 when fps is 0).
        """
        return {
            "fps": self.fps,
            "total_frames": self.total_frames,
            "width": self.width,
            "height": self.height,
            "duration": self.total_frames / self.fps if self.fps > 0 else 0
        }

    def detect_scenes(self, threshold: float = 0.3, min_scene_length: int = 15,
                     algorithm: str = "diff", progress_callback: Optional[Callable[[float], None]] = None,
                     **kwargs) -> List[SceneInfo]:
        """Detect scenes, serving cached results when available.

        Fixes relative to the original implementation:
        - an unknown algorithm name now falls back to frame-diff instead of
          raising ValueError into the generic handler;
        - video_path is injected with setdefault, so a caller that passes
          video_path itself no longer triggers a duplicate-kwarg TypeError.
        """
        if not self.cap or not self.cap.isOpened():
            self.logger.error("未加载视频")
            return []

        try:
            # Resolve the strategy; unknown names fall back to frame diff.
            try:
                algo = AlgorithmType(algorithm)
            except ValueError:
                algo = AlgorithmType.FRAME_DIFF
            strategy = self.strategies.get(algo, self.strategies[AlgorithmType.FRAME_DIFF])

            kwargs.setdefault("video_path", self.video_path)

            # Serve cached results when an identical run was done before.
            cache_file = self._get_cache_file(algorithm, threshold, min_scene_length, **kwargs)
            if cache_file.exists():
                self.logger.info("使用缓存的检测结果")
                return self._load_from_cache(cache_file)

            scenes = strategy.detect(
                self.cap,
                self.fps,
                self.total_frames,
                threshold,
                min_scene_length,
                progress_callback,
                **kwargs
            )

            self._save_to_cache(cache_file, scenes)

            self.logger.info(f"检测到 {len(scenes)} 个场景 (算法: {algorithm})")
            return scenes
        except Exception as e:
            self.logger.error(f"场景检测失败: {str(e)}")
            return []

    def extract_scene_frames(self, scenes: List[SceneInfo], output_dir: str) -> bool:
        """Save one representative JPEG per scene into *output_dir*.

        The middle frame of each scene is used, consistent with
        generate_preview (the original code seeked to the start frame while
        its comment claimed the middle frame).

        Args:
            scenes: list of scenes; preview_path is set on each on success.
            output_dir: directory that receives scene_NNN.jpg files.

        Returns:
            bool: True when extraction completed without error.
        """
        if not self.cap:
            self.logger.error("未加载视频")
            return False

        try:
            for scene in scenes:
                # Seek to the scene's middle frame for a representative image.
                mid_frame = (scene.start_frame + scene.end_frame) // 2
                self.cap.set(cv2.CAP_PROP_POS_FRAMES, mid_frame)

                ret, frame = self.cap.read()
                if ret:
                    preview_path = f"{output_dir}/scene_{scene.index:03d}.jpg"
                    cv2.imwrite(preview_path, frame)
                    scene.preview_path = preview_path

            return True

        except Exception as e:
            self.logger.error(f"提取场景帧时出错: {str(e)}")
            return False

    def export_scenes(self, scenes: List[SceneInfo], output_path: str) -> bool:
        """Export scenes to *output_path*.

        Args:
            scenes: list of scenes to export.
            output_path: destination path.

        Returns:
            bool: True (placeholder).
        """
        # TODO: implement export functionality
        return True

    def release(self):
        """Release the underlying capture, if any."""
        if self.cap:
            self.cap.release()
            self.cap = None

    def _get_cache_file(self, algorithm: str, threshold: float, min_scene_length: int, **kwargs) -> Path:
        """Build a deterministic cache path for this video + parameter set.

        Fixes relative to the original implementation:
        - the loaded video's path is part of the key, so two different videos
          no longer share one cache entry;
        - the key is a stable MD5 digest; the built-in hash() is salted per
          process (PYTHONHASHSEED), so the old cache could never be hit
          across runs.
        """
        params = {
            "video": self.video_path,
            "algorithm": algorithm,
            "threshold": threshold,
            "min_scene_length": min_scene_length,
            **kwargs
        }
        # default=str makes non-JSON values (e.g. callables) key-stable.
        digest = hashlib.md5(
            json.dumps(params, sort_keys=True, default=str).encode("utf-8")
        ).hexdigest()
        return self._cache_dir / f"{digest}.json"

    def _save_to_cache(self, cache_file: Path, scenes: List[SceneInfo]):
        """Persist detection results as a JSON list of scene dicts."""
        try:
            data = [scene.to_dict() for scene in scenes]
            with open(cache_file, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
        except Exception as e:
            self.logger.error(f"保存缓存失败: {str(e)}")

    def _load_from_cache(self, cache_file: Path) -> List[SceneInfo]:
        """Load detection results previously written by _save_to_cache."""
        try:
            with open(cache_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
            return [SceneInfo.from_dict(item) for item in data]
        except Exception as e:
            self.logger.error(f"加载缓存失败: {str(e)}")
            return []

    def merge_similar_scenes(self, scenes: List[SceneInfo], similarity_threshold: float = 0.8) -> List[SceneInfo]:
        """Merge adjacent scenes that are close together and similar in length.

        Two consecutive scenes merge when the gap between them is at most
        5 frames and their durations differ by less than
        *similarity_threshold* seconds.
        """
        if not scenes:
            return []

        merged = []
        current = scenes[0]

        for next_scene in scenes[1:]:
            gap = next_scene.start_frame - current.end_frame
            if gap <= 5 and abs(next_scene.duration - current.duration) < similarity_threshold:
                span = next_scene.end_frame - current.start_frame
                current = SceneInfo(
                    index=len(merged),
                    start_frame=current.start_frame,
                    end_frame=next_scene.end_frame,
                    # Guard against fps == 0 (the original raised
                    # ZeroDivisionError); also keep the video path.
                    duration=span / self.fps if self.fps > 0 else 0.0,
                    video_path=current.video_path
                )
            else:
                merged.append(current)
                current = next_scene

        merged.append(current)
        return merged

    def generate_preview(self, scenes: List[SceneInfo], output_path: str,
                        preview_size: Tuple[int, int] = (320, 180),
                        cols: int = 5) -> bool:
        """Tile the middle frame of every scene into one preview image."""
        cap = None
        try:
            if not scenes:
                return False

            # Grid layout: fixed column count, enough rows for all scenes.
            rows = (len(scenes) + cols - 1) // cols
            preview = np.zeros((rows * preview_size[1], cols * preview_size[0], 3), dtype=np.uint8)

            cap = cv2.VideoCapture(self._last_video_path)
            if not cap.isOpened():
                return False

            for i, scene in enumerate(scenes):
                row, col = divmod(i, cols)

                # Use each scene's middle frame as its thumbnail.
                mid_frame = (scene.start_frame + scene.end_frame) // 2
                cap.set(cv2.CAP_PROP_POS_FRAMES, mid_frame)
                ret, frame = cap.read()

                if ret:
                    frame = cv2.resize(frame, preview_size)
                    y1 = row * preview_size[1]
                    x1 = col * preview_size[0]
                    preview[y1:y1 + preview_size[1], x1:x1 + preview_size[0]] = frame

            cv2.imwrite(output_path, preview)
            return True

        except Exception as e:
            self.logger.error(f"生成预览图失败: {str(e)}")
            return False
        finally:
            # Always release the capture (the original leaked it when an
            # exception occurred before cap.release()).
            if cap is not None:
                cap.release()

    def export_scene_videos(self, scenes: list, output_dir: str) -> list:
        """Export each scene as an mp4 clip and return the written paths.

        Args:
            scenes: list of scenes to export.
            output_dir: directory that receives scene_NNN.mp4 files.

        Returns:
            List[str]: paths of the exported clip files.

        Raises:
            RuntimeError: when no video is loaded.
        """
        os.makedirs(output_dir, exist_ok=True)
        exported_files = []
        if not self.video_path:
            raise RuntimeError("未加载视频")
        video = VideoFileClip(self.video_path)
        try:
            for scene in scenes:
                start = scene.start_frame / self.fps
                end = scene.end_frame / self.fps
                out_path = os.path.join(output_dir, f"scene_{scene.index:03d}.mp4")
                # moviepy 2.x API: subclipped (renamed from 1.x subclip).
                clip = video.subclipped(start, end)
                clip.write_videofile(out_path, codec="libx264", audio_codec="aac", logger=None)
                exported_files.append(out_path)
        finally:
            # Always close the source clip, even when an export fails mid-way
            # (the original leaked the reader on exceptions).
            video.close()
        return exported_files

if __name__ == "__main__":
    # Diagnostic output about the installed moviepy. Previously these prints
    # ran as import-time side effects; guard them so they only execute when
    # this module is run directly as a script.
    print('moviepy version:', moviepy.__version__)
    print('VideoFileClip:', VideoFileClip)
    print('VideoFileClip methods:', dir(VideoFileClip))