"""
视频切分服务

提供高级视频切分功能，包括智能场景检测、内容分析等。
"""

import os
import json
import asyncio
from typing import List, Dict, Any, Optional, Tuple
from pathlib import Path
from dataclasses import dataclass, asdict
from loguru import logger
from src.core.di import injector
from .shot_detection_service import ShotDetectionService
from .config_service import config_service
from ..detectors import ShotBoundary, DetectionResult
from .ffmpeg_slice_service import FfmpegSliceService

# Concrete exception types for the video segmentation service
class VideoSegmentationError(Exception):
    """Base exception for all video segmentation failures."""


class VideoNotFoundError(VideoSegmentationError):
    """Raised when the input video file does not exist."""


class VideoInfoError(VideoSegmentationError):
    """Raised when video metadata cannot be read or is invalid."""


class FFmpegError(VideoSegmentationError):
    """Raised when an FFmpeg processing step fails."""


class ConfigurationError(VideoSegmentationError):
    """Raised when the service receives an invalid configuration."""


@dataclass
class VideoSegment:
    """A single segment (shot/scene/chapter) cut out of a source video.

    Times are in seconds; frames are absolute frame indices in the source
    video. ``metadata`` uses a ``None`` default replaced in
    ``__post_init__`` so every instance gets its own dict (avoids the
    shared-mutable-default trap).
    """
    segment_id: int       # 1-based id within one segmentation run
    start_time: float     # segment start, seconds
    end_time: float       # segment end, seconds
    duration: float       # end_time - start_time, seconds
    start_frame: int      # frame index at segment start
    end_frame: int        # frame index at segment end
    confidence: float     # detector confidence of the starting boundary
    segment_type: str = 'shot'  # shot, scene, chapter
    file_path: Optional[str] = None  # set once the segment file is written
    # Annotation fixed: the default is None, so the type is Optional.
    metadata: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Replace the None sentinel with a fresh dict per instance.
        if self.metadata is None:
            self.metadata = {}


@dataclass
class SegmentationResult:
    """Aggregate outcome of one video segmentation run."""
    video_path: str                # source video that was segmented
    segments: List[VideoSegment]   # ordered segments produced
    algorithm: str                 # method/algorithm label used
    processing_time: float         # wall-clock seconds spent
    total_duration: float          # duration covered, seconds
    segment_count: int             # len(segments), kept for serialization
    # Annotation fixed: the default is None, so the type is Optional.
    metadata: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Replace the None sentinel with a fresh dict per instance.
        if self.metadata is None:
            self.metadata = {}

from src.core.di import Injectable
@Injectable()
class VideoSegmentationService:
    """视频切分服务"""
    
    def __init__(self):
        """
        初始化视频切分服务
        """
        self.config = {}
        self.logger = logger.bind(service="VideoSegmentation")

        # 从全局配置加载默认值
        self.default_config = self._load_default_config()

        # 深度合并配置
        self.config = self._deep_merge_config(self.default_config, self.config)

        # 初始化镜头检测服务
        self.shot_detection_service = ShotDetectionService()
        self.shot_detection_service.configure(self.config.get('shot_detection', {}))

    def configure(self, config: Optional[Dict[str, Any]] = None):
        """
        配置服务

        Args:
            config: 配置字典
        """
        if config:
            self.config = self._deep_merge_config(self.config, config)
            # 重新配置镜头检测服务
            self.shot_detection_service = ShotDetectionService()
            if hasattr(self.shot_detection_service, 'configure'):
                self.shot_detection_service.configure(self.config.get('shot_detection', {}))
        return self

    def _load_default_config(self) -> Dict[str, Any]:
        """从全局配置加载默认值"""
        try:
            # 从全局配置服务获取视频分割配置
            video_seg_config = config_service.get_section("video_segmentation")

            # 如果配置文件中没有视频分割配置，使用硬编码默认值
            if not video_seg_config:
                self.logger.warning("配置文件中未找到video_segmentation配置，使用默认值")
                return {
                    'min_segment_duration': 0.01,
                    'max_segment_duration': 300.0,
                    'scene_merge_threshold': 0.7,
                    'output_format': 'mp4',
                    'quality_preset': 'medium',
                    'enable_scene_detection': True,
                    'enable_content_analysis': False,
                    'shot_detection': {
                        'default_algorithm': 'multi_channel_histogram',
                        'histogram': {'threshold': 0.4},
                        'multi_channel_histogram': {'threshold': 0.35},
                        'frame_diff': {'threshold': 0.3},
                        'enhanced_frame_diff': {'threshold': 0.25},
                        'adaptive_histogram': {'threshold': 0.3}
                    }
                }

            self.logger.info("从配置文件加载视频分割默认配置")
            return video_seg_config

        except Exception as e:
            self.logger.warning(f"加载配置失败，使用硬编码默认值: {e}")
            return {
                'min_segment_duration': 0.01,
                'max_segment_duration': 300.0,
                'scene_merge_threshold': 0.7,
                'output_format': 'mp4',
                'quality_preset': 'medium',
                'enable_scene_detection': True,
                'enable_content_analysis': False,
                'shot_detection': {
                    'default_algorithm': 'multi_channel_histogram',
                    'histogram': {'threshold': 0.4},
                    'multi_channel_histogram': {'threshold': 0.35},
                    'frame_diff': {'threshold': 0.3},
                    'enhanced_frame_diff': {'threshold': 0.25},
                    'adaptive_histogram': {'threshold': 0.3}
                }
            }

    def _deep_merge_config(self, default: Dict[str, Any], user: Dict[str, Any]) -> Dict[str, Any]:
        """深度合并配置字典"""
        result = default.copy()

        for key, value in user.items():
            if key in result and isinstance(result[key], dict) and isinstance(value, dict):
                # 递归合并嵌套字典
                result[key] = self._deep_merge_config(result[key], value)
            else:
                # 直接覆盖或添加新键
                result[key] = value

        return result

    def _apply_preset(self, preset: str) -> Tuple[str, str]:
        """
        应用预设组合

        Args:
            preset: 预设名称

        Returns:
            Tuple[method, algorithm]: 方法和算法组合
        """
        presets = {
            'high_quality': ('smart', 'multi_channel_histogram'),
            'fast': ('shots', 'frame_diff'),
            'auto': ('smart', 'auto'),
            'max_precision': ('scenes', 'ensemble')
        }

        if preset not in presets:
            available = ', '.join(presets.keys())
            raise ValueError(f"Unknown preset '{preset}'. Available presets: {available}")

        return presets[preset]

    async def segment_video(self,
                           video_path: str,
                           output_dir: str,
                           method: str = 'smart',
                           preset: Optional[str] = None,
                           **kwargs) -> SegmentationResult:
        """
        切分视频

        Args:
            video_path: 输入视频路径
            output_dir: 输出目录
            method: 切分方法 ('shots', 'scenes', 'smart', 'time_based')
            preset: 预设组合 ('high_quality', 'fast', 'auto', 'max_precision')
            **kwargs: 额外参数

        Returns:
            SegmentationResult: 切分结果
        """
        if not os.path.exists(video_path):
            raise VideoNotFoundError(f"Video file not found: {video_path}")

        # 处理预设组合
        if preset:
            method, algorithm = self._apply_preset(preset)
            kwargs['algorithm'] = algorithm
            self.logger.info(f"Using preset '{preset}': method={method}, algorithm={algorithm}")

        self.logger.info(f"Starting video segmentation: {video_path} using {method}")

        try:
            if method == 'shots':
                return await self._segment_by_shots(video_path, output_dir, **kwargs)
            elif method == 'scenes':
                return await self._segment_by_scenes(video_path, output_dir, **kwargs)
            elif method == 'smart':
                return await self._smart_segmentation(video_path, output_dir, **kwargs)
            elif method == 'time_based':
                return await self._segment_by_time(video_path, output_dir, **kwargs)
            else:
                raise ConfigurationError(f"Unknown segmentation method: {method}")

        except VideoSegmentationError:
            # 重新抛出我们自定义的异常
            raise
        except OSError as e:
            self.logger.error(f"File system error in video segmentation: {e}")
            raise VideoSegmentationError(f"File system error: {e}") from e
        except Exception as e:
            self.logger.error(f"Unexpected error in video segmentation: {e}")
            raise VideoSegmentationError(f"Unexpected error: {e}") from e
    
    async def _segment_by_shots(self, 
                               video_path: str, 
                               output_dir: str,
                               **kwargs) -> SegmentationResult:
        """基于镜头检测的切分"""
        algorithm = "multi_channel_histogram"
        start_time = asyncio.get_event_loop().time()
        
        # 检测镜头边界
        detection_result = await self.shot_detection_service.detect_shots(
            video_path, algorithm, output_dir
        )
        
        # 转换为片段
        segments = await self._boundaries_to_segments(
            detection_result.boundaries, video_path, 'shot'
        )
        
        # 生成视频文件
        if kwargs.get('generate_files', True):
            await self._generate_segment_files(segments, video_path, output_dir)
        
        processing_time = asyncio.get_event_loop().time() - start_time
        
        return SegmentationResult(
            video_path=video_path,
            segments=segments,
            algorithm=f"shots_{algorithm}",
            processing_time=processing_time,
            total_duration=segments[-1].end_time if segments else 0.0,
            segment_count=len(segments),
            metadata={
                'detection_result': asdict(detection_result),
                'method': 'shots'
            }
        )
    
    async def _segment_by_scenes(self,
                                video_path: str,
                                output_dir: str,
                                algorithm: str = 'ensemble',
                                **kwargs) -> SegmentationResult:
        """基于场景检测的切分"""
        start_time = asyncio.get_event_loop().time()

        # 首先检测镜头
        detection_result = await self.shot_detection_service.detect_shots(
            video_path, algorithm, output_dir
        )
        
        # 将镜头合并为场景
        scene_boundaries = await self._merge_shots_to_scenes(
            detection_result.boundaries, video_path
        )
        
        # 转换为片段
        segments = await self._boundaries_to_segments(
            scene_boundaries, video_path, 'scene'
        )
        
        # 生成视频文件
        if kwargs.get('generate_files', True):
            await self._generate_segment_files(segments, video_path, output_dir)
        
        processing_time = asyncio.get_event_loop().time() - start_time
        
        return SegmentationResult(
            video_path=video_path,
            segments=segments,
            algorithm="scenes",
            processing_time=processing_time,
            total_duration=segments[-1].end_time if segments else 0.0,
            segment_count=len(segments),
            metadata={
                'original_shots': len(detection_result.boundaries),
                'merged_scenes': len(segments),
                'method': 'scenes'
            }
        )
    
    async def _smart_segmentation(self,
                                 video_path: str,
                                 output_dir: str,
                                 algorithm: str = 'auto',
                                 **kwargs) -> SegmentationResult:
        """Smart segmentation: choose a strategy from measured video features.

        Args:
            video_path: Path of the input video.
            output_dir: Directory for generated segment files.
            algorithm: Shot-detection algorithm ('auto' lets the backend pick).
            **kwargs: 'generate_files' (default True) controls file output.

        Returns:
            SegmentationResult: Segments tagged with the chosen strategy.
        """
        start_time = asyncio.get_event_loop().time()

        # 1. Detect shot boundaries.
        shot_result = await self.shot_detection_service.detect_shots(
            video_path, algorithm, output_dir
        )

        # 2. Collect coarse video statistics (duration, shot count, ...).
        video_features = await self._analyze_video_features(video_path)

        # 3. Pick the best strategy from those features.
        if video_features['avg_shot_duration'] < 3.0:
            # Many short shots: merge them into scenes.
            boundaries = await self._merge_shots_to_scenes(
                shot_result.boundaries, video_path
            )
            segment_type = 'scene'
        elif video_features['shot_count'] < 10:
            # Few shots: keep the raw shot boundaries.
            boundaries = shot_result.boundaries
            segment_type = 'shot'
        else:
            # Otherwise use the adaptive merge/split strategy.
            boundaries = await self._adaptive_segmentation(
                shot_result.boundaries, video_path, video_features
            )
            segment_type = 'adaptive'

        # 4. Convert boundaries into segments.
        segments = await self._boundaries_to_segments(
            boundaries, video_path, segment_type
        )

        # 5. Optionally write segment files.
        if kwargs.get('generate_files', True):
            await self._generate_segment_files(segments, video_path, output_dir)

        processing_time = asyncio.get_event_loop().time() - start_time

        return SegmentationResult(
            video_path=video_path,
            segments=segments,
            algorithm="smart",
            processing_time=processing_time,
            total_duration=segments[-1].end_time if segments else 0.0,
            segment_count=len(segments),
            metadata={
                'video_features': video_features,
                'segment_type': segment_type,
                'method': 'smart'
            }
        )
    
    async def _segment_by_time(self, 
                              video_path: str, 
                              output_dir: str,
                              segment_duration: float = 60.0,
                              **kwargs) -> SegmentationResult:
        """基于时间的固定长度切分"""
        start_time = asyncio.get_event_loop().time()
        
        # 获取视频信息
        video_info = await self._get_video_info(video_path)
        total_duration = video_info['duration']
        fps = video_info['fps']
        
        # 生成时间边界
        segments = []
        segment_id = 1
        current_time = 0.0
        
        while current_time < total_duration:
            end_time = min(current_time + segment_duration, total_duration)
            
            segment = VideoSegment(
                segment_id=segment_id,
                start_time=current_time,
                end_time=end_time,
                duration=end_time - current_time,
                start_frame=int(current_time * fps),
                end_frame=int(end_time * fps),
                confidence=1.0,
                segment_type='time_based'
            )
            
            segments.append(segment)
            segment_id += 1
            current_time = end_time
        
        # 生成视频文件
        if kwargs.get('generate_files', True):
            await self._generate_segment_files(segments, video_path, output_dir)
        
        processing_time = asyncio.get_event_loop().time() - start_time
        
        return SegmentationResult(
            video_path=video_path,
            segments=segments,
            algorithm="time_based",
            processing_time=processing_time,
            total_duration=total_duration,
            segment_count=len(segments),
            metadata={
                'segment_duration': segment_duration,
                'method': 'time_based'
            }
        )
    
    async def _boundaries_to_segments(self,
                                     boundaries: List[ShotBoundary],
                                     video_path: str,
                                     segment_type: str) -> List[VideoSegment]:
        """Convert detected boundaries into concrete VideoSegment objects.

        Each segment spans from one boundary to the next; the last segment
        runs to the end of the video. Segments shorter than the configured
        ``min_segment_duration`` are dropped. With no boundaries at all the
        whole video becomes a single segment.

        Args:
            boundaries: Detected boundaries (any order; sorted internally).
            video_path: Source video, probed for duration and fps.
            segment_type: Label stored on every produced segment.

        Returns:
            List[VideoSegment]: Segments ordered by start time.
        """
        video_info = await self._get_video_info(video_path)
        total_duration = video_info['duration']
        fps = video_info['fps']

        # No boundaries detected: treat the entire video as one segment.
        if not boundaries:
            self.logger.info(f"没有检测到镜头边界，将整个视频作为一个片段: {total_duration:.3f}s")
            segment = VideoSegment(
                segment_id=1,
                start_time=0.0,
                end_time=total_duration,
                duration=total_duration,
                start_frame=0,
                end_frame=int(total_duration * fps),
                confidence=1.0,
                segment_type=segment_type,
                file_path="",  # filled in by the file-generation step
                metadata={
                    'boundary_type': 'full_video',
                    'source_metadata': {}
                }
            )
            return [segment]
        
        segments = []

        # Process boundaries in timestamp order.
        sorted_boundaries = sorted(boundaries, key=lambda b: b.timestamp)

        # Insert a synthetic start boundary when the first real boundary is
        # not at (or very near) the beginning of the video.
        all_boundaries = []
        if not sorted_boundaries or sorted_boundaries[0].timestamp > 0.1:
            all_boundaries.append(ShotBoundary(0, 0.0, 1.0, boundary_type='start'))

        all_boundaries.extend(sorted_boundaries)

        self.logger.debug(f"Processing {len(all_boundaries)} boundaries for segmentation")

        for i in range(len(all_boundaries)):
            start_boundary = all_boundaries[i]

            # A segment ends at the next boundary, or at the video's end.
            if i < len(all_boundaries) - 1:
                end_boundary = all_boundaries[i + 1]
                end_time = end_boundary.timestamp
                end_frame = end_boundary.frame_number
            else:
                end_time = total_duration
                end_frame = int(total_duration * fps)

            duration = end_time - start_boundary.timestamp

            self.logger.debug(f"Segment {i+1}: {start_boundary.timestamp:.3f}s - {end_time:.3f}s (duration: {duration:.3f}s)")

            # Drop segments below the configured minimum duration.
            if duration < self.config['min_segment_duration']:
                self.logger.debug(f"Skipping segment {i+1}: too short ({duration:.3f}s < {self.config['min_segment_duration']}s)")
                continue
            
            segment = VideoSegment(
                segment_id=len(segments) + 1,
                start_time=start_boundary.timestamp,
                end_time=end_time,
                duration=duration,
                start_frame=start_boundary.frame_number,
                end_frame=end_frame,
                confidence=start_boundary.confidence,
                segment_type=segment_type,
                metadata={
                    # Fall back gracefully when the boundary object lacks
                    # optional attributes.
                    'boundary_type': getattr(start_boundary, 'boundary_type', 'cut'),
                    'source_metadata': getattr(start_boundary, 'metadata', {})
                }
            )
            
            segments.append(segment)
        
        return segments

    async def _merge_shots_to_scenes(self,
                                    boundaries: List[ShotBoundary],
                                    video_path: str) -> List[ShotBoundary]:
        """将镜头合并为场景"""
        if not boundaries:
            return []

        # 简单的场景合并策略：基于时间间隔和置信度
        merged_boundaries = []
        current_scene_start = boundaries[0]

        for i in range(1, len(boundaries)):
            current_boundary = boundaries[i]
            prev_boundary = boundaries[i-1]

            # 计算时间间隔
            time_gap = current_boundary.timestamp - prev_boundary.timestamp

            # 如果时间间隔较大或置信度很高，认为是新场景
            if (time_gap > self.config['min_segment_duration'] * 2 or
                current_boundary.confidence > self.config['scene_merge_threshold']):

                # 结束当前场景
                merged_boundaries.append(current_boundary)
                current_scene_start = current_boundary

        return merged_boundaries

    async def _analyze_video_features(self, video_path: str) -> Dict[str, Any]:
        """Collect coarse statistics used to pick a segmentation strategy.

        Probes fps/frame count with OpenCV, then runs a fast 'frame_diff'
        shot detection to estimate shot count and average shot duration.
        On any failure this deliberately degrades to a dict of zeros (fps
        defaults to 30) so smart segmentation can still proceed.

        Args:
            video_path: Path of the video to analyze.

        Returns:
            Dict[str, Any]: Keys 'duration', 'fps', 'frame_count',
            'shot_count', 'avg_shot_duration', 'avg_confidence'.
        """
        try:
            import cv2

            cap = None
            try:
                cap = cv2.VideoCapture(video_path)
                if not cap.isOpened():
                    raise VideoInfoError(f"Cannot open video file: {video_path}")

                fps = cap.get(cv2.CAP_PROP_FPS)
                frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                duration = frame_count / fps if fps > 0 else 0

                if fps <= 0 or frame_count <= 0:
                    raise VideoInfoError(f"Invalid video properties: fps={fps}, frames={frame_count}")

            finally:
                # Always release the capture handle, even when probing fails.
                if cap is not None:
                    cap.release()

            # Fast detection pass to gather basic shot statistics.
            try:
                detection_result = await self.shot_detection_service.detect_shots(
                    video_path, 'frame_diff'
                )
            except Exception as e:
                raise VideoInfoError(f"Failed to detect shots for analysis: {e}") from e

            shot_count = len(detection_result.boundaries)
            avg_shot_duration = duration / shot_count if shot_count > 0 else duration

            return {
                'duration': duration,
                'fps': fps,
                'frame_count': frame_count,
                'shot_count': shot_count,
                'avg_shot_duration': avg_shot_duration,
                'avg_confidence': sum(b.confidence for b in detection_result.boundaries) / shot_count if shot_count > 0 else 0
            }

        except Exception as e:
            # Best-effort fallback: zeros keep the caller's heuristics working.
            self.logger.error(f"Error analyzing video features: {e}")
            return {
                'duration': 0,
                'fps': 30,
                'frame_count': 0,
                'shot_count': 0,
                'avg_shot_duration': 0,
                'avg_confidence': 0
            }

    async def _adaptive_segmentation(self,
                                   boundaries: List[ShotBoundary],
                                   video_path: str,
                                   features: Dict[str, Any]) -> List[ShotBoundary]:
        """自适应切分策略"""
        if not boundaries:
            return []

        # 根据视频特征调整切分策略
        if features['avg_shot_duration'] < 2.0:
            # 镜头很短，需要合并
            return await self._merge_short_shots(boundaries, min_duration=3.0)
        elif features['avg_shot_duration'] > 30.0:
            # 镜头很长，可能需要进一步切分
            return await self._split_long_shots(boundaries, video_path, max_duration=60.0)
        else:
            # 镜头长度适中，直接使用
            return boundaries

    async def _merge_short_shots(self,
                                boundaries: List[ShotBoundary],
                                min_duration: float) -> List[ShotBoundary]:
        """合并过短的镜头"""
        if not boundaries:
            return []

        merged = []
        current_start = boundaries[0]

        for i in range(1, len(boundaries)):
            duration = boundaries[i].timestamp - current_start.timestamp

            if duration >= min_duration:
                merged.append(boundaries[i])
                current_start = boundaries[i]

        return merged

    async def _split_long_shots(self,
                               boundaries: List[ShotBoundary],
                               video_path: str,
                               max_duration: float) -> List[ShotBoundary]:
        """切分过长的镜头"""
        split_boundaries = []

        # 获取视频实际帧率
        video_info = await self._get_video_info(video_path)
        fps = video_info['fps']

        for i in range(len(boundaries)):
            split_boundaries.append(boundaries[i])

            if i < len(boundaries) - 1:
                duration = boundaries[i+1].timestamp - boundaries[i].timestamp

                if duration > max_duration:
                    # 在长镜头中间添加切分点
                    num_splits = int(duration / max_duration)
                    for j in range(1, num_splits + 1):
                        split_time = boundaries[i].timestamp + j * max_duration
                        split_frame = int(split_time * fps)  # 使用实际帧率

                        split_boundary = ShotBoundary(
                            frame_number=split_frame,
                            timestamp=split_time,
                            confidence=0.5,
                            boundary_type='split',
                            metadata={'reason': 'long_shot_split', 'fps': fps}
                        )
                        split_boundaries.append(split_boundary)

        return sorted(split_boundaries, key=lambda x: x.timestamp)

    async def _get_video_info(self, video_path: str) -> Dict[str, Any]:
        """获取视频信息 - 使用FFprobe获取准确信息"""
        try:
            import asyncio
            import json

            # 使用FFprobe获取准确的视频信息
            cmd = [
                'ffprobe',
                '-v', 'quiet',
                '-print_format', 'json',
                '-show_format',
                '-show_streams',
                video_path
            ]

            process = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )

            stdout, stderr = await process.communicate()

            if process.returncode != 0:
                # 如果FFprobe失败，回退到OpenCV
                self.logger.warning(f"FFprobe failed, falling back to OpenCV: {stderr.decode()}")
                return await self._get_video_info_opencv(video_path)

            try:
                probe_data = json.loads(stdout.decode())
                return self._parse_ffprobe_info(probe_data)
            except json.JSONDecodeError as e:
                self.logger.warning(f"Failed to parse FFprobe output, falling back to OpenCV: {e}")
                return await self._get_video_info_opencv(video_path)

        except Exception as e:
            self.logger.warning(f"FFprobe method failed, falling back to OpenCV: {e}")
            return await self._get_video_info_opencv(video_path)

    def _parse_ffprobe_info(self, probe_data: dict) -> Dict[str, Any]:
        """解析FFprobe输出"""
        format_info = probe_data.get('format', {})
        video_stream = None

        # 找到视频流
        for stream in probe_data.get('streams', []):
            if stream.get('codec_type') == 'video':
                video_stream = stream
                break

        if not video_stream:
            raise VideoInfoError("No video stream found")

        # 获取基本信息
        duration = float(format_info.get('duration', 0))
        width = int(video_stream.get('width', 0))
        height = int(video_stream.get('height', 0))

        # 解析帧率
        r_frame_rate = video_stream.get('r_frame_rate', '0/1')
        if '/' in r_frame_rate:
            num, den = map(int, r_frame_rate.split('/'))
            fps = num / den if den != 0 else 0
        else:
            fps = float(r_frame_rate)

        # 计算帧数
        frame_count = int(duration * fps) if duration > 0 and fps > 0 else 0

        # 验证基本属性
        if duration <= 0 or fps <= 0:
            raise VideoInfoError(f"Invalid video properties: duration={duration}, fps={fps}")

        return {
            'fps': fps,
            'frame_count': frame_count,
            'width': width,
            'height': height,
            'duration': duration
        }

    async def _get_video_info_opencv(self, video_path: str) -> Dict[str, Any]:
        """使用OpenCV获取视频信息（备用方法）"""
        try:
            import cv2

            cap = None
            try:
                cap = cv2.VideoCapture(video_path)
                if not cap.isOpened():
                    raise VideoInfoError(f"Cannot open video file: {video_path}")

                info = {
                    'fps': cap.get(cv2.CAP_PROP_FPS),
                    'frame_count': int(cap.get(cv2.CAP_PROP_FRAME_COUNT)),
                    'width': int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    'height': int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                }

                # 验证基本属性
                if info['fps'] <= 0 or info['frame_count'] <= 0:
                    raise VideoInfoError(f"Invalid video properties: fps={info['fps']}, frames={info['frame_count']}")

                info['duration'] = info['frame_count'] / info['fps']
                return info

            finally:
                if cap is not None:
                    cap.release()

        except VideoInfoError:
            # 重新抛出我们的自定义异常
            raise
        except Exception as e:
            self.logger.error(f"Unexpected error getting video info: {e}")
            raise VideoInfoError(f"Failed to get video info: {e}") from e

    async def _generate_segment_files(self,
                                     segments: List[VideoSegment],
                                     video_path: str,
                                     output_dir: str):
        """生成切分后的视频文件"""
        try:
            output_path = Path(output_dir)
            output_path.mkdir(parents=True, exist_ok=True)

            video_name = Path(video_path).stem
            format_ext = self.config['output_format']

            # 使用FFmpeg进行高质量切分 - 一刀切批量处理
            ffmpeg = injector.get(FfmpegSliceService)

            # 准备批量切片的配置
            from src.services.ffmpeg_slice_service import SliceSegment

            slice_segments = []
            for segment in segments:
                output_file = output_path / f"{video_name}_segment_{segment.segment_id:03d}.{format_ext}"
                segment.file_path = str(output_file)

                # 创建FFmpeg切片段配置
                slice_segments.append(SliceSegment(
                    start=segment.start_time,
                    end=segment.start_time + segment.duration
                ))

            # 创建高质量输出选项
            options = ffmpeg.create_slice_options(
                quality="high",
                fps=30
            )

            # 一刀切：批量处理所有片段
            base_output_path = str(output_path / f"{video_name}_segment.{format_ext}")

            try:
                results = await ffmpeg.slice_video(
                    media_path=video_path,
                    segments=slice_segments,
                    options=options,
                    output_path=base_output_path
                )

                # 更新片段文件路径为实际生成的文件
                # 使用临时文件名避免重命名冲突
                temp_files = []

                # 第一步：将所有文件重命名为临时文件名
                for i, (actual_file_path, metadata) in enumerate(results):
                    if i < len(segments):
                        temp_path = f"{actual_file_path}.tmp_{i}"
                        import shutil
                        shutil.move(actual_file_path, temp_path)
                        temp_files.append((temp_path, metadata, i))
                        self.logger.debug(f"临时重命名: {actual_file_path} -> {temp_path}")

                # 第二步：将临时文件重命名为最终文件名
                for temp_path, metadata, i in temp_files:
                    expected_path = segments[i].file_path
                    shutil.move(temp_path, expected_path)
                    self.logger.debug(f"最终重命名: {temp_path} -> {expected_path}")

                    # 更新片段元数据
                    segments[i].file_path = expected_path
                    self.logger.debug(f"批量切片完成片段{i+1}: {Path(expected_path).name} "
                                    f"(时长: {metadata.duration:.2f}s)")

                self.logger.info(f"FFmpeg批量切片完成: 一次性处理{len(segments)}个片段")

            except Exception as e:
                self.logger.error(f"FFmpeg批量切片失败: {e}")
                # 直接抛出异常，不进行降级处理
                raise RuntimeError(f"视频批量切片失败: {e}") from e

            self.logger.info(f"Generated {len(segments)} segment files")

        except Exception as e:
            self.logger.error(f"Error generating segment files: {e}")
            raise

    async def _extract_segment_ffmpeg(self,
                                     input_path: str,
                                     output_path: Path,
                                     start_time: float,
                                     duration: float):
        """Extract one [start_time, start_time + duration] clip via FFmpeg.

        Args:
            input_path: Source video path.
            output_path: Desired path of the extracted clip.
            start_time: Clip start in seconds.
            duration: Clip length in seconds.

        Raises:
            RuntimeError: If FFmpeg produces no output or an invalid file.
        """
        try:
            from src.services.ffmpeg_slice_service import SliceSegment, SliceOptions

            # Resolve the FFmpeg slicing service from the DI container.
            ffmpeg = injector.get(FfmpegSliceService)

            # One slice covering the requested range.
            end_time = start_time + duration
            segments = [SliceSegment(start=start_time, end=end_time)]

            # High-quality output options.
            options = ffmpeg.create_slice_options(
                quality="high",
                fps=30
            )

            # Make sure the destination directory exists.
            output_path.parent.mkdir(parents=True, exist_ok=True)

            # Run the slice through the FFmpeg service.
            results = await ffmpeg.slice_video(
                media_path=input_path,
                segments=segments,
                options=options,
                output_path=str(output_path)
            )

            if not results:
                raise RuntimeError("FFmpeg切片失败：没有生成输出文件")

            # Exactly one segment was requested, so take the first result.
            output_file_path, metadata = results[0]

            # Move the produced file to the expected path if they differ.
            if output_file_path != str(output_path):
                import shutil
                shutil.move(output_file_path, str(output_path))
                self.logger.debug(f"重命名输出文件: {output_file_path} -> {output_path}")

            # Sanity-check the produced file.
            if not output_path.exists() or output_path.stat().st_size == 0:
                raise RuntimeError(f"输出文件无效: {output_path}")

            self.logger.debug(f"FFmpeg成功提取片段: {output_path} "
                            f"(时长: {metadata.duration:.2f}s, 大小: {output_path.stat().st_size / (1024*1024):.2f}MB)")

        except Exception as e:
            self.logger.error(f"Error extracting segment with FFmpeg: {e}")
            raise

    async def _verify_video_file(self, video_path: Path) -> bool:
        """验证视频文件是否有效"""
        try:
            import cv2

            if not video_path.exists() or video_path.stat().st_size == 0:
                return False

            # 尝试打开视频文件
            cap = cv2.VideoCapture(str(video_path))

            # 检查是否能成功打开
            if not cap.isOpened():
                cap.release()
                return False

            # 检查基本属性
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            cap.release()

            # 验证基本属性是否合理
            if frame_count <= 0 or fps <= 0 or width <= 0 or height <= 0:
                return False

            return True

        except Exception as e:
            self.logger.debug(f"Video verification failed: {e}")
            return False

    async def save_segmentation_result(self,
                                      result: SegmentationResult,
                                      output_dir: str):
        """Persist a segmentation result to *output_dir*.

        Writes two artifacts named after the source video's stem:
        a ``*_segmentation.json`` dump of the full result and a
        ``*_playlist.m3u8`` playlist of the segments.

        Raises:
            Exception: Re-raised after logging if any write fails.
        """
        try:
            target_dir = Path(output_dir)
            target_dir.mkdir(parents=True, exist_ok=True)

            stem = Path(result.video_path).stem

            # JSON dump of the complete result structure
            json_path = target_dir / f"{stem}_segmentation.json"
            json_path.write_text(
                json.dumps(asdict(result), indent=2, ensure_ascii=False),
                encoding='utf-8',
            )

            # Companion M3U8 playlist for the segments
            playlist_path = target_dir / f"{stem}_playlist.m3u8"
            await self._create_m3u8_playlist(result, playlist_path)

            self.logger.info(f"Segmentation result saved to {target_dir}")

        except Exception as e:
            self.logger.error(f"Error saving segmentation result: {e}")
            raise

    async def _create_m3u8_playlist(self, result: SegmentationResult, output_file: Path):
        """Write an HLS (M3U8) playlist listing every segment of *result*.

        Segments without a ``file_path`` fall back to the conventional
        ``segment_NNN.mp4`` name.

        Args:
            result: Segmentation result whose segments are emitted in order.
            output_file: Destination path of the ``.m3u8`` file.

        Raises:
            Exception: Re-raised after logging if the file cannot be written.
        """
        try:
            import math

            # RFC 8216 §4.3.3.1: EXT-X-TARGETDURATION must be >= the rounded
            # duration of every segment. The previous hard-coded value of 60
            # produced invalid playlists whenever a segment ran longer, so
            # derive it from the actual durations (60 kept as the empty-list
            # fallback for backward compatibility).
            target_duration = max(
                (math.ceil(segment.duration) for segment in result.segments),
                default=60,
            )

            with open(output_file, 'w', encoding='utf-8') as f:
                f.write("#EXTM3U\n")
                f.write("#EXT-X-VERSION:3\n")
                f.write(f"#EXT-X-TARGETDURATION:{target_duration}\n")
                f.write("#EXT-X-MEDIA-SEQUENCE:0\n")

                for segment in result.segments:
                    f.write(f"#EXTINF:{segment.duration:.3f},\n")
                    if segment.file_path:
                        segment_filename = Path(segment.file_path).name
                    else:
                        segment_filename = f"segment_{segment.segment_id:03d}.mp4"
                    f.write(f"{segment_filename}\n")

                f.write("#EXT-X-ENDLIST\n")

        except Exception as e:
            self.logger.error(f"Error creating M3U8 playlist: {e}")
            raise

    def cleanup(self):
        """清理资源"""
        try:
            self.shot_detection_service.cleanup()
            self.logger.info("Video segmentation service cleaned up")
        except Exception as e:
            self.logger.error(f"Error during cleanup: {e}")

    def __enter__(self):
        """Enter the context manager; returns the service itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit the context manager, releasing resources via cleanup()."""
        self.cleanup()
        return False  # do not suppress any in-flight exception
