import os
import shutil
import math
from datetime import datetime
from typing import List, Optional, Dict, Any
from src.modules.ffmpeg_command_builder import FFmpegCommandBuilder
from src.modules.media_processor import MediaProcessor
from src.modules.file_manager import FileManager
from src.utils.logger import log_info, log_error


class AdvancedVideoMerger:
    """
    使用 FFmpegCommandBuilder 的高级视频合并器
    将所有操作合成为一个 FFmpeg 指令，然后一次性执行
    """
    
    def __init__(self, transition_effect="fade", transition_duration=1.0, startupinfo=None, logger=None):
        """
        Set up the merger and its collaborators.

        Args:
            transition_effect: xfade transition name applied between clips
            transition_duration: length of each transition, in seconds
            startupinfo: Windows subprocess startupinfo forwarded to workers
            logger: optional callable(message, level=...) used for logging
        """
        self.logger = logger
        self.startupinfo = startupinfo
        self.transition_duration = transition_duration
        self.transition_effect = transition_effect

        # Collaborators: ffprobe/ffmpeg wrapper and managed-file bookkeeping.
        self.media_processor = MediaProcessor(startupinfo, logger)
        self.file_manager = FileManager(logger=logger)

        # Mutable state: queued operations and temp files removed by cleanup().
        self.operations = []
        self.temp_files = []
    
    def _log(self, message, level="info"):
        """统一日志记录"""
        if self.logger:
            self.logger(message, level=level)
        elif level == "error":
            log_error(message)
        else:
            log_info(message)
    
    def _create_temp_path(self, prefix="", suffix=".mp4"):
        """创建临时文件路径"""
        from tempfile import mkdtemp, mkstemp
        temp_dir = mkdtemp(prefix="video_merger_")
        fd, temp_path = mkstemp(prefix=prefix, suffix=suffix, dir=temp_dir)
        os.close(fd)
        self.temp_files.append(temp_path)
        return temp_path
    
    def cleanup(self):
        """清理所有临时文件"""
        for temp_file in self.temp_files:
            try:
                if os.path.exists(temp_file):
                    os.remove(temp_file)
                # 同时清理临时目录
                temp_dir = os.path.dirname(temp_file)
                if os.path.exists(temp_dir) and not os.listdir(temp_dir):
                    os.rmdir(temp_dir)
            except Exception as e:
                self._log(f"清理临时文件失败: {temp_file}, 错误: {e}", level="error")
        
        self.temp_files.clear()
        self.file_manager.cleanup_category("video_merger")
    
    def insert_video_at(self, base_video_path, insert_video_path, insert_seconds):
        """
        Insert a clip into a base video at a given time point, in one FFmpeg run.

        Args:
            base_video_path: path of the base video
            insert_video_path: path of the clip to splice in
            insert_seconds: insertion point within the base video, in seconds

        Returns:
            The merged temp file path (str) on success; the builder's result
            dict when ffmpeg itself reports failure.

        Raises:
            ValueError: wraps any validation or processing error (original
                exception chained as the cause).
        """
        try:
            if not os.path.exists(base_video_path):
                raise FileNotFoundError(f"基础视频文件不存在: {base_video_path}")
            if not os.path.exists(insert_video_path):
                raise FileNotFoundError(f"要插入的视频文件不存在: {insert_video_path}")

            output_path = self._create_temp_path(prefix="inserted_", suffix=".mp4")

            builder = FFmpegCommandBuilder(startupinfo=self.startupinfo, logger=self.logger)
            builder.add_input(base_video_path)
            builder.add_input(insert_video_path)

            # The base video's resolution is the target for all segments.
            base_resolution = self.media_processor.get_video_resolution(base_video_path)
            if not base_resolution:
                raise ValueError("无法获取基础视频分辨率")
            target_width, target_height = base_resolution
            self._log(f"目标分辨率: {target_width}x{target_height}")

            base_has_audio = self.media_processor.has_audio_stream(base_video_path)
            insert_has_audio = self.media_processor.has_audio_stream(insert_video_path)
            self._log(f"基础视频音频: {'有' if base_has_audio else '无'}, 插入视频音频: {'有' if insert_has_audio else '无'}")

            # Shared per-segment video normalization: scale + letterbox to target.
            norm = (f"scale={target_width}:{target_height}:force_original_aspect_ratio=decrease,"
                    f"pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2")

            if base_has_audio or insert_has_audio:
                video_parts = [
                    f"[0:v]trim=end={insert_seconds},setpts=PTS-STARTPTS,{norm}[base_v]",
                    f"[0:v]trim=start={insert_seconds},setpts=PTS-STARTPTS,{norm}[after_v]",
                    f"[1:v]setpts=PTS-STARTPTS,{norm}[inserted_v]",
                ]
                # BUGFIX: the original referenced [0:a] and [1:a] unconditionally,
                # so ffmpeg failed whenever only ONE of the inputs had audio.
                # Generate silence (anullsrc) for the missing side, and normalize
                # formats (aformat) so the audio concat always succeeds.
                fmt = "aformat=sample_rates=44100:channel_layouts=stereo"
                audio_parts = []
                if base_has_audio:
                    audio_parts.append(f"[0:a]atrim=end={insert_seconds},asetpts=PTS-STARTPTS,{fmt}[base_a]")
                    audio_parts.append(f"[0:a]atrim=start={insert_seconds},asetpts=PTS-STARTPTS,{fmt}[after_a]")
                else:
                    base_duration = self.media_processor.get_media_duration(base_video_path)
                    if base_duration is None:
                        raise ValueError("无法获取基础视频时长")
                    audio_parts.append(f"anullsrc=r=44100:cl=stereo:d={insert_seconds}[base_a]")
                    audio_parts.append(f"anullsrc=r=44100:cl=stereo:d={max(base_duration - insert_seconds, 0)}[after_a]")
                if insert_has_audio:
                    audio_parts.append(f"[1:a]asetpts=PTS-STARTPTS,{fmt}[inserted_a]")
                else:
                    insert_duration = self.media_processor.get_media_duration(insert_video_path)
                    if insert_duration is None:
                        raise ValueError("无法获取插入视频时长")
                    audio_parts.append(f"anullsrc=r=44100:cl=stereo:d={insert_duration}[inserted_a]")

                filter_expr = ";".join(video_parts + audio_parts + [
                    "[base_v][inserted_v][after_v]concat=n=3:v=1:a=0[vout]",
                    "[base_a][inserted_a][after_a]concat=n=3:v=0:a=1[aout]",
                ])
                builder.add_filter_complex(filter_expr)
                builder.add_map("[vout]")
                builder.add_map("[aout]")
            else:
                # Neither input has audio: video-only splice.
                filter_expr = ";".join([
                    f"[0:v]trim=end={insert_seconds},setpts=PTS-STARTPTS,{norm}[base]",
                    f"[0:v]trim=start={insert_seconds},setpts=PTS-STARTPTS,{norm}[after]",
                    f"[1:v]setpts=PTS-STARTPTS,{norm}[inserted]",
                    "[base][inserted][after]concat=n=3:v=1:a=0[vout]",
                ])
                builder.add_filter_complex(filter_expr)
                builder.add_map("[vout]")

            builder.set_video_codec("libx264", preset="medium", crf=23)
            builder.set_audio_codec("aac", bitrate="192k")
            builder.add_output(output_path, overwrite=True)

            self._log("开始执行视频插入命令...")
            self._log(f"生成的FFmpeg命令: {' '.join(builder.build_command())}")
            result = builder.execute("视频插入操作")

            if result["success"]:
                self._log(f"视频插入成功: {output_path}")
                return output_path
            return result
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise ValueError(f"视频插入失败: {str(e)}") from e

    def merge_videos_with_transitions(self, *video_paths: str, output_path: Optional[str] = None, 
                                    remove_voice: bool = False, target_width: int = None, 
                                    target_height: int = None) -> Dict[str, Any]:
        """
        使用单个 FFmpeg 指令合并多个视频并添加过渡效果
        
        Args:
            video_paths: 输入视频文件路径
            output_path: 输出文件路径
            remove_voice: 是否移除人声
            target_width: 目标宽度
            target_height: 目标高度
            
        Returns:
            操作结果字典
        """
        try:
            # 基本验证
            if not video_paths:
                raise ValueError("至少需要一个视频文件路径")
            
            for path in video_paths:
                if not os.path.exists(path):
                    raise FileNotFoundError(f"视频文件不存在: {path}")
            
            # 设置输出路径
            base_dir = os.path.dirname(video_paths[0])
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            if not output_path:
                output_path = os.path.join(base_dir, f"merged_{timestamp}.mp4")
            
            # 单个视频直接处理
            if len(video_paths) == 1:
                self._log("只有一个视频，直接复制")
                shutil.copy(video_paths[0], output_path)
                if remove_voice:
                    remove_voice_path = os.path.join(base_dir, f"merged_{timestamp}_no_voice.mp4")  
                    _result = self._remove_vocals_from_video(output_path, remove_voice_path)
                    shutil.move(remove_voice_path, output_path)
                    return _result
                
                return {"success": True, "message": "单视频处理完成", "output_path": output_path}
            
            self._log(f"开始合并 {len(video_paths)} 个视频")
            self._log(f"过渡效果: {self.transition_effect}, 持续时间: {self.transition_duration}秒")
            
            # 分析所有视频信息
            video_info = self._analyze_videos(video_paths)
            
            # 确定目标分辨率
            if not target_width or not target_height:
                target_width = max(info['resolution'][0] for info in video_info)
                target_height = max(info['resolution'][1] for info in video_info)
            
            self._log(f"目标分辨率: {target_width}x{target_height}")
            
            # 使用 FFmpegCommandBuilder 构建合并命令
            result = self._build_merge_command(video_paths, video_info, output_path, 
                                             target_width, target_height, remove_voice)
            
            return result
            
        except Exception as e:
            error_msg = f"视频合并失败: {str(e)}"
            self._log(error_msg, level="error")
            return {"success": False, "message": error_msg}
    
    def _analyze_videos(self, video_paths: List[str]) -> List[Dict[str, Any]]:
        """分析所有视频的详细信息"""
        video_info = []
        
        for i, video_path in enumerate(video_paths):
            self._log(f"分析视频 #{i+1}: {os.path.basename(video_path)}")
            
            # 获取分辨率
            resolution = self.media_processor.get_video_resolution(video_path)
            if not resolution:
                raise ValueError(f"无法获取视频 #{i+1} 分辨率")
            
            # 获取时长
            duration = self.media_processor.get_media_duration(video_path)
            if duration is None:
                raise ValueError(f"无法获取视频 #{i+1} 时长")
            
            # 检查音频
            has_audio = self.media_processor.has_audio_stream(video_path)
            
            info = {
                'path': video_path,
                'resolution': resolution,
                'duration': duration,
                'has_audio': has_audio,
                'index': i
            }
            
            video_info.append(info)
            self._log(f"视频 #{i+1} - 分辨率: {resolution[0]}x{resolution[1]}, "
                     f"时长: {duration:.2f}s, 音频: {'有' if has_audio else '无'}")
        
        return video_info
    
    def _build_merge_command(self, video_paths: List[str], video_info: List[Dict[str, Any]],
                            output_path: str, target_width: int, target_height: int,
                            remove_voice: bool) -> Dict[str, Any]:
        """
        Assemble and run the single ffmpeg invocation that normalizes,
        transitions and concatenates all inputs into output_path.
        """
        cmd = FFmpegCommandBuilder(startupinfo=self.startupinfo, logger=self.logger)
        for src in video_paths:
            cmd.add_input(src)

        # Filter graph, built in order: per-input normalization first.
        graph: List[str] = []
        for idx, meta in enumerate(video_info):
            # Video: fixed fps, scale preserving aspect ratio, letterbox, SAR reset.
            graph.append(f"[{idx}:v]fps=30,"
                         f"scale={target_width}:{target_height}:force_original_aspect_ratio=decrease,"
                         f"pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2,"
                         f"setsar=1:1,format=yuv420p[v{idx}]")

            if meta['has_audio']:
                if remove_voice:
                    # Crude center-channel cancellation via pan to suppress vocals.
                    graph.append(f"[{idx}:a]pan=mono|c0=0.5*c0+-0.5*c1,"
                                 f"aformat=sample_rates=44100:channel_layouts=stereo[a{idx}]")
                else:
                    graph.append(f"[{idx}:a]aformat=sample_rates=44100:channel_layouts=stereo[a{idx}]")
            else:
                # Silent stand-in track so audio concat stays aligned with video.
                graph.append(f"anullsrc=r=44100:cl=stereo:d={meta['duration']}[a{idx}]")

        # Then transitions / mixing; helpers append to the same graph.
        video_out = self._build_video_transitions(graph, video_info)
        audio_out = self._build_audio_mixing(graph, video_info, remove_voice)

        cmd.add_filter_complex(";".join(graph))
        cmd.add_map(video_out)
        cmd.add_map(audio_out)

        cmd.set_video_codec("libx264", preset="medium", crf=23)
        cmd.set_audio_codec("aac", bitrate="192k")

        # Extra encoder/muxer options for broad player compatibility.
        for opt, val in (("-pix_fmt", "yuv420p"),
                         ("-profile:v", "main"),
                         ("-level", "4.0"),
                         ("-ar", "44100"),
                         ("-movflags", "+faststart"),
                         ("-max_muxing_queue_size", "1024")):
            cmd.add_global_option(opt, val)

        cmd.add_output(output_path, overwrite=True)

        self._log("开始执行合并命令...")
        outcome = cmd.execute("视频过渡合并")
        if outcome["success"]:
            self._log(f"视频合并成功: {output_path}")
        return outcome
    
    def _build_video_transitions(self, filter_parts: List[str], video_info: List[Dict[str, Any]]) -> str:
        """构建视频过渡效果滤镜"""
        if len(video_info) == 1:
            return "[v0]"
        
        if not self.transition_effect or self.transition_duration <= 0:
            # 使用简单串联
            video_streams = "".join([f"[v{i}]" for i in range(len(video_info))])
            concat_filter = f"{video_streams}concat=n={len(video_info)}:v=1:a=0:unsafe=1[vout]"
            filter_parts.append(concat_filter)
            return "[vout]"
        
        # 应用过渡效果
        current_label = "[v0]"
        
        for i in range(1, len(video_info)):
            # 计算过渡的offset
            if i == 1:
                # 第一个过渡
                offset = video_info[0]['duration'] - self.transition_duration
            else:
                # 后续过渡需要考虑之前的累计时长
                cumulative_duration = sum(info['duration'] for info in video_info[:i])
                cumulative_duration -= (i - 1) * self.transition_duration  # 减去之前的过渡时间
                offset = cumulative_duration - self.transition_duration
            
            next_label = f"[vtmp{i}]" if i < len(video_info) - 1 else "[vout]"
            
            transition_filter = (f"{current_label}[v{i}]xfade=transition={self.transition_effect}:"
                               f"duration={self.transition_duration}:offset={offset:.3f}{next_label}")
            filter_parts.append(transition_filter)
            current_label = next_label
        
        return "[vout]"
    
    def _build_audio_mixing(self, filter_parts: List[str], video_info: List[Dict[str, Any]], 
                           remove_voice: bool) -> str:
        """构建音频混合滤镜"""
        if len(video_info) == 1:
            return "[a0]"
        
        # 计算最终音频长度
        if self.transition_effect and self.transition_duration > 0:
            total_duration = sum(info['duration'] for info in video_info)
            total_duration -= (len(video_info) - 1) * self.transition_duration
        else:
            total_duration = sum(info['duration'] for info in video_info)
        
        # 串联所有音频流
        audio_streams = "".join([f"[a{i}]" for i in range(len(video_info))])
        audio_concat = f"{audio_streams}concat=n={len(video_info)}:v=0:a=1:unsafe=1[atmp]"
        filter_parts.append(audio_concat)
        
        # 精确控制时长
        audio_trim = f"[atmp]atrim=0:{total_duration:.3f}[aout]"
        filter_parts.append(audio_trim)
        
        return "[aout]"
    
    def merge_video_with_audio(self, video_path: str, audio_path: str, output_path: str,
                              subtitle_path: Optional[str] = None, burn_subtitles: bool = True,
                              audio_volume: float = 1.0, original_volume: float = 0.8) -> Dict[str, Any]:
        """
        Merge a video, a new audio track and optional subtitles in one FFmpeg
        run, with smart duration matching and volume balancing.

        Args:
            video_path: input video file
            audio_path: new audio track to lay over the video
            output_path: destination file
            subtitle_path: optional subtitle file (ignored when missing)
            burn_subtitles: whether to hard-burn the subtitles into the frames
            audio_volume: base gain for the new audio
            original_volume: base gain for the video's own audio

        Returns:
            Result dict from the builder ({"success": bool, "message": ...}).
        """
        temp_dir: Optional[str] = None
        try:
            # --- validation -------------------------------------------------
            if not os.path.exists(video_path):
                raise FileNotFoundError(f"视频文件不存在: {video_path}")
            if not os.path.exists(audio_path):
                raise FileNotFoundError(f"音频文件不存在: {audio_path}")
            if subtitle_path and not os.path.exists(subtitle_path):
                self._log(f"字幕文件不存在，将忽略: {subtitle_path}")
                subtitle_path = None

            self._log("开始合并视频、音频和字幕")

            # --- probe inputs ----------------------------------------------
            video_duration = self.media_processor.get_media_duration(video_path)
            audio_duration = self.media_processor.get_media_duration(audio_path)
            has_original_audio = self.media_processor.has_audio_stream(video_path)

            if video_duration is None or audio_duration is None:
                raise ValueError("无法获取视频或音频时长")

            self._log(f"视频时长: {video_duration:.2f}s, 音频时长: {audio_duration:.2f}s")
            self._log(f"原始视频包含音频: {'是' if has_original_audio else '否'}")

            duration_diff = video_duration - audio_duration
            self._log(f"时长对比: 视频={video_duration:.2f}s, 音频={audio_duration:.2f}s, 差值={duration_diff:.2f}s")

            # --- smart volume balancing ------------------------------------
            adjusted_audio_volume = audio_volume
            adjusted_original_volume = original_volume
            if has_original_audio:
                adjusted_audio_volume, adjusted_original_volume = self._compute_smart_volumes(
                    video_path, audio_path, audio_volume, original_volume)

            # Scratch dir for possible pre-processing (always removed in finally).
            from tempfile import mkdtemp
            temp_dir = mkdtemp(prefix="merge_temp_")

            # --- duration matching (speed up / repeat the video) -----------
            processed_video_path = self._match_video_to_audio_duration(
                video_path, video_duration, audio_duration, temp_dir)
            processed_audio_path = audio_path

            builder = FFmpegCommandBuilder(startupinfo=self.startupinfo, logger=self.logger)
            builder.add_input(processed_video_path)
            builder.add_input(processed_audio_path)

            filter_parts = []

            new_video_duration = self.media_processor.get_media_duration(processed_video_path)
            # BUGFIX: the original compared None with floats below, crashing
            # into the generic handler; fail with an explicit message instead.
            if new_video_duration is None:
                raise ValueError("无法获取处理后的视频时长")

            # --- audio graph ------------------------------------------------
            if has_original_audio:
                # Mix the original track with the new one.
                filter_parts.append(f"[0:a]volume={adjusted_original_volume}[original_audio]")

                if audio_duration < new_video_duration:
                    padding_seconds = new_video_duration - audio_duration
                    self._log(f"音频比视频短 {padding_seconds:.2f}s，将自动添加静音填充")
                    filter_parts.append(f"[1:a]apad=whole_dur={new_video_duration:.3f},volume={adjusted_audio_volume}[new_audio]")
                elif audio_duration > new_video_duration:
                    # Trim the new audio and fade it out near the end.
                    fade_duration = min(1.0, new_video_duration / 4)
                    fade_start = max(0, new_video_duration - fade_duration)
                    filter_parts.append(f"[1:a]atrim=0:{new_video_duration:.3f},afade=t=out:st={fade_start:.3f}:d={fade_duration:.3f},volume={adjusted_audio_volume}[new_audio]")
                    self._log(f"音频比视频长，裁剪到 {new_video_duration:.2f}s 并添加 {fade_duration:.2f}s 淡出")
                else:
                    filter_parts.append(f"[1:a]volume={adjusted_audio_volume}[new_audio]")

                filter_parts.append("[original_audio][new_audio]amix=inputs=2:duration=longest[final_audio]")
                final_audio_map = "[final_audio]"
            else:
                # No original audio: the new track becomes the only audio stream.
                if audio_duration < new_video_duration:
                    padding_seconds = new_video_duration - audio_duration
                    self._log(f"音频比视频短 {padding_seconds:.2f}s，添加静音填充")
                    filter_parts.append(f"[1:a]apad=whole_dur={new_video_duration:.3f},volume={adjusted_audio_volume}[final_audio]")
                elif audio_duration > new_video_duration:
                    fade_duration = min(1.0, new_video_duration / 4)
                    fade_start = max(0, new_video_duration - fade_duration)
                    filter_parts.append(f"[1:a]atrim=0:{new_video_duration:.3f},afade=t=out:st={fade_start:.3f}:d={fade_duration:.3f},volume={adjusted_audio_volume}[final_audio]")
                    self._log(f"音频比视频长，裁剪到 {new_video_duration:.2f}s 并添加 {fade_duration:.2f}s 淡出")
                else:
                    filter_parts.append(f"[1:a]volume={adjusted_audio_volume}[final_audio]")
                final_audio_map = "[final_audio]"

            # --- subtitles --------------------------------------------------
            if subtitle_path and burn_subtitles:
                try:
                    # Copy the subtitle into the scratch dir under a plain name
                    # to sidestep filter-graph escaping issues with exotic paths.
                    temp_subtitle = os.path.join(temp_dir, "subtitle.srt")
                    shutil.copy2(subtitle_path, temp_subtitle)

                    temp_subtitle_path = temp_subtitle.replace(os.sep, '/').replace(':', '\\:')
                    subtitle_filter = f"[0:v]subtitles='{temp_subtitle_path}':force_style='FontSize=12,MarginV=20'[final_video]"
                    filter_parts.append(subtitle_filter)
                    final_video_map = "[final_video]"
                    self._log(f"字幕滤镜: {subtitle_filter}")
                except Exception as e:
                    self._log(f"字幕处理失败，跳过字幕烧录: {str(e)}", level="error")
                    final_video_map = "[0:v]"
            else:
                final_video_map = "[0:v]"

            # --- mapping and encoding ---------------------------------------
            if filter_parts:
                builder.add_filter_complex(";".join(filter_parts))
                builder.add_map(final_video_map)
                builder.add_map(final_audio_map)
            else:
                builder.add_map("0:v:0")
                builder.add_map("1:a:0")

            # Re-encode only when subtitles are burned; otherwise stream-copy video.
            if subtitle_path and burn_subtitles:
                builder.set_video_codec("libx264", preset="medium", crf=18)
            else:
                builder.add_global_option("-c:v", "copy")
            builder.set_audio_codec("aac", bitrate="192k")

            # Never write onto an input file in place.
            if os.path.abspath(output_path) == os.path.abspath(processed_video_path):
                temp_output = output_path + ".tmp"
            else:
                temp_output = output_path
            builder.add_output(temp_output, overwrite=True)

            result = builder.execute("智能视频音频字幕合并")

            # Relocate the temp output to its final place if one was used.
            if temp_output != output_path and result["success"] and os.path.exists(temp_output):
                if os.path.exists(output_path):
                    os.remove(output_path)
                shutil.move(temp_output, output_path)

            if result["success"]:
                self._log(f"智能合并完成: {output_path}")
                if os.path.exists(output_path):
                    file_size = os.path.getsize(output_path) / (1024 * 1024)  # MB
                    final_duration = self.media_processor.get_media_duration(output_path)
                    # BUGFIX: final_duration may be None; don't crash the success path.
                    duration_text = f"{final_duration:.2f}s" if final_duration is not None else "未知"
                    self._log(f"输出文件大小: {file_size:.2f} MB, 时长: {duration_text}")

            return result

        except Exception as e:
            error_msg = f"智能合并视频、音频和字幕失败: {str(e)}"
            self._log(error_msg, level="error")
            return {"success": False, "message": error_msg}
        finally:
            # Always clear the scratch dir — the original leaked it on errors.
            if temp_dir and os.path.exists(temp_dir):
                shutil.rmtree(temp_dir, ignore_errors=True)

    def _compute_smart_volumes(self, video_path: str, audio_path: str,
                               audio_volume: float, original_volume: float):
        """
        Compare mean loudness of the video's own track vs the new voice track
        and return (new_audio_gain, original_audio_gain). Falls back to the
        caller-supplied gains when loudness analysis is unavailable.
        """
        voice_audio_info = self._analyze_audio_levels(audio_path)
        video_audio_info = self._analyze_audio_levels(video_path)
        if not (voice_audio_info and video_audio_info):
            return audio_volume, original_volume

        voice_mean = voice_audio_info.get('mean_volume')
        music_mean = video_audio_info.get('mean_volume')
        if voice_mean is None or music_mean is None:
            return audio_volume, original_volume

        volume_diff = music_mean - voice_mean
        self._log(f"音量差异分析: 音乐({music_mean:.1f}dB) vs 人声({voice_mean:.1f}dB), 差值: {volume_diff:.1f}dB")

        if voice_mean < -40:  # voice extremely weak
            if volume_diff > 10:  # music far louder than voice
                self._log("情况1: 人声极弱且音乐远大于人声 - 大幅提高人声，大幅降低音乐")
                return 4.0, 0.1
            self._log("情况2: 人声极弱但音乐不太强 - 较大幅提高人声，较大幅降低音乐")
            return 3.5, 0.15
        if voice_mean < -30:  # voice weak
            if volume_diff > 10:
                self._log("情况3: 人声较弱且音乐远大于人声 - 显著提高人声，显著降低音乐")
                return 3.0, 0.2
            self._log("情况4: 人声较弱但音乐不太强 - 适当提高人声，适当降低音乐")
            return 2.5, 0.25
        if voice_mean < -20:  # voice medium
            if volume_diff > 5:
                self._log("情况5: 人声中等且音乐略大于人声 - 中度提高人声，中度降低音乐")
                return 2.0, 0.3
            self._log("情况6: 人声中等且音乐适中 - 轻度调整")
            return 1.5, 0.4
        # voice already strong
        self._log("情况7: 人声较强 - 轻微调整保持平衡")
        return 1.2, 0.5

    def _match_video_to_audio_duration(self, video_path: str, video_duration: float,
                                       audio_duration: float, temp_dir: str) -> str:
        """
        Return a video path whose duration roughly matches the audio: the
        original path when within 1s, otherwise a sped-up (capped at 2.5x,
        then trimmed) or repeated copy written into temp_dir.
        """
        duration_diff = video_duration - audio_duration
        if abs(duration_diff) <= 1.0:
            return video_path

        self._log(f"时长差异较大({abs(duration_diff):.2f}s)，进行智能调整")

        if duration_diff > 0:  # video longer than audio
            self._log(f"视频比音频长 {duration_diff:.2f}s")
            speed_factor = video_duration / audio_duration
            if speed_factor <= 2.5:  # acceptable speed-up range
                self._log(f"视频加速比例: {speed_factor:.2f}x，在合理范围内")
                return self._speed_up_video_with_fade(
                    video_path, audio_duration, temp_dir, speed_factor)
            self._log(f"视频加速比例过高({speed_factor:.2f}x)，限制为2.5x并截断")
            return self._speed_up_and_trim_video(
                video_path, audio_duration, temp_dir, max_speed=2.5)

        # audio longer than video: loop the video with transitions
        self._log(f"音频比视频长 {abs(duration_diff):.2f}s")
        repeat_count = math.ceil(audio_duration / video_duration)
        self._log(f"需要重复视频 {repeat_count} 次并添加过渡效果")
        return self._repeat_video_with_transitions(
            video_path, audio_duration, temp_dir, self.transition_effect)
    
    def burn_subtitles_to_video(self, video_path: str, subtitle_path: str,
                               output_path: Optional[str] = None, font_size: int = 12,
                               margin_v: int = 20, font_color: str = "white",
                               font_name: Optional[str] = None) -> Dict[str, Any]:
        """
        Hard-burn subtitles into a video with a single FFmpeg invocation.

        Args:
            video_path: input video file
            subtitle_path: subtitle file to burn
            output_path: destination file (temp file created when omitted)
            font_size: subtitle font size
            margin_v: bottom margin in pixels
            font_color: font color name or #hex value
            font_name: optional font family name

        Returns:
            Result dict from the builder ({"success": bool, "message": ...}).
        """
        temp_subtitle: Optional[str] = None
        try:
            if not os.path.exists(video_path):
                raise FileNotFoundError(f"视频文件不存在: {video_path}")
            if not os.path.exists(subtitle_path):
                raise FileNotFoundError(f"字幕文件不存在: {subtitle_path}")

            if not output_path:
                output_path = self._create_temp_path("subtitled_", ".mp4")

            self._log(f"开始烧录字幕: {os.path.basename(subtitle_path)}")

            builder = FFmpegCommandBuilder(startupinfo=self.startupinfo, logger=self.logger)
            builder.add_input(video_path)

            try:
                # Copy the subtitle next to the output under a plain ASCII name
                # to sidestep filter-graph escaping issues with exotic paths.
                temp_dir = os.path.dirname(output_path)
                temp_subtitle = os.path.join(temp_dir, "temp_subtitle.srt")
                shutil.copy2(subtitle_path, temp_subtitle)

                style_parts = [f"FontSize={font_size}", f"MarginV={margin_v}"]
                if font_color:
                    style_parts.append(f"PrimaryColour=&H{self._color_to_hex(font_color)}")
                if font_name:
                    style_parts.append(f"FontName={font_name}")
                style = ",".join(style_parts)

                # Forward slashes keep the filter path simple on Windows too.
                subtitle_filter = f"subtitles='{temp_subtitle.replace(chr(92), '/')}':force_style='{style}'"
                builder.add_video_filter(subtitle_filter)
            except Exception as e:
                self._log(f"字幕处理失败: {str(e)}", level="error")
                return {"success": False, "message": f"字幕处理失败: {str(e)}"}

            # Re-encode video (subtitles are rendered into frames), keep audio as-is.
            builder.set_video_codec("libx264", preset="medium", crf=18)
            builder.add_global_option("-c:a", "copy")
            builder.add_output(output_path, overwrite=True)

            result = builder.execute("字幕烧录")

            if result["success"]:
                self._log(f"字幕烧录成功: {output_path}")
            return result

        except Exception as e:
            error_msg = f"字幕烧录失败: {str(e)}"
            self._log(error_msg, level="error")
            return {"success": False, "message": error_msg}
        finally:
            # BUGFIX: the original defined its fallback cleanup after a return
            # (unreachable) and leaked the copy on errors; always delete it here.
            if temp_subtitle and os.path.exists(temp_subtitle):
                try:
                    os.remove(temp_subtitle)
                except OSError:
                    pass
    
    def _color_to_hex(self, color: str) -> str:
        """将颜色名称转换为十六进制格式"""
        color_map = {
            "white": "FFFFFF",
            "black": "000000", 
            "red": "FF0000",
            "green": "00FF00",
            "blue": "0000FF",
            "yellow": "FFFF00",
            "cyan": "00FFFF",
            "magenta": "FF00FF"
        }
        
        if color.lower() in color_map:
            return color_map[color.lower()]
        
        if color.startswith('#') and len(color) in [4, 7]:
            return color[1:]
        
        return "FFFFFF"  # 默认白色
    
    def _remove_vocals_from_video(self, video_path: str, output_path: str) -> Dict[str, Any]:
        """
        Suppress vocals in a video's audio track using a simple pan-filter
        trick (center-channel cancellation: left minus right).

        Args:
            video_path: path of the input video
            output_path: path of the output video

        Returns:
            Result dictionary from the FFmpeg execution
            (at least {"success": bool, "message": str}).
        """
        try:
            self._log("开始移除人声...")

            builder = FFmpegCommandBuilder(startupinfo=self.startupinfo, logger=self.logger)
            builder.add_input(video_path)

            # Center-channel cancellation: downmix to (L - R), then copy the
            # resulting difference signal onto both stereo channels.
            builder.add_audio_filter("pan=mono|c0=0.5*c0+-0.5*c1,pan=stereo|c0=c0|c1=c0")

            # Video stream passes through untouched; only audio is re-encoded.
            builder.add_global_option("-c:v", "copy")
            builder.set_audio_codec("aac", bitrate="192k")
            builder.add_output(output_path, overwrite=True)

            result = builder.execute("人声移除")
            if result["success"]:
                self._log("人声移除完成")
            return result

        except Exception as e:
            error_msg = f"移除人声失败: {str(e)}"
            self._log(error_msg, level="error")
            return {"success": False, "message": error_msg}
    
    def _analyze_audio_levels(self, file_path: str) -> Optional[Dict[str, float]]:
        """
        Measure the loudness of a file's audio track via ffmpeg's
        ``volumedetect`` filter (output discarded through the null muxer).

        Args:
            file_path: audio or video file to analyze

        Returns:
            {'mean_volume': float | None, 'max_volume': float | None}
            in dB, or None when the analysis itself raises.
        """
        try:
            import subprocess

            cmd = [
                "ffmpeg",
                "-i", file_path,
                "-filter:a", "volumedetect",
                "-f", "null",
                "-y",
                "-",
            ]

            completed = subprocess.run(
                cmd,
                startupinfo=self.startupinfo,
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE,
                universal_newlines=True,
                encoding='utf-8',
                errors='replace'
            )

            def extract_db(line: str, marker: str) -> Optional[float]:
                # volumedetect prints e.g. "mean_volume: -23.4 dB" to stderr.
                try:
                    return float(line.split(marker)[1].strip().split()[0])
                except (ValueError, IndexError):
                    return None

            mean_volume: Optional[float] = None
            max_volume: Optional[float] = None
            for line in completed.stderr.split('\n'):
                if "mean_volume:" in line:
                    parsed = extract_db(line, "mean_volume:")
                    if parsed is not None:
                        mean_volume = parsed
                elif "max_volume:" in line:
                    parsed = extract_db(line, "max_volume:")
                    if parsed is not None:
                        max_volume = parsed

            self._log(f"音频分析结果 - 平均音量: {mean_volume} dB, 最大音量: {max_volume} dB")
            return {'mean_volume': mean_volume, 'max_volume': max_volume}

        except Exception as e:
            self._log(f"分析音频音量时出错: {str(e)}", level="error")
            return None
    
    def _speed_up_video_with_fade(self, video_path: str, target_duration: float, 
                                 temp_dir: str, speed_factor: float) -> str:
        """
        Re-time a video to match a target duration and append an audio/video
        fade-out over the last second (or a quarter of the target duration,
        whichever is shorter).

        Args:
            video_path: path of the source video
            target_duration: desired duration in seconds
            temp_dir: directory for the temporary output file
            speed_factor: playback-rate multiplier (>1 speeds up, <1 slows down)

        Returns:
            Path of the processed video, or the original path on failure.
        """
        try:
            import subprocess
            from tempfile import mkstemp

            # Guard clause: a non-positive factor would otherwise divide by
            # zero below and leak a temp file.
            if speed_factor <= 0:
                self._log(f"无效的速度倍数: {speed_factor}", level="error")
                return video_path

            fd, output_path = mkstemp(suffix=".mp4", dir=temp_dir)
            os.close(fd)

            # atempo only accepts factors in [0.5, 2.0], so chain multiple
            # stages for factors outside that range. The previous code
            # emitted a single invalid "atempo=<x>" for x < 0.5.
            chunks = []
            remaining = speed_factor
            while remaining > 2.0:
                chunks.append("atempo=2.0")
                remaining /= 2.0
            while remaining < 0.5:
                chunks.append("atempo=0.5")
                remaining /= 0.5
            chunks.append(f"atempo={remaining}")
            audio_filter = ",".join(chunks)

            # Fade-out covers at most 1s, never more than 1/4 of the clip.
            fade_duration = min(1.0, target_duration / 4)
            fade_start = max(0, target_duration - fade_duration)

            cmd = [
                "ffmpeg",
                "-i", video_path,
                "-filter_complex",
                f"[0:v]setpts={1/speed_factor}*PTS,fade=t=out:st={fade_start}:d={fade_duration}:color=black[v];[0:a]{audio_filter},afade=t=out:st={fade_start}:d={fade_duration}[a]",
                "-map", "[v]",
                "-map", "[a]",
                "-c:v", "libx264",
                "-preset", "medium",
                "-crf", "18",
                "-y",
                output_path
            ]

            self._log(f"执行视频加速命令，倍数: {speed_factor:.2f}x")
            process = subprocess.run(
                cmd,
                startupinfo=self.startupinfo,
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE,
                universal_newlines=True,
                # Explicit decoding keeps weird ffmpeg stderr bytes from
                # raising (consistent with _analyze_audio_levels).
                encoding='utf-8',
                errors='replace'
            )

            if process.returncode == 0:
                self._log(f"视频加速成功: {output_path}")
                return output_path

            self._log(f"视频加速失败: {process.stderr}", level="error")
            # Don't leave the empty temp output behind on failure.
            try:
                os.remove(output_path)
            except OSError:
                pass
            return video_path

        except Exception as e:
            self._log(f"视频加速处理出错: {str(e)}", level="error")
            return video_path
    
    def _speed_up_and_trim_video(self, video_path: str, target_duration: float,
                                temp_dir: str, max_speed: float = 2.5) -> str:
        """
        Speed a video up by a fixed maximum factor, then trim the result to
        the target duration and add an audio/video fade-out.

        Args:
            video_path: path of the source video
            target_duration: desired final duration in seconds
            temp_dir: directory for intermediate and output files
            max_speed: speed-up multiplier applied before trimming

        Returns:
            Path of the processed video; falls back to the sped-up
            intermediate, or the original path, when a step fails.
        """
        try:
            import subprocess
            from tempfile import mkstemp

            def _discard(path: str) -> None:
                # Best-effort removal of a temp file we no longer need.
                try:
                    os.remove(path)
                except OSError:
                    pass

            fd1, sped_up_path = mkstemp(suffix="_speed.mp4", dir=temp_dir)
            os.close(fd1)

            # atempo only accepts factors in [0.5, 2.0]; chain stages for
            # larger factors. Fall back to a no-op stage so the filter graph
            # stays valid when max_speed <= 1.0 (the original code produced
            # an empty filter and a broken "[0:a][a]" command in that case).
            audio_chunks = []
            remaining_factor = max_speed
            while remaining_factor > 1.0:
                chunk = min(2.0, remaining_factor)
                audio_chunks.append(f"atempo={chunk}")
                remaining_factor /= chunk
            if not audio_chunks:
                audio_chunks.append("atempo=1.0")
            audio_filter = ",".join(audio_chunks)

            # Step 1: apply the speed-up to both streams.
            speed_cmd = [
                "ffmpeg",
                "-i", video_path,
                "-filter_complex",
                f"[0:v]setpts={1/max_speed}*PTS[v];[0:a]{audio_filter}[a]",
                "-map", "[v]",
                "-map", "[a]",
                "-c:v", "libx264",
                "-preset", "medium",
                "-crf", "18",
                "-y",
                sped_up_path
            ]

            self._log(f"执行视频加速命令，最大倍数: {max_speed:.2f}x")
            process = subprocess.run(
                speed_cmd,
                startupinfo=self.startupinfo,
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE,
                universal_newlines=True,
                encoding='utf-8',
                errors='replace'
            )

            if process.returncode != 0:
                self._log(f"视频加速失败: {process.stderr}", level="error")
                _discard(sped_up_path)  # the intermediate is empty/partial
                return video_path

            # Step 2: trim to the target duration and fade out.
            fd2, final_path = mkstemp(suffix="_final.mp4", dir=temp_dir)
            os.close(fd2)

            fade_duration = min(1.0, target_duration / 4)
            fade_start = max(0, target_duration - fade_duration)

            trim_cmd = [
                "ffmpeg",
                "-i", sped_up_path,
                "-ss", "0",
                "-t", str(target_duration),
                "-vf", f"fade=t=out:st={fade_start}:d={fade_duration}:color=black",
                "-af", f"afade=t=out:st={fade_start}:d={fade_duration}",
                "-c:v", "libx264",
                "-preset", "medium",
                "-crf", "18",
                "-y",
                final_path
            ]

            self._log(f"执行视频截断命令，目标时长: {target_duration:.2f}s")
            trim_process = subprocess.run(
                trim_cmd,
                startupinfo=self.startupinfo,
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE,
                universal_newlines=True,
                encoding='utf-8',
                errors='replace'
            )

            if trim_process.returncode == 0:
                self._log(f"视频加速和截断成功: {final_path}")
                _discard(sped_up_path)  # intermediate no longer needed
                return final_path

            self._log(f"视频截断失败: {trim_process.stderr}", level="error")
            _discard(final_path)  # empty/partial output is useless
            return sped_up_path

        except Exception as e:
            self._log(f"视频加速截断处理出错: {str(e)}", level="error")
            return video_path
    
    def _repeat_video_with_transitions(self, video_path: str, target_duration: float,
                                      temp_dir: str, transition_effect: str = "fade") -> str:
        """
        Loop a video (via the concat demuxer) until it covers the target
        duration, trim to that duration, and apply a closing transition
        effect plus an audio fade-out.

        Args:
            video_path: path of the source video
            target_duration: desired duration in seconds
            temp_dir: directory for the concat list and the output file
            transition_effect: closing-transition name
                (see _get_transition_filter)

        Returns:
            Path of the processed video, or the original path on failure.
        """
        try:
            import subprocess
            from tempfile import mkstemp

            source_duration = self.media_processor.get_media_duration(video_path)
            if source_duration is None:
                return video_path

            # How many copies are needed to cover the target duration.
            loops = math.ceil(target_duration / source_duration)
            self._log(f"视频需要重复 {loops} 次才能匹配音频时长")

            # The concat demuxer wants forward slashes, even on Windows.
            concat_file = os.path.join(temp_dir, "concat_list.txt")
            entry = f"file '{video_path.replace(chr(92), '/')}'\n"
            with open(concat_file, 'w', encoding='utf-8') as f:
                f.writelines(entry for _ in range(loops))

            fd, output_path = mkstemp(suffix="_repeated.mp4", dir=temp_dir)
            os.close(fd)

            # Fade-out covers at most 1s, never more than 1/4 of the clip.
            fade_duration = min(1.0, target_duration / 4)
            fade_start = max(0, target_duration - fade_duration)
            video_effect = self._get_transition_filter(
                transition_effect, fade_start, fade_duration
            )

            # Single pass: concat -> trim to target -> apply effects.
            cmd = [
                "ffmpeg",
                "-f", "concat",
                "-safe", "0",
                "-i", concat_file,
                "-ss", "0",
                "-t", str(target_duration),
                "-vf", video_effect,
                "-af", f"afade=t=out:st={fade_start}:d={fade_duration}",
                "-c:v", "libx264",
                "-preset", "medium", 
                "-crf", "18",
                "-y",
                output_path
            ]

            self._log(f"执行视频重复和过渡效果命令")
            completed = subprocess.run(
                cmd,
                startupinfo=self.startupinfo,
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE,
                universal_newlines=True
            )

            # The concat list is only needed for the command above.
            try:
                os.remove(concat_file)
            except OSError:
                pass

            if completed.returncode != 0:
                self._log(f"视频重复处理失败: {completed.stderr}", level="error")
                return video_path

            self._log(f"视频重复和过渡效果添加成功: {output_path}")
            return output_path

        except Exception as e:
            self._log(f"视频重复处理出错: {str(e)}", level="error")
            return video_path
    
    def _get_transition_filter(self, effect_type: str, start_time: float, duration: float) -> str:
        """
        根据过渡效果类型返回适当的FFmpeg滤镜
        
        Args:
            effect_type: 效果类型
            start_time: 开始时间
            duration: 持续时间
            
        Returns:
            FFmpeg滤镜字符串
        """
        if effect_type == "fade":
            return f"fade=t=out:st={start_time}:d={duration}:color=black"
        elif effect_type == "dissolve":
            return f"fade=t=out:st={start_time}:d={duration}:alpha=1"
        elif effect_type == "wipeleft":
            return f"crop=iw-iw*{duration}/(t-{start_time}):ih:0:0"
        elif effect_type == "wiperight":
            return f"crop=iw*{duration}/(t-{start_time}):ih:iw-iw*{duration}/(t-{start_time}):0"
        else:
            # 默认使用淡出
            return f"fade=t=out:st={start_time}:d={duration}:color=black"
