import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import tkinter as tk
from tkinter import filedialog, ttk, messagebox, Scale
from pydub import AudioSegment
import subprocess
import tempfile
import shutil
import platform
import matplotlib as mpl
import threading
import queue
import time
import configparser
import locale

# Language selection
def setup_language():
    """Pick the UI language from the system locale.

    Returns:
        'Chinese' for zh_CN / zh_TW system locales, 'English' otherwise.
    """
    detected = locale.getdefaultlocale()[0]

    # Only Chinese locales switch the UI away from the English default.
    chinese_locales = ('zh_CN', 'zh_TW')
    return 'Chinese' if detected in chinese_locales else 'English'

# Language resource tables: every user-visible UI string, keyed first by
# language name ('Chinese' / 'English'), then by a stable message key.
language_resources = {
    'Chinese': {
        'title': '视频静音分析工具 - 能量计算版',
        'load_video': '加载视频',
        'analyze_silence': '分析静音',
        'process_video': '分割视频',
        'reset_view': '重置视图',
        'ready': '准备就绪',
        'silence_threshold': '静音阈值(dBFS):',
        'min_silence_len': '最小静音长度(ms):',
        'trim_ms': '缩短时间(ms):',
        'zoom': '缩放:',
        'position': '位置:',
        'please_load_video': '请加载视频文件',
        'time_sec': '时间 (秒)',
        'amplitude': '振幅',
        'audio_waveform': '音频波形',
        'analyzing': '正在分析静音...',
        'analysis_complete': '静音分析完成',
        'analysis_failed': '静音分析失败',
        'processing': '正在处理视频...',
        'process_complete': '视频处理完成',
        'process_failed': '视频处理失败',
        'load_failed': '加载视频失败',
        'extracting_audio': '正在提取音频...',
        'video_loaded': '已加载: {}',
        'error': '错误',
        'must_load_video': '请先加载视频文件',
        'must_analyze_first': '请先分析静音区域',
    },
    'English': {
        'title': 'Video Silence Analyzer - Energy Based',
        'load_video': 'Load Video',
        'analyze_silence': 'Analyze Silence',
        'process_video': 'Process Video',
        'reset_view': 'Reset View',
        'ready': 'Ready',
        'silence_threshold': 'Silence Threshold(dBFS):',
        'min_silence_len': 'Min Silence Length(ms):',
        'trim_ms': 'Trim Time(ms):',
        'zoom': 'Zoom:',
        'position': 'Position:',
        'please_load_video': 'Please load video file',
        'time_sec': 'Time (sec)',
        'amplitude': 'Amplitude',
        'audio_waveform': 'Audio Waveform',
        'analyzing': 'Analyzing silence...',
        'analysis_complete': 'Silence analysis complete',
        'analysis_failed': 'Silence analysis failed',
        'processing': 'Processing video...',
        'process_complete': 'Video processing complete',
        'process_failed': 'Video processing failed',
        'load_failed': 'Load video failed',
        'extracting_audio': 'Extracting audio...',
        'video_loaded': 'Loaded: {}',
        'error': 'Error',
        'must_load_video': 'Please load video file first',
        'must_analyze_first': 'Please analyze silence first',
    }
}

# Resolve the active language once at import time; `lang` is the string
# table used throughout the UI below.
current_language = setup_language()
lang = language_resources[current_language]

class EnergyBasedSilenceProcessor:
    """Silence analyzer based on windowed RMS energy.

    Loads a video, extracts its audio track via ffmpeg, detects silent
    ranges from windowed RMS energy, visualizes the waveform with the
    silent spans highlighted, and can cut the silent parts out of the
    video with an ffmpeg trim/concat filter graph.
    """

    def __init__(self, master):
        """Build the UI, load persisted settings and start the worker thread.

        Args:
            master: the Tk root window.
        """
        self.master = master
        self.video_path = None           # path of the loaded video file
        self.audio = None                # pydub AudioSegment of the extracted track
        self.silence_ranges = []         # list of (start_ms, end_ms) silent spans
        self.duration = 0                # audio duration in seconds
        self.current_view = [0, 10]      # visible time window [start_sec, end_sec]
        self.samples = None              # downsampled waveform samples in [-1, 1]
        self.time_axis = None            # time axis (seconds) matching self.samples
        self.task_queue = queue.Queue()  # callables executed by the worker thread
        self.config_file = "config.ini"

        # Load persisted parameters before the widgets read them.
        self.load_config()
        self.setup_ui()

        # Single daemon worker keeps ffmpeg/analysis work off the UI thread.
        self.worker_thread = threading.Thread(target=self.process_tasks, daemon=True)
        self.worker_thread.start()

    def load_config(self):
        """Load settings from config.ini, creating it with defaults if absent."""
        self.config = configparser.ConfigParser()
        # Fallback values used when the config file is missing.
        self.default_config = {
            'silence_threshold': '-40',
            'min_silence_len': '1000',
            'trim_ms': '500'
        }

        if os.path.exists(self.config_file):
            self.config.read(self.config_file)
        else:
            # First run: persist the defaults so the file exists next time.
            self.config['DEFAULT'] = self.default_config
            self.save_config()

    def save_config(self):
        """Write the current configuration to config.ini (best effort)."""
        try:
            with open(self.config_file, 'w') as configfile:
                self.config.write(configfile)
        except Exception as e:
            # Non-fatal: the app still works, the settings just won't persist.
            print(f"保存配置失败: {e}")

    def get_config_value(self, key, default):
        """Return a config value from the DEFAULT section, or `default` if missing."""
        try:
            return self.config.get('DEFAULT', key)
        except (configparser.NoSectionError, configparser.NoOptionError):
            return default

    def update_config(self):
        """Persist the current widget values back into config.ini."""
        self.config['DEFAULT']['silence_threshold'] = str(self.silence_thresh.get())
        self.config['DEFAULT']['min_silence_len'] = str(self.min_silence_len.get())
        self.config['DEFAULT']['trim_ms'] = str(self.trim_ms.get())
        self.save_config()

    def process_tasks(self):
        """Worker-thread loop: run queued callables until a None sentinel arrives."""
        while True:
            task = self.task_queue.get()
            if task is None:
                break
            try:
                task()
            except Exception as e:
                print(f"Task error: {e}")
            finally:
                # Always mark the task done so queue.join() can't hang.
                self.task_queue.task_done()

    def setup_ui(self):
        """Create and lay out all widgets."""
        self.master.title(lang['title'])
        self.master.geometry("1000x700")

        # Main frame
        main_frame = tk.Frame(self.master)
        main_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=5)

        # Control panel (buttons + progress)
        control_frame = tk.Frame(main_frame)
        control_frame.pack(fill=tk.X, pady=5)

        # File selection button
        load_btn = tk.Button(control_frame, text=lang['load_video'], command=self.load_video)
        load_btn.pack(side=tk.LEFT, padx=5)

        # Analyze button
        self.analyze_btn = tk.Button(control_frame, text=lang['analyze_silence'], command=self.analyze_silence)
        self.analyze_btn.pack(side=tk.LEFT, padx=5)

        # Process button
        self.process_btn = tk.Button(control_frame, text=lang['process_video'], command=self.process_video)
        self.process_btn.pack(side=tk.LEFT, padx=5)

        # Reset-view button
        reset_btn = tk.Button(control_frame, text=lang['reset_view'], command=self.reset_view)
        reset_btn.pack(side=tk.LEFT, padx=5)

        # Progress text
        self.progress_var = tk.StringVar()
        self.progress_var.set(lang['ready'])
        progress_label = tk.Label(control_frame, textvariable=self.progress_var)
        progress_label.pack(side=tk.LEFT, padx=5)

        # Progress bar
        self.progress_bar = ttk.Progressbar(control_frame, orient=tk.HORIZONTAL, length=200, mode='determinate')
        self.progress_bar.pack(side=tk.LEFT, padx=5)

        # Parameter entry area (threshold / min length / trim)
        param_frame = tk.Frame(main_frame)
        param_frame.pack(fill=tk.X, pady=5)

        tk.Label(param_frame, text=lang['silence_threshold']).pack(side=tk.LEFT, padx=5)
        self.silence_thresh = tk.DoubleVar(value=float(self.get_config_value('silence_threshold', '-40')))
        tk.Entry(param_frame, textvariable=self.silence_thresh, width=6).pack(side=tk.LEFT, padx=5)

        tk.Label(param_frame, text=lang['min_silence_len']).pack(side=tk.LEFT, padx=5)
        self.min_silence_len = tk.IntVar(value=int(self.get_config_value('min_silence_len', '1000')))
        tk.Entry(param_frame, textvariable=self.min_silence_len, width=6).pack(side=tk.LEFT, padx=5)

        tk.Label(param_frame, text=lang['trim_ms']).pack(side=tk.LEFT, padx=5)
        self.trim_ms = tk.IntVar(value=int(self.get_config_value('trim_ms', '500')))
        tk.Entry(param_frame, textvariable=self.trim_ms, width=6).pack(side=tk.LEFT, padx=5)

        # Zoom controls
        zoom_frame = tk.Frame(main_frame)
        zoom_frame.pack(fill=tk.X, pady=2)

        tk.Label(zoom_frame, text=lang['zoom']).pack(side=tk.LEFT, padx=5)
        self.zoom_var = tk.DoubleVar(value=1.0)
        self.zoom_scale = Scale(zoom_frame, from_=0.1, to=500.0, resolution=0.1,
                          orient=tk.HORIZONTAL, variable=self.zoom_var,
                          command=self.on_zoom_change, length=500)
        self.zoom_scale.pack(side=tk.LEFT, padx=5)

        # Waveform display area
        self.figure = plt.Figure(figsize=(12, 3), dpi=100)
        self.ax = self.figure.add_subplot(111)
        self.canvas = FigureCanvasTkAgg(self.figure, main_frame)
        self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)

        # Position scrollbar
        pos_frame = tk.Frame(main_frame)
        pos_frame.pack(fill=tk.X, pady=5)

        tk.Label(pos_frame, text=lang['position']).pack(side=tk.LEFT, padx=5)
        self.pos_var = tk.DoubleVar(value=0.0)
        self.pos_scale = Scale(pos_frame, from_=0.0, to=1.0, resolution=0.01,
                           orient=tk.HORIZONTAL, variable=self.pos_var,
                           command=self.on_position_change)
        self.pos_scale.pack(side=tk.LEFT, fill=tk.X, expand=True, padx=5)

        # Bind Alt+mouse-wheel zoom on the plot widget.
        self.canvas.get_tk_widget().bind("<Alt-MouseWheel>", self.on_mousewheel)

        # Initial (empty) state
        self.clear_plot()

    def clear_plot(self):
        """Reset the plot to the empty 'please load a video' state."""
        self.ax.clear()
        self.ax.set_title(lang['please_load_video'])
        self.ax.set_xlabel(lang['time_sec'])
        self.ax.set_ylabel(lang['amplitude'])
        self.canvas.draw()

    def load_video(self):
        """Ask for a video file, then extract and load its audio in the background."""
        self.video_path = filedialog.askopenfilename(filetypes=[("MP4视频文件", "*.mp4")])
        if not self.video_path:
            return

        def load_task():
            temp_dir = None  # created below; guards cleanup if mkdtemp fails
            try:
                self.master.after(0, lambda: self.progress_var.set(lang['extracting_audio']))
                self.master.after(0, self.progress_bar.start)

                # Create a temp dir for the extracted wav.
                temp_dir = tempfile.mkdtemp()
                audio_path = os.path.join(temp_dir, "temp_audio.wav")

                # Extract a mono 44.1 kHz wav track with ffmpeg.
                subprocess.run([
                    'ffmpeg', '-i', self.video_path,
                    '-ac', '1', '-ar', '44100',
                    '-y', audio_path
                ], check=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)

                # Load the extracted audio file.
                self.audio = AudioSegment.from_wav(audio_path)
                self.duration = len(self.audio) / 1000

                # Precompute waveform samples for plotting.
                samples = np.array(self.audio.get_array_of_samples(), dtype=np.float32)
                samples = samples / (2**15)  # normalize 16-bit PCM to [-1, 1]

                # Downsample long tracks so plotting stays responsive.
                if len(samples) > 100000:
                    downsample_factor = max(1, len(samples) // 50000)
                    samples = samples[::downsample_factor]

                self.samples = samples
                self.time_axis = np.linspace(0, self.duration, len(samples))

                self.master.after(0, self.on_load_complete)

            except Exception as e:
                # Bind the message now: `e` is unbound once the except block exits,
                # so a plain lambda would raise NameError when the UI runs it.
                msg = str(e)
                self.master.after(0, lambda: self.on_load_error(msg))
            finally:
                if temp_dir is not None:
                    shutil.rmtree(temp_dir, ignore_errors=True)

        self.task_queue.put(load_task)

    def on_load_complete(self):
        """UI-thread callback after the video loaded successfully."""
        self.progress_var.set(lang['video_loaded'].format(os.path.basename(self.video_path)))
        self.progress_bar.stop()
        self.plot_full_waveform()

    def on_load_error(self, error_msg):
        """UI-thread callback after the video failed to load."""
        self.progress_var.set(f"{lang['load_failed']}: {error_msg}")
        self.progress_bar.stop()
        messagebox.showerror(lang['error'], f"{lang['load_failed']}:\n{error_msg}")

    def plot_full_waveform(self):
        """Draw the entire waveform from 0 to self.duration."""
        if self.audio is None:
            return

        self.ax.clear()
        self.ax.plot(self.time_axis, self.samples, alpha=0.5, color='blue')
        self.ax.set_xlim(0, self.duration)
        self.ax.set_title(f"{lang['audio_waveform']} - {os.path.basename(self.video_path)}")
        self.ax.set_xlabel(lang['time_sec'])
        self.ax.set_ylabel(lang['amplitude'])
        self.ax.grid(True, linestyle='--', alpha=0.5)
        self.canvas.draw()

    def analyze_silence(self):
        """Run the energy-based silence analysis in the background."""
        if self.audio is None:
            self.progress_var.set(lang['must_load_video'])
            return
        # Persist the current parameters before analyzing.
        self.update_config()
        self.reset_view()
        # Disable the buttons to prevent re-entrant runs.
        self.analyze_btn.config(state=tk.DISABLED)
        self.process_btn.config(state=tk.DISABLED)

        def analyze_task():
            try:
                self.master.after(0, lambda: self.progress_var.set(lang['analyzing']))
                self.master.after(0, self.progress_bar.start)

                # Snapshot the parameters from the UI variables.
                threshold_db = self.silence_thresh.get()
                min_silence_ms = self.min_silence_len.get()
                trim_ms = self.trim_ms.get()

                # Detect silence via windowed RMS energy.
                silence_ranges = self.energy_based_silence_detection(threshold_db, min_silence_ms, trim_ms)
                self.silence_ranges = silence_ranges

                self.master.after(0, self.on_analyze_complete)

            except Exception as e:
                # Bind now: `e` is unbound once the except block exits.
                msg = str(e)
                self.master.after(0, lambda: self.on_analyze_error(msg))

        self.task_queue.put(analyze_task)

    def energy_based_silence_detection(self, threshold_db, min_silence_ms, trim_ms):
        """Detect silent ranges using windowed RMS energy.

        Args:
            threshold_db: silence threshold in dB. NOTE(review): measured
                relative to the loudest window, not true digital full scale,
                even though the UI labels it dBFS.
            min_silence_ms: minimum length (ms) for a span to count as silence.
            trim_ms: amount (ms) shaved off both ends of each silent span.

        Returns:
            List of (start_ms, end_ms) tuples for the trimmed silent spans.
        """
        # Raw audio samples normalized to [-1, 1].
        samples = np.array(self.audio.get_array_of_samples(), dtype=np.float32)
        sample_rate = self.audio.frame_rate
        samples = samples / (2**15)

        # 10 ms analysis windows with 50% overlap for better time resolution.
        window_size = int(sample_rate * 0.01)
        hop_size = window_size // 2

        # Vectorized RMS via a strided (no-copy) window view.
        num_windows = (len(samples) - window_size) // hop_size + 1
        if num_windows <= 0:
            return []  # audio shorter than a single analysis window
        windows = np.lib.stride_tricks.as_strided(
            samples,
            shape=(num_windows, window_size),
            strides=(samples.strides[0] * hop_size, samples.strides[0])
        )

        # RMS per window, converted to dB relative to the loudest window.
        rms_values = np.sqrt(np.mean(windows**2, axis=1))
        max_rms = np.max(rms_values) if np.max(rms_values) > 0 else 1e-10
        rms_db = 20 * np.log10(rms_values / max_rms)

        # Windows quieter than the threshold are silence.
        silence_mask = rms_db < threshold_db

        # Rising/falling edges of the mask mark silence starts/ends.
        diff = np.diff(silence_mask.astype(np.int8))
        starts = np.where(diff == 1)[0] + 1
        ends = np.where(diff == -1)[0] + 1

        # Handle silence that touches either end of the track.
        if silence_mask[0]:
            starts = np.insert(starts, 0, 0)
        if silence_mask[-1]:
            ends = np.append(ends, len(silence_mask))

        # Convert window indices to milliseconds.
        raw_ranges = []
        for start_idx, end_idx in zip(starts, ends):
            start_time = (start_idx * hop_size / sample_rate) * 1000
            end_time = (end_idx * hop_size / sample_rate) * 1000
            raw_ranges.append((start_time, end_time))

        # Drop spans shorter than the configured minimum.
        filtered_ranges = [(s, e) for s, e in raw_ranges if (e - s) >= min_silence_ms]

        # Trim both ends of each span, clamping to the audio bounds.
        final_ranges = []
        for start, end in filtered_ranges:
            new_start = min(len(self.audio), start + trim_ms)
            new_end = max(0, end - trim_ms)
            if new_end > new_start:
                final_ranges.append((new_start, new_end))

        return final_ranges

    def on_analyze_complete(self):
        """UI-thread callback after silence analysis finished."""
        self.progress_var.set(lang['analysis_complete'])
        self.progress_bar.stop()
        # Re-enable the buttons disabled by analyze_silence.
        self.analyze_btn.config(state=tk.NORMAL)
        self.process_btn.config(state=tk.NORMAL)

        self.plot_results()

    def on_analyze_error(self, error_msg):
        """UI-thread callback after silence analysis failed."""
        self.progress_var.set(f"{lang['analysis_failed']}: {error_msg}")
        self.progress_bar.stop()
        # Re-enable the buttons disabled by analyze_silence so the user can retry.
        self.analyze_btn.config(state=tk.NORMAL)
        self.process_btn.config(state=tk.NORMAL)
        messagebox.showerror(lang['error'], f"{lang['analysis_failed']}:\n{error_msg}")

    def plot_results(self):
        """Redraw the waveform with silent spans highlighted plus a stats box."""
        if not self.silence_ranges or self.audio is None:
            return

        # Reuse draw_waveform so the rendering stays consistent.
        self.draw_waveform()

        # Totals for the stats box.
        total_silence = sum(end - start for start, end in self.silence_ranges) / 1000
        silence_percent = (total_silence / self.duration) * 100

        if current_language == 'Chinese':
            info_text = (f"静音时长: {total_silence:.2f}秒 ({silence_percent:.1f}%)\n"
                        f"总时长: {self.duration:.2f}秒")
        else:
            info_text = (f"Silence: {total_silence:.2f}s ({silence_percent:.1f}%)\n"
                        f"Total: {self.duration:.2f}s")

        self.ax.text(0.6, 0.88, info_text, transform=self.ax.transAxes,
                   bbox=dict(boxstyle="round,pad=0.3", fc="white", ec="gray", alpha=0.8))

        self.canvas.draw()

    def on_mousewheel(self, event):
        """Alt+wheel zoom centered on the mouse position."""
        if self.audio is None:
            return

        # Map the widget-space mouse position to a time on the x-axis.
        x = event.x
        y = event.y
        xdata = self.ax.transData.inverted().transform((x, y))[0]

        # Wheel up zooms in, wheel down zooms out.
        zoom_factor = 1.1 if event.delta > 0 else 0.9
        current_width = self.current_view[1] - self.current_view[0]
        new_width = current_width * zoom_factor

        # Clamp the view width.
        if new_width > self.duration:
            new_width = self.duration
        elif new_width < 0.1:
            new_width = 0.1

        # Center the new view on the mouse position.
        center = xdata
        new_start = max(0, center - new_width/2)
        new_end = min(self.duration, center + new_width/2)

        # Keep the view width when it hits either boundary.
        if new_end == self.duration:
            new_start = self.duration - new_width
        elif new_start == 0:
            new_end = new_width

        self.current_view = [new_start, new_end]
        self.draw_waveform()

        # Keep the zoom slider in sync.
        zoom_level = self.duration / new_width
        self.zoom_var.set(zoom_level)

        # Keep the position slider in sync.
        self.update_position_scale()

    def on_zoom_change(self, value):
        """Zoom-slider callback: resize the view around its current center."""
        if self.audio is None:
            return

        zoom_factor = float(value)

        # New visible width for this zoom level.
        new_width = self.duration / zoom_factor

        # Keep the current view center fixed.
        center = np.mean(self.current_view)
        new_start = max(0, center - new_width/2)
        new_end = min(self.duration, center + new_width/2)

        # Shift back inside the track when the view hits a boundary.
        if new_end == self.duration:
            new_start = self.duration - new_width
        elif new_start == 0:
            new_end = new_width

        self.current_view = [new_start, new_end]
        self.draw_waveform()

        # Keep the position slider in sync.
        self.update_position_scale()

    def on_position_change(self, value):
        """Position-slider callback: pan the view, keeping its width."""
        if self.audio is None:
            return

        position = float(value)
        view_width = self.current_view[1] - self.current_view[0]
        total_width = self.duration

        if view_width < total_width:
            max_pos = total_width - view_width
            new_start = position * max_pos
            self.current_view = [new_start, new_start + view_width]
            self.draw_waveform()

    def update_position_scale(self):
        """Sync the position slider's range/state with the current view."""
        if self.audio is None:
            return

        view_width = self.current_view[1] - self.current_view[0]
        total_width = self.duration

        if view_width >= total_width:
            # The whole track is visible: nothing to pan.
            self.pos_scale.config(state=tk.DISABLED, to=0.0)
        else:
            self.pos_scale.config(state=tk.NORMAL)

            # Fraction of the track the slider can travel.
            max_pos = (total_width - view_width) / total_width
            self.pos_scale.config(to=max_pos)

            # Reflect the current view start in the slider.
            current_pos = self.current_view[0] / total_width
            if current_pos > max_pos:
                current_pos = max_pos
            self.pos_var.set(current_pos)

    def reset_view(self):
        """Reset the view to show the full waveform."""
        if self.audio is None:
            return

        self.current_view = [0, self.duration]
        self.zoom_var.set(1.0)
        self.pos_var.set(0.0)
        self.draw_waveform()

        # Keep the position slider in sync.
        self.update_position_scale()

    def draw_waveform(self):
        """Draw the waveform (and silent spans, if analyzed) for the current view."""
        if self.audio is None:
            return

        # Slice out the visible portion of the precomputed samples.
        start_idx = np.searchsorted(self.time_axis, self.current_view[0])
        end_idx = np.searchsorted(self.time_axis, self.current_view[1])
        start_idx = max(0, start_idx)
        end_idx = min(len(self.time_axis), end_idx)

        visible_time = self.time_axis[start_idx:end_idx]
        visible_samples = self.samples[start_idx:end_idx]

        # Downsample to ~1000 points so redraws stay fast.
        if len(visible_time) > 1000:
            step = max(1, len(visible_time) // 1000)
            visible_time = visible_time[::step]
            visible_samples = visible_samples[::step]

        self.ax.clear()

        # Waveform trace
        self.ax.plot(visible_time, visible_samples, alpha=0.5, color='blue', label=lang['audio_waveform'])

        # Overlay silent spans when analysis results exist.
        if self.silence_ranges:
            for start, end in self.silence_ranges:
                start_sec = start / 1000
                end_sec = end / 1000
                # Only spans intersecting the current view.
                if (start_sec < self.current_view[1] and end_sec > self.current_view[0]):
                    self.ax.axvspan(start_sec, end_sec, color='red', alpha=0.3, label='静音区域')
            # Totals for the stats box.
            total_silence = sum(end - start for start, end in self.silence_ranges) / 1000
            silence_percent = (total_silence / self.duration) * 100

            if current_language == 'Chinese':
                info_text = (f"静音时长: {total_silence:.2f}秒 ({silence_percent:.1f}%)\n"
                            f"总时长: {self.duration:.2f}秒")
            else:
                info_text = (f"Silence: {total_silence:.2f}s ({silence_percent:.1f}%)\n"
                            f"Total: {self.duration:.2f}s")

            self.ax.text(0.6, 0.88, info_text, transform=self.ax.transAxes,
                    bbox=dict(boxstyle="round,pad=0.3", fc="white", ec="gray", alpha=0.8))

        self.ax.set_xlim(self.current_view[0], self.current_view[1])
        self.ax.grid(True, linestyle='--', alpha=0.5)

        # De-duplicated legend (axvspan repeats the same label per span).
        if self.silence_ranges:
            handles, labels = self.ax.get_legend_handles_labels()
            by_label = dict(zip(labels, handles))
            self.ax.legend(by_label.values(), by_label.keys(), loc='upper right')

        self.canvas.draw()

    def process_video(self):
        """Cut the silent spans out of the video via ffmpeg, in the background."""
        if not self.silence_ranges or self.audio is None:
            self.progress_var.set(lang['must_analyze_first'])
            return

        output_path = filedialog.asksaveasfilename(
            defaultextension=".mp4",
            filetypes=[("MP4视频文件", "*.mp4")],
            initialfile=f"{os.path.splitext(os.path.basename(self.video_path))[0]}_processed.mp4"
        )
        if not output_path:
            return

        def process_task():
            temp_dir = None  # created below; guards cleanup if mkdtemp fails
            try:
                self.master.after(0, lambda: self.progress_var.set(lang['processing']))
                self.master.after(0, self.progress_bar.start)

                # Write the filter graph to a script file (it can exceed the
                # command-line length limit for many segments).
                filter_complex = self.generate_filter_complex()
                temp_dir = tempfile.mkdtemp()
                filter_script_path = os.path.join(temp_dir, "filter_complex.txt")

                with open(filter_script_path, 'w') as f:
                    f.write(filter_complex)

                # Re-encode with the trim/concat graph applied.
                subprocess.run([
                    'ffmpeg', '-i', self.video_path,
                    '-filter_complex_script', filter_script_path,
                    '-map', '[outv]', '-map', '[outa]',
                    '-c:v', 'libx264', '-preset', 'fast', '-crf', '22',
                    '-c:a', 'aac', '-b:a', '128k',
                    '-y', output_path
                ], check=True)

                self.master.after(0, lambda: self.on_process_complete(output_path))

            except Exception as e:
                # Bind now: `e` is unbound once the except block exits.
                msg = str(e)
                self.master.after(0, lambda: self.on_process_error(msg))
            finally:
                if temp_dir is not None:
                    shutil.rmtree(temp_dir, ignore_errors=True)

        self.task_queue.put(process_task)

    def generate_filter_complex(self):
        """Build the ffmpeg filter graph that keeps only the non-silent ranges.

        Returns:
            A filter_complex string with trim/atrim segments concatenated
            into labeled [outv] / [outa] outputs.
        """
        # Invert the silent spans into the non-silent ranges to keep.
        non_silent_ranges = []
        prev_end = 0

        for start, end in self.silence_ranges:
            if start > prev_end:
                non_silent_ranges.append((prev_end, start))
            prev_end = end

        if prev_end < len(self.audio):
            non_silent_ranges.append((prev_end, len(self.audio)))

        # One trim/atrim pair per kept range.
        video_filters = []
        audio_filters = []

        for i, (start_ms, end_ms) in enumerate(non_silent_ranges):
            start_sec = start_ms / 1000.0
            end_sec = end_ms / 1000.0

            video_filters.append(f"[0:v]trim=start={start_sec}:end={end_sec},setpts=PTS-STARTPTS[v{i}];")
            audio_filters.append(f"[0:a]atrim=start={start_sec}:end={end_sec},asetpts=PTS-STARTPTS[a{i}];")

        # Concatenate all kept segments.
        v_list = ''.join([f"[v{i}]" for i in range(len(non_silent_ranges))])
        a_list = ''.join([f"[a{i}]" for i in range(len(non_silent_ranges))])

        video_filters.append(f"{v_list}concat=n={len(non_silent_ranges)}:v=1:a=0[outv];")
        audio_filters.append(f"{a_list}concat=n={len(non_silent_ranges)}:v=0:a=1[outa]")

        return ''.join(video_filters + audio_filters)

    def on_process_complete(self, output_path):
        """UI-thread callback after video processing finished."""
        self.progress_var.set(f"{lang['process_complete']}: {os.path.basename(output_path)}")
        self.progress_bar.stop()
        messagebox.showinfo(lang['process_complete'], f"{lang['process_complete']}!\n{output_path}")

    def on_process_error(self, error_msg):
        """UI-thread callback after video processing failed."""
        self.progress_var.set(f"{lang['process_failed']}: {error_msg}")
        self.progress_bar.stop()
        messagebox.showerror(lang['error'], f"{lang['process_failed']}:\n{error_msg}")

if __name__ == "__main__":
    # Application entry point: create the Tk root window, build the
    # analyzer UI on it, and hand control to the Tk event loop.
    root = tk.Tk()
    app = EnergyBasedSilenceProcessor(root)
    root.mainloop()