package luke.audio.processor;

import luke.core.utils.PlaybackStateManager;
import luke.core.config.AppDataManager;
import luke.core.logging.LogUtil;
import luke.audio.analyzer.UniversalAudioDecoder;

import org.bytedeco.ffmpeg.global.avcodec;
import org.bytedeco.ffmpeg.global.avutil;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.Frame;

import java.io.*;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.concurrent.TimeUnit;

/**
 * 抖音视频结尾音去除器
 * 用于检测和去除抖音视频结尾的特定音频片段
 */
public class DouyinEndingSoundRemover {
    
    private static final String DY_WAV_FILE = "dy.wav";
    private static final double CORRELATION_THRESHOLD = 0.9; // 90%相关性阈值
    
    /**
     * Entry point: removes the Douyin ending jingle from the given video when
     * the feature is enabled and the file is an MP4.
     *
     * @param inputFile candidate video file
     * @return the trimmed file when the ending sound was detected and removed,
     *         otherwise the original {@code inputFile} unchanged
     */
    public static File checkAndRemoveEndingSound(File inputFile) {
        // Feature toggle: do nothing unless the user enabled removal.
        boolean featureEnabled = PlaybackStateManager.getInstance().isRemoveDouyinEndingSound();
        if (!featureEnabled) {
            LogUtil.debug("DouyinEndingSoundRemover", "检查结尾音", "去除抖音视频结尾音功能未启用");
            return inputFile;
        }

        // Only MP4 containers are processed; everything else passes through.
        if (isMp4File(inputFile)) {
            return removeEndingSound(inputFile);
        }

        LogUtil.debug("DouyinEndingSoundRemover", "检查结尾音", "文件不是MP4格式: " + inputFile.getName());
        return inputFile;
    }
    
    /**
     * Tells whether {@code file} is an existing file with an {@code .mp4}
     * extension (case-insensitive, default locale).
     *
     * @param file file to probe; may be {@code null}
     * @return {@code true} only for an existing file whose name ends in ".mp4"
     */
    private static boolean isMp4File(File file) {
        return file != null
                && file.exists()
                && file.getName().toLowerCase().endsWith(".mp4");
    }
    
    /**
     * Detects and removes the Douyin ending sound from the video.
     *
     * Pipeline: load the reference clip (dy.wav), extract the tail of the video
     * with the same duration, verify the durations match within 0.1 s, then
     * compute a loudness-normalized cross-correlation; above the 90% threshold
     * the video is trimmed in place.
     *
     * @param inputFile input MP4 video file
     * @return the trimmed file on detection, otherwise the original file
     */
    private static File removeEndingSound(File inputFile) {
        try {
            // Reference clip containing the known Douyin ending sound.
            File dyWavFile = getDyWavFile();
            if (dyWavFile == null || !dyWavFile.exists()) {
                LogUtil.error("DouyinEndingSoundRemover", "去除结尾音", "dy.wav文件不存在");
                return inputFile;
            }

            double dyWavDuration = getAudioDuration(dyWavFile);
            if (dyWavDuration <= 0) {
                LogUtil.error("DouyinEndingSoundRemover", "去除结尾音", "无法获取dy.wav的时长");
                return inputFile;
            }

            double inputDuration = getVideoDuration(inputFile);
            if (inputDuration <= 0) {
                LogUtil.error("DouyinEndingSoundRemover", "去除结尾音", "无法获取输入文件的时长");
                return inputFile;
            }

            // Extract a tail slice of the video whose length matches the reference clip.
            File lastNSecondsAudio = extractLastNSecondsAudio(inputFile, dyWavDuration);
            if (lastNSecondsAudio == null || !lastNSecondsAudio.exists()) {
                LogUtil.error("DouyinEndingSoundRemover", "去除结尾音", "无法提取最后" + dyWavDuration + "秒的音频");
                return inputFile;
            }

            try {
                LogUtil.debug("DouyinEndingSoundRemover", "去除结尾音", "dy.wav文件: " + dyWavFile.getAbsolutePath());
                LogUtil.debug("DouyinEndingSoundRemover", "去除结尾音", "提取的音频文件: " + lastNSecondsAudio.getAbsolutePath());

                // Sanity-check the extracted slice before the expensive correlation.
                if (!isAudioFromVideoEnd(inputFile, lastNSecondsAudio, dyWavDuration)) {
                    LogUtil.debug("DouyinEndingSoundRemover", "去除结尾音", "提取的音频不是来自视频末尾");
                    return inputFile;
                }

                double extractedDuration = getAudioDuration(lastNSecondsAudio);
                double durationDiff = Math.abs(dyWavDuration - extractedDuration);
                LogUtil.debug("DouyinEndingSoundRemover", "去除结尾音", "dy.wav时长: " + dyWavDuration + "秒, 提取音频时长: " + extractedDuration + "秒, 差异: " + durationDiff + "秒");

                // Extraction is exact, so only a ±0.1 s mismatch is tolerated.
                if (durationDiff > 0.1) {
                    LogUtil.debug("DouyinEndingSoundRemover", "去除结尾音", "音频时长不匹配，跳过相关性计算");
                    return inputFile;
                }

                // Loudness-normalized, offset-tolerant cross-correlation in [0, 1].
                double correlation = calculateAudioCorrelationWithNormalization(dyWavFile, lastNSecondsAudio);
                LogUtil.debug("DouyinEndingSoundRemover", "去除结尾音", "音频相关性: " + correlation);

                if (correlation > CORRELATION_THRESHOLD) {
                    LogUtil.info("DouyinEndingSoundRemover", "去除结尾音", "检测到抖音结尾音，相关性: " + correlation);
                    // Cut the video just before the ending sound starts.
                    return trimVideo(inputFile, inputDuration - dyWavDuration);
                }

                LogUtil.debug("DouyinEndingSoundRemover", "去除结尾音", "未检测到抖音结尾音，相关性: " + correlation);
                return inputFile;
            } finally {
                // FIX: the extracted tail WAV was previously leaked in the temp
                // directory on every call; delete it on all exit paths.
                if (!lastNSecondsAudio.delete()) {
                    lastNSecondsAudio.deleteOnExit();
                }
            }
        } catch (Exception e) {
            // FIX: dropped printStackTrace() (debug leftover); the error log above
            // carries the failure message, and the original file is returned as-is.
            LogUtil.error("DouyinEndingSoundRemover", "去除结尾音", "处理文件时发生错误: " + e.getMessage());
            return inputFile;
        }
    }
    
    /**
     * Resolves the reference clip {@code audio/dy.wav} under the application
     * data directory.
     *
     * @return the existing dy.wav file, or {@code null} when it is missing or
     *         the path cannot be resolved
     */
    private static File getDyWavFile() {
        try {
            Path candidate = AppDataManager.getInstance()
                    .getAppDataDirectory()
                    .resolve("audio")
                    .resolve(DY_WAV_FILE);

            File dyWavFile = candidate.toFile();
            if (dyWavFile.exists()) {
                return dyWavFile;
            }

            LogUtil.error("DouyinEndingSoundRemover", "获取dy.wav文件", "dy.wav文件不存在: " + dyWavFile.getAbsolutePath());
            return null;
        } catch (Exception e) {
            LogUtil.error("DouyinEndingSoundRemover", "获取dy.wav文件", "获取文件路径时发生错误: " + e.getMessage());
            return null;
        }
    }
    
    /**
     * Probes the container duration of an audio file via FFmpeg.
     *
     * @param audioFile file to probe
     * @return duration in seconds (container timestamp units converted from microseconds)
     * @throws Exception when FFmpeg cannot open or read the file
     */
    private static double getAudioDuration(File audioFile) throws Exception {
        try (FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(audioFile)) {
            grabber.start();
            long micros = grabber.getLengthInTime();
            grabber.stop();
            return micros / 1_000_000.0;
        }
    }
    
    /**
     * Probes the container duration of a video file via FFmpeg.
     *
     * FIX: this was a byte-for-byte duplicate of {@link #getAudioDuration(File)};
     * the grabber probes container duration identically for audio and video, so
     * delegate instead of duplicating the setup/teardown.
     *
     * @param videoFile file to probe
     * @return duration in seconds
     * @throws Exception when FFmpeg cannot open or read the file
     */
    private static double getVideoDuration(File videoFile) throws Exception {
        return getAudioDuration(videoFile);
    }
    
    /**
     * Extracts the last {@code durationSeconds} of audio from the video into a
     * temporary mono 8 kHz PCM WAV file.
     *
     * The low sample rate and single channel deliberately match the parameters
     * used later by the correlation step, keeping the comparison cheap.
     *
     * @param videoFile source video
     * @param durationSeconds length of the tail to extract, in seconds
     * @return the written WAV file in the application temp directory
     * @throws Exception when FFmpeg fails to read or write
     */
    private static File extractLastNSecondsAudio(File videoFile, double durationSeconds) throws Exception {
        Path tempDir = AppDataManager.getInstance().getTempDirectory();
        File outputFile = tempDir.resolve("last_" + durationSeconds + "s.wav").toFile();

        try (FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(videoFile)) {
            grabber.start();

            // Container duration in microseconds.
            long totalDuration = grabber.getLengthInTime();

            // Seek point so that exactly the last durationSeconds are grabbed;
            // clamped to 0 for videos shorter than the reference clip.
            long startTime = (long) (totalDuration - (durationSeconds * 1_000_000));
            if (startTime < 0) {
                startTime = 0;
            }

            // FIX: the "视频总时长" line was logged twice; log each value once.
            LogUtil.debug("DouyinEndingSoundRemover", "提取音频", "视频总时长: " + totalDuration + " 微秒");
            LogUtil.debug("DouyinEndingSoundRemover", "提取音频", "开始时间: " + startTime + " 微秒");
            LogUtil.debug("DouyinEndingSoundRemover", "提取音频", "目标时长: " + durationSeconds + " 秒");

            grabber.setTimestamp(startTime);

            // Mono, 8 kHz, 16-bit PCM — cheap and sufficient for correlation.
            try (FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(outputFile, 1)) {
                recorder.setSampleRate(8000);
                recorder.setAudioCodec(avcodec.AV_CODEC_ID_PCM_S16LE);
                recorder.setSampleFormat(avutil.AV_SAMPLE_FMT_S16);
                recorder.setFormat("wav");

                // No video stream in the output.
                recorder.setVideoBitrate(0);
                recorder.setFrameRate(0);

                recorder.start();

                // Stop once the grabber passes the end of the requested window.
                long endTime = Math.min(totalDuration, startTime + (long) (durationSeconds * 1_000_000));

                Frame frame;
                double recordedDuration = 0.0;

                while ((frame = grabber.grab()) != null) {
                    // Only audio frames carry samples; video frames are skipped.
                    if (frame.samples != null) {
                        if (grabber.getTimestamp() > endTime) {
                            LogUtil.debug("DouyinEndingSoundRemover", "提取音频", "已达到结束时间，停止录制");
                            break;
                        }

                        recorder.record(frame);
                        // FIX: dropped the redundant startTimestamp local that
                        // merely aliased startTime.
                        recordedDuration = (grabber.getTimestamp() - startTime) / 1_000_000.0;
                    }
                }

                LogUtil.debug("DouyinEndingSoundRemover", "提取音频", "实际录制时长: " + recordedDuration + " 秒");

                recorder.stop();
            }

            grabber.stop();
            return outputFile;
        }
    }
    
    /**
     * Sanity-checks the extracted audio against the expected tail duration.
     *
     * NOTE(review): despite the name, this only verifies that the extracted
     * clip's duration matches {@code expectedDuration} within ±0.1 s; the video
     * duration is probed and logged but not used in the decision.
     *
     * @param videoFile source video (probed for logging only)
     * @param extractedAudio the extracted tail audio
     * @param expectedDuration expected tail length in seconds
     * @return {@code true} when the extracted duration matches the expectation
     */
    private static boolean isAudioFromVideoEnd(File videoFile, File extractedAudio, double expectedDuration) {
        try {
            double videoDuration = getVideoDuration(videoFile);
            double extractedDuration = getAudioDuration(extractedAudio);

            double durationDiff = Math.abs(extractedDuration - expectedDuration);
            LogUtil.debug("DouyinEndingSoundRemover", "验证音频来源", "时长差异: " + durationDiff + "秒");

            // Precise extraction upstream means only ±0.1 s of slack is tolerated.
            if (durationDiff > 0.1) {
                LogUtil.debug("DouyinEndingSoundRemover", "验证音频来源", "提取的音频时长不匹配: " + extractedDuration + "秒, 期望时长: " + expectedDuration + "秒");
                return false;
            }

            LogUtil.debug("DouyinEndingSoundRemover", "验证音频来源", "视频总时长: " + videoDuration + "秒, 提取音频时长: " + extractedDuration + "秒");
            return true;
        } catch (Exception e) {
            LogUtil.error("DouyinEndingSoundRemover", "验证音频来源", "验证音频来源时发生错误: " + e.getMessage());
            return false;
        }
    }
    
    /**
     * Computes the normalized cross-correlation of two audio files without
     * loudness normalization.
     *
     * NOTE(review): not referenced by the detection pipeline in this file,
     * which uses the normalized variant instead.
     *
     * @param file1 first audio file
     * @param file2 second audio file
     * @return correlation score in [0, 1]
     * @throws Exception when decoding fails
     */
    private static double calculateAudioCorrelation(File file1, File file2) throws Exception {
        try {
            UniversalAudioDecoder decoder = new UniversalAudioDecoder();
            UniversalAudioDecoder.AudioData first = decoder.decodeAudioFile(file1);
            UniversalAudioDecoder.AudioData second = decoder.decodeAudioFile(file2);

            // Mix down to mono, then bring both signals to the lower of the
            // two sample rates so they are directly comparable.
            int commonRate = Math.min(first.sampleRate, second.sampleRate);
            float[] a = resample(convertToMono(first.data, first.channels), first.sampleRate, commonRate);
            float[] b = resample(convertToMono(second.data, second.channels), second.sampleRate, commonRate);

            return calculateCorrelation(a, b);
        } catch (Exception e) {
            LogUtil.error("DouyinEndingSoundRemover", "计算相关性", "计算音频相关性时发生错误: " + e.getMessage());
            throw e;
        }
    }
    
    /**
     * Computes the normalized cross-correlation of two audio files with
     * loudness normalization and a small time-offset search.
     *
     * @param file1 first audio file (the dy.wav reference)
     * @param file2 second audio file (the extracted tail)
     * @return correlation score in [0, 1]
     * @throws Exception when decoding fails
     */
    private static double calculateAudioCorrelationWithNormalization(File file1, File file2) throws Exception {
        try {
            UniversalAudioDecoder decoder = new UniversalAudioDecoder();
            UniversalAudioDecoder.AudioData reference = decoder.decodeAudioFile(file1);
            UniversalAudioDecoder.AudioData candidate = decoder.decodeAudioFile(file2);

            // Mix both signals down to mono so they are directly comparable.
            float[] referenceMono = convertToMono(reference.data, reference.channels);
            float[] candidateMono = convertToMono(candidate.data, candidate.channels);

            // Compare at a common rate capped at 8 kHz: plenty for detecting
            // the jingle and much cheaper than full-rate correlation.
            int commonRate = Math.min(Math.min(reference.sampleRate, candidate.sampleRate), 8000);

            float[] referenceResampled = resample(referenceMono, reference.sampleRate, commonRate);
            float[] candidateResampled = resample(candidateMono, candidate.sampleRate, commonRate);

            // Equalize loudness so a volume difference does not depress the score.
            float[] referenceNorm = normalizeLoudness(referenceResampled);
            float[] candidateNorm = normalizeLoudness(candidateResampled);

            return calculateCorrelationWithOffset(referenceNorm, candidateNorm);
        } catch (Exception e) {
            LogUtil.error("DouyinEndingSoundRemover", "计算相关性", "计算音频相关性时发生错误: " + e.getMessage());
            throw e;
        }
    }
    
    /**
     * Scales the signal so its RMS level becomes 0.1, clamping samples to
     * [-1, 1]. Empty or all-zero input is returned unchanged.
     *
     * @param audioData mono samples
     * @return a new array with normalized loudness (or the input when no-op)
     */
    private static float[] normalizeLoudness(float[] audioData) {
        int n = audioData.length;
        if (n == 0) {
            return audioData;
        }

        // Root-mean-square of the signal.
        double sumSquares = 0.0;
        for (int i = 0; i < n; i++) {
            sumSquares += audioData[i] * audioData[i];
        }
        double rms = Math.sqrt(sumSquares / n);

        // Silent input: nothing to scale (and avoids division by zero).
        if (rms == 0) {
            return audioData;
        }

        // Gain that brings the RMS to the 0.1 target level.
        float gain = 0.1f / (float) rms;

        float[] out = new float[n];
        for (int i = 0; i < n; i++) {
            // Clamp to the valid sample range after applying the gain.
            out[i] = Math.max(-1.0f, Math.min(1.0f, audioData[i] * gain));
        }
        return out;
    }
    
    /**
     * Normalized cross-correlation between two mono signals, searching a small
     * window of time offsets for the best match.
     *
     * The longer input acts as the signal, the shorter as the template. For
     * each offset the dot product over the overlap is normalized by the
     * geometric mean of the overlap's signal energy and the template's total
     * energy, and the maximum over all offsets is returned.
     *
     * FIX: removed the unused local {@code templateLength}.
     *
     * @param data1 first mono sample array
     * @param data2 second mono sample array
     * @return best correlation score in [0, 1]; 0 for empty or silent input
     */
    private static double calculateCorrelationWithOffset(float[] data1, float[] data2) {
        if (data1.length == 0 || data2.length == 0) {
            return 0.0;
        }

        // Longer array is the signal, shorter is the template.
        float[] signal = data1.length >= data2.length ? data1 : data2;
        float[] template = data1.length >= data2.length ? data2 : data1;

        double templateEnergy = 0.0;
        for (float v : template) {
            templateEnergy += v * v;
        }

        // A silent template cannot correlate with anything.
        if (templateEnergy == 0) {
            return 0.0;
        }

        double maxCorrelation = 0.0;
        // NOTE(review): assumes inputs were resampled to 8 kHz upstream (they
        // are in this file's pipeline) — confirm before reusing elsewhere.
        int sampleRate = 8000;
        int maxOffset = sampleRate / 10; // ±0.1 s search window

        for (int offset = -maxOffset; offset <= maxOffset; offset++) {
            double correlation = 0.0;
            double signalEnergy = 0.0;

            int start = Math.max(0, offset);
            int end = Math.min(signal.length, signal.length + offset);

            if (end > start) {
                for (int i = start; i < end; i++) {
                    int templateIndex = i - offset;
                    if (templateIndex >= 0 && templateIndex < template.length) {
                        correlation += signal[i] * template[templateIndex];
                        signalEnergy += signal[i] * signal[i];
                    }
                }

                // Normalize and keep the best score seen so far.
                if (signalEnergy > 0) {
                    correlation = Math.abs(correlation) / Math.sqrt(signalEnergy * templateEnergy);
                    if (correlation > maxCorrelation) {
                        maxCorrelation = correlation;
                    }
                }
            }
        }

        return maxCorrelation;
    }
    
    /**
     * Linear-interpolation resampler. The effective target rate is capped at
     * 8 kHz to keep later correlation cheap.
     *
     * Note the equality early-return happens BEFORE the cap, matching the
     * established call contract of this method.
     *
     * @param audioData mono samples
     * @param originalSampleRate input sample rate in Hz
     * @param targetSampleRate requested output rate in Hz (capped at 8000)
     * @return resampled samples (or the input array when no-op)
     */
    private static float[] resample(float[] audioData, int originalSampleRate, int targetSampleRate) {
        // No-op when rates already match or there is nothing to resample.
        if (originalSampleRate == targetSampleRate || audioData.length == 0) {
            return audioData;
        }

        // 8 kHz is ample for jingle detection and much faster to process.
        int effectiveTarget = Math.min(targetSampleRate, 8000);

        // Input samples consumed per output sample.
        double step = (double) originalSampleRate / effectiveTarget;
        int outLength = (int) (audioData.length / step);
        float[] out = new float[outLength];

        for (int i = 0; i < outLength; i++) {
            double pos = i * step;
            int lo = (int) Math.floor(pos);
            int hi = Math.min(lo + 1, audioData.length - 1);

            // Linear interpolation between the two neighboring input samples.
            double frac = pos - lo;
            out[i] = (float) (audioData[lo] + frac * (audioData[hi] - audioData[lo]));
        }

        return out;
    }
    
    /**
     * Mixes interleaved multi-channel samples down to mono by averaging the
     * channels of each frame.
     *
     * @param audioData interleaved samples (frame-major)
     * @param channels number of interleaved channels
     * @return mono samples; the input array when already mono or empty
     */
    private static float[] convertToMono(float[] audioData, int channels) {
        // Already mono (or nothing to do).
        if (channels <= 1 || audioData.length == 0) {
            return audioData;
        }

        int frames = audioData.length / channels;
        float[] mono = new float[frames];

        for (int frame = 0; frame < frames; frame++) {
            int base = frame * channels;
            float acc = 0.0f;
            for (int ch = 0; ch < channels; ch++) {
                acc += audioData[base + ch];
            }
            // Average of all channels in this frame.
            mono[frame] = acc / channels;
        }

        return mono;
    }
    
    /**
     * Normalized cross-correlation between two mono signals with a wide ±1 s
     * offset search window.
     *
     * NOTE(review): near-duplicate of
     * {@link #calculateCorrelationWithOffset(float[], float[])}, differing only
     * in the search window (±1 s vs ±0.1 s); consider consolidating.
     *
     * FIX: removed the unused local {@code templateLength}.
     *
     * @param data1 first mono sample array
     * @param data2 second mono sample array
     * @return best correlation score in [0, 1]; 0 for empty or silent input
     */
    private static double calculateCorrelation(float[] data1, float[] data2) {
        if (data1.length == 0 || data2.length == 0) {
            return 0.0;
        }

        // Longer array is the signal, shorter is the template.
        float[] signal = data1.length >= data2.length ? data1 : data2;
        float[] template = data1.length >= data2.length ? data2 : data1;

        double templateEnergy = 0.0;
        for (float v : template) {
            templateEnergy += v * v;
        }

        // A silent template cannot correlate with anything.
        if (templateEnergy == 0) {
            return 0.0;
        }

        double maxCorrelation = 0.0;
        // NOTE(review): assumes an 8 kHz sample rate — confirm before reuse.
        int sampleRate = 8000;
        int maxOffset = sampleRate; // ±1 s search window

        for (int offset = -maxOffset; offset <= maxOffset; offset++) {
            double correlation = 0.0;
            double signalEnergy = 0.0;

            int start = Math.max(0, offset);
            int end = Math.min(signal.length, signal.length + offset);

            if (end > start) {
                for (int i = start; i < end; i++) {
                    int templateIndex = i - offset;
                    if (templateIndex >= 0 && templateIndex < template.length) {
                        correlation += signal[i] * template[templateIndex];
                        signalEnergy += signal[i] * signal[i];
                    }
                }

                // Normalize and keep the best score seen so far.
                if (signalEnergy > 0) {
                    correlation = Math.abs(correlation) / Math.sqrt(signalEnergy * templateEnergy);
                    if (correlation > maxCorrelation) {
                        maxCorrelation = correlation;
                    }
                }
            }
        }

        return maxCorrelation;
    }
    
    /**
     * Cuts the video to {@code endTimeSeconds} using the ffmpeg CLI with stream
     * copy (no re-encoding), then replaces the original file with the result.
     *
     * @param inputFile video to trim (replaced in place on success)
     * @param endTimeSeconds new total duration in seconds
     * @return the (now trimmed) original file
     * @throws Exception when ffmpeg fails or the process is interrupted
     */
    private static File trimVideo(File inputFile, double endTimeSeconds) throws Exception {
        Path tempDir = AppDataManager.getInstance().getTempDirectory();
        File outputFile = tempDir.resolve("trimmed_" + inputFile.getName()).toFile();

        // -t limits output duration; -c copy avoids re-encoding; -y overwrites.
        String[] cmd = {
            "ffmpeg",
            "-i", inputFile.getAbsolutePath(),
            "-t", String.valueOf(endTimeSeconds),
            "-c", "copy",
            "-y",
            outputFile.getAbsolutePath()
        };

        LogUtil.debug("DouyinEndingSoundRemover", "裁剪视频", "执行FFmpeg命令: " + String.join(" ", cmd));

        ProcessBuilder pb = new ProcessBuilder(cmd);
        // Merge stderr into stdout so ffmpeg's progress log is drained below
        // (an undrained pipe can block the child process).
        pb.redirectErrorStream(true);
        Process process = pb.start();

        // FIX: the reader was never closed; close it via try-with-resources.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
            String line;
            while ((line = reader.readLine()) != null) {
                LogUtil.debug("DouyinEndingSoundRemover", "FFmpeg输出", line);
            }
        }

        int exitCode = process.waitFor();
        if (exitCode != 0) {
            // FIX: clean up any partial output before reporting the failure.
            if (outputFile.exists() && !outputFile.delete()) {
                outputFile.deleteOnExit();
            }
            throw new IOException("FFmpeg命令执行失败，退出码: " + exitCode);
        }

        // Atomically-as-possible swap the trimmed result over the original.
        Files.move(outputFile.toPath(), inputFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
        LogUtil.info("DouyinEndingSoundRemover", "裁剪视频", "视频已裁剪并替换原始文件");
        return inputFile;
    }
}