package com.hushan.audio;

import cn.hutool.core.io.FileUtil;
import lombok.extern.slf4j.Slf4j;
import org.bytedeco.ffmpeg.avcodec.AVCodec;
import org.bytedeco.ffmpeg.avcodec.AVCodecContext;
import org.bytedeco.ffmpeg.avcodec.AVPacket;
import org.bytedeco.ffmpeg.avutil.AVChannelLayout;
import org.bytedeco.ffmpeg.avutil.AVDictionary;
import org.bytedeco.ffmpeg.avutil.AVFrame;
import org.bytedeco.ffmpeg.avutil.AVRational;
import org.bytedeco.ffmpeg.global.avformat;
import org.bytedeco.ffmpeg.global.avutil;
import org.bytedeco.ffmpeg.swresample.SwrContext;
import org.bytedeco.javacpp.*;

import javax.sound.sampled.*;
import java.io.File;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.bytedeco.ffmpeg.global.avcodec.*;
import static org.bytedeco.ffmpeg.global.avdevice.avdevice_register_all;
import static org.bytedeco.ffmpeg.global.avformat.avformat_network_init;
import static org.bytedeco.ffmpeg.global.avutil.*;
import static org.bytedeco.ffmpeg.global.swresample.*;

/**
 * Audio capture task — encapsulates the complete pipeline of capturing PCM
 * audio from a JavaSound mixer, converting it to the encoder's planar sample
 * format via swresample, and encoding it to an MP3 file with FFmpeg.
 */
@Slf4j
public class AudioCaptureTask {
    
    // Capture parameters
    private final static float AUDIO_CART_SAMPLE_RATE = 44100f; // capture sample rate in Hz
    private final int MP3_FRAME_SIZE = 1152; // samples per channel in one MP3 frame
    private final Object swrLock = new Object(); // serializes swr_convert calls
    
    // Task control
    private final AtomicBoolean isRunning = new AtomicBoolean(false);  // true while the capture task is active
    private final AtomicBoolean shouldStop = new AtomicBoolean(false); // cooperative stop flag read by the capture loop
    private CompletableFuture<Void> captureTask; // handle to the running background task
    
    // Task configuration
    private final String taskId;         // unique task identifier (used in logs)
    private final String outputFileName; // MP3 output path, appended to packet by packet
    private final int mixerIndex;        // index into AudioSystem.getMixerInfo(); default mixer used when invalid
    private final long durationSeconds;  // maximum capture duration in seconds
    
    // Audio processing components (allocated during initialization, released in cleanup())
    private TargetDataLine targetDataLine;
    private AVCodecContext codecContext;
    private SwrContext swrContext;
    private AVFrame frame;
    private AVPacket packet;
    private AVChannelLayout channelLayout;
    
    // Accumulation buffer: one native plane per channel, filled until a full MP3 frame is available
    private BytePointer[] accumulatedSamples = null;
    private int accumulatedSampleCount = 0; // samples per channel currently buffered
    
    // Number of MP3 frames encoded so far; drives PTS computation
    private long totalEncodedFrames = 0;
    
    /**
     * @param taskId          unique identifier for logging
     * @param outputFileName  path of the MP3 file to write (packets are appended)
     * @param mixerIndex      index of the JavaSound mixer to capture from
     * @param durationSeconds maximum recording duration in seconds
     */
    public AudioCaptureTask(String taskId, String outputFileName, int mixerIndex, long durationSeconds) {
        this.taskId = taskId;
        this.outputFileName = outputFileName;
        this.mixerIndex = mixerIndex;
        this.durationSeconds = durationSeconds;
    }
    
    /**
     * Starts the asynchronous audio capture task.
     *
     * @return the future that completes when capture (and cleanup) finishes
     * @throws IllegalStateException if the task is already running
     */
    public CompletableFuture<Void> startAsync() {
        // Guard clause: only one concurrent run per task instance.
        if (!isRunning.compareAndSet(false, true)) {
            throw new IllegalStateException("任务 " + taskId + " 已在运行中");
        }
        shouldStop.set(false);
        captureTask = CompletableFuture
                .runAsync(this::runCaptureTask)
                .whenComplete((ignored, error) -> {
                    isRunning.set(false);
                    cleanup();
                    if (error == null) {
                        log.info("音频采集任务 {} 完成", taskId);
                    } else {
                        log.error("音频采集任务 {} 执行失败", taskId, error);
                    }
                });
        log.info("音频采集任务 {} 已启动", taskId);
        return captureTask;
    }
    
    /**
     * Requests the capture task to stop and blocks until it has finished.
     * A no-op (with a warning) when the task is not running.
     */
    public void stop() {
        // Guard clause: nothing to stop.
        if (!isRunning.get()) {
            log.warn("任务 {} 未在运行", taskId);
            return;
        }
        shouldStop.set(true);
        log.info("正在停止音频采集任务 {}", taskId);
        
        CompletableFuture<Void> task = captureTask;
        if (task == null) {
            return;
        }
        // Block until the background task (including cleanup) completes.
        try {
            task.join();
        } catch (Exception e) {
            log.warn("等待任务 {} 停止时发生异常", taskId, e);
        }
    }
    
    /**
     * Checks whether the capture task is currently running.
     *
     * @return true while the background capture task is active
     */
    public boolean isRunning() {
        return isRunning.get();
    }
    
    /** @return the unique identifier of this capture task */
    public String getTaskId() {
        return taskId;
    }
    
    /** @return the path of the MP3 file this task writes to */
    public String getOutputFileName() {
        return outputFileName;
    }
    
    /**
     * Entry point of the background capture task: initializes FFmpeg, the
     * audio capture device, the MP3 encoder and the resampler (in that order),
     * then runs the capture loop. Any failure is rethrown as a
     * RuntimeException so the CompletableFuture completes exceptionally.
     */
    private void runCaptureTask() {
        try {
            initializeFFmpeg();
            initializeAudioCapture();
            initializeMP3Encoder();
            initializeResampler();
            
            performAudioCapture();
            
        } catch (Exception e) {
            log.error("音频采集任务 {} 执行过程中发生错误", taskId, e);
            // Preserve the cause so whenComplete() sees the original failure.
            throw new RuntimeException("音频采集任务执行失败", e);
        }
    }
    
    /**
     * Loads the required FFmpeg native libraries and registers capture devices.
     * NOTE(review): the load order (avutil/swresample/avcodec/avformat before
     * avdevice) mirrors the library dependency chain — confirm before changing.
     */
    private void initializeFFmpeg() {
        log.debug("任务 {} 初始化FFmpeg", taskId);
        
        Loader.load(avutil.class);
        Loader.load(org.bytedeco.ffmpeg.global.swresample.class);
        Loader.load(org.bytedeco.ffmpeg.global.avcodec.class);
        Loader.load(avformat.class);
        av_log_set_level(AV_LOG_INFO);
        av_jni_set_java_vm(Loader.getJavaVM(), null);
        avformat_network_init();
        Loader.load(org.bytedeco.ffmpeg.global.avdevice.class);
        avdevice_register_all();
    }
    
    /**
     * Opens the audio capture line on the configured mixer, falling back to
     * the system default mixer when the index is invalid or mixer lookup
     * fails. The line is opened for 44.1kHz / 16-bit / stereo / little-endian
     * signed PCM and started.
     *
     * Fix over the original: a negative {@code mixerIndex} previously reached
     * {@code mixerInfo[mixerIndex]} and relied on the surrounding catch block
     * for the fallback; it is now validated explicitly with the upper bound.
     *
     * @throws LineUnavailableException if the target data line cannot be obtained/opened
     * @throws RuntimeException         if no mixer is available at all
     */
    private void initializeAudioCapture() throws LineUnavailableException {
        log.debug("任务 {} 初始化音频采集设备", taskId);
        
        Mixer mixer;
        try {
            Mixer.Info[] mixerInfo = AudioSystem.getMixerInfo();
            
            if (mixerInfo == null || mixerInfo.length == 0) {
                log.warn("任务 {} AudioSystem.getMixerInfo() 返回空结果，使用默认混音器", taskId);
                mixer = AudioSystem.getMixer(null); // default mixer
            } else if (mixerIndex < 0 || mixerIndex >= mixerInfo.length) {
                // Covers both negative and too-large indices explicitly.
                log.warn("任务 {} 混音器索引 {} 超出范围 {}，使用默认混音器", taskId, mixerIndex, mixerInfo.length);
                mixer = AudioSystem.getMixer(null); // default mixer
            } else {
                mixer = AudioSystem.getMixer(mixerInfo[mixerIndex]);
            }
        } catch (Exception e) {
            log.warn("任务 {} 获取混音器信息失败，使用默认混音器: {}", taskId, e.getMessage());
            mixer = AudioSystem.getMixer(null); // default mixer
        }
        
        if (mixer == null) {
            throw new RuntimeException("无法获取音频混音器");
        }

        // Signed PCM, 44100 Hz, 16-bit, 2 channels, 4-byte frames, little-endian.
        AudioFormat audioFormat = new AudioFormat(
                AudioFormat.Encoding.PCM_SIGNED,
                AUDIO_CART_SAMPLE_RATE,
                16,
                2,
                (16 / 8) * 2,
                AUDIO_CART_SAMPLE_RATE,
                false);

        DataLine.Info dataLineInfo = new DataLine.Info(TargetDataLine.class, audioFormat);
        targetDataLine = (TargetDataLine) mixer.getLine(dataLineInfo);
        
        // ~100ms of audio: 44100 * 2 channels * 2 bytes * 0.1s = 17640 bytes,
        // but never smaller than twice the capture loop's read-chunk size.
        int bufferSize = (int)(AUDIO_CART_SAMPLE_RATE * 2 * 2 * 0.1);
        bufferSize = Math.max(bufferSize, 9216);
        
        targetDataLine.open(audioFormat, bufferSize);
        
        // The device may round the requested size; log what we actually got.
        int actualBufferSize = targetDataLine.getBufferSize();
        log.info("任务 {} 音频设备初始化完成，缓冲区大小: {} 字节 ({}ms)", 
                taskId, actualBufferSize, String.format("%.1f", (double)actualBufferSize / (AUDIO_CART_SAMPLE_RATE * 2 * 2) * 1000));
        
        targetDataLine.start();
        
        // Give the device buffer a moment to start filling.
        try {
            Thread.sleep(50);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        
        log.info("任务 {} 音频采集设备初始化完成", taskId);
    }
    
    /**
     * Creates and opens the MP3 encoder context (128 kbps, planar S16,
     * 44.1 kHz, stereo, time base 1/sample_rate) and allocates the reusable
     * AVFrame and AVPacket used by the encode path.
     */
    private void initializeMP3Encoder() {
        log.debug("任务 {} 初始化MP3编码器", taskId);
        
        AVCodec mp3Codec = avcodec_find_encoder(AV_CODEC_ID_MP3);
        if (mp3Codec == null) {
            throw new RuntimeException("未找到MP3编码器");
        }

        codecContext = avcodec_alloc_context3(mp3Codec);
        codecContext.codec_id(mp3Codec.id());
        codecContext.codec_type(AVMEDIA_TYPE_AUDIO);
        codecContext.bit_rate(128000);
        codecContext.sample_fmt(AV_SAMPLE_FMT_S16P); // planar 16-bit — the format the resampler produces
        codecContext.sample_rate((int) AUDIO_CART_SAMPLE_RATE);

        channelLayout = new AVChannelLayout().retainReference();
        av_channel_layout_default(channelLayout, 2); // stereo
        codecContext.ch_layout(channelLayout);

        // Time base = 1 / sample_rate so PTS is counted in samples.
        AVRational sample_rate = av_d2q(AUDIO_CART_SAMPLE_RATE, 1001000);
        AVRational time_base = av_inv_q(sample_rate);
        codecContext.time_base(time_base);

        // Raw sample bit depth.
        codecContext.bits_per_raw_sample(16);

        // Permit experimental encoders if the codec is flagged as such.
        if ((mp3Codec.capabilities() & AV_CODEC_CAP_EXPERIMENTAL) != 0) {
            codecContext.strict_std_compliance(FF_COMPLIANCE_EXPERIMENTAL);
        }

        int result = avcodec_open2(codecContext, mp3Codec, (AVDictionary)null);
        if (result < 0) {
            throw new RuntimeException("无法打开MP3编码器，错误码: " + result);
        }

        // Reusable conversion frame sized to the encoder's native frame size.
        frame = av_frame_alloc();
        frame.nb_samples(codecContext.frame_size());
        frame.format(codecContext.sample_fmt());
        av_channel_layout_copy(frame.ch_layout(), codecContext.ch_layout());
        frame.sample_rate(codecContext.sample_rate());

        if (av_frame_get_buffer(frame, 0) < 0) {
            throw new RuntimeException("无法分配音频帧缓冲区");
        }
        
        if (av_frame_make_writable(frame) < 0) {
            throw new RuntimeException("无法使音频帧可写");
        }

        packet = av_packet_alloc();
        
        log.info("任务 {} MP3编码器初始化完成", taskId);
    }
    
    /**
     * Sets up swresample to convert interleaved S16 stereo to planar S16P
     * stereo at the same sample rate (pure format conversion, no rate change).
     */
    private void initializeResampler() {
        log.debug("任务 {} 初始化重采样器", taskId);
        
        swrContext = new SwrContext().retainReference();
        
        AVChannelLayout inputLayout = new AVChannelLayout().retainReference();
        AVChannelLayout outputLayout = new AVChannelLayout().retainReference();
        av_channel_layout_default(inputLayout, 2);
        av_channel_layout_default(outputLayout, 2);
        
        int ret = swr_alloc_set_opts2(swrContext,
                outputLayout, AV_SAMPLE_FMT_S16P, (int) AUDIO_CART_SAMPLE_RATE,
                inputLayout, AV_SAMPLE_FMT_S16, (int) AUDIO_CART_SAMPLE_RATE,
                0, null);
        
        // swr_alloc_set_opts2 copies the layouts into the context, so the
        // local wrappers can be released immediately.
        inputLayout.releaseReference();
        outputLayout.releaseReference();
        
        if (ret < 0) {
            throw new RuntimeException("无法初始化重采样器，错误码: " + ret);
        }
        
        if (swr_init(swrContext) < 0) {
            throw new RuntimeException("无法启动重采样器");
        }
        
        log.info("任务 {} 重采样器初始化完成", taskId);
    }
    
    /**
     * Main capture loop: reads PCM chunks from the target data line, converts
     * and encodes them to MP3 until the requested duration elapses or a stop
     * is requested, then flushes the accumulator and the encoder.
     *
     * Fixes over the original:
     * - InterruptedException (from Thread.sleep) used to fall into the generic
     *   catch and fail the whole task without restoring the interrupt flag; it
     *   now re-interrupts the thread and exits the loop gracefully.
     * - The expected-bytes estimate used whole-second truncation, overstating
     *   data loss early in the recording; it now uses fractional seconds.
     *
     * @throws Exception on unrecoverable audio/encoding errors
     */
    private void performAudioCapture() throws Exception {
        log.info("任务 {} 开始音频采集，预计时长: {}秒", taskId, durationSeconds);
        
        long startTime = System.nanoTime();
        long maxDurationNanos = durationSeconds * 1000 * 1000000L;
        int frameCount = 0;
        
        // Small read buffer keeps latency low and the device buffer drained.
        byte[] buffer = new byte[2304];
        
        // Counters used to detect dropped audio data.
        long totalBytesRead = 0;
        long expectedBytes = 0;
        int emptyReadCount = 0;
        
        while (!shouldStop.get() && (System.nanoTime() - startTime) < maxDurationNanos) {
            try {
                // Only read what the device buffer currently holds.
                int available = targetDataLine.available();
                
                if (available == 0) {
                    emptyReadCount++;
                    if (emptyReadCount > 10) {
                        // Buffer has stayed empty unusually long — possible device problem.
                        log.trace("任务 {} 音频缓冲区持续为空，可能存在设备问题", taskId);
                        emptyReadCount = 0;
                    }
                    Thread.sleep(1); // brief pause to avoid busy-spinning
                    continue;
                }
                
                emptyReadCount = 0;
                
                int bytesToRead = Math.min(buffer.length, available);
                int bytesRead = targetDataLine.read(buffer, 0, bytesToRead);
                
                if (bytesRead > 0) {
                    totalBytesRead += bytesRead;
                    
                    // PointerScope releases the temporary native pointers allocated per chunk.
                    try (PointerScope scope = new PointerScope()) {
                        processPCMToMP3(buffer, bytesRead, frameCount);
                    }
                    
                    frameCount++;
                    
                    // Every 1000 chunks, estimate whether we are losing data.
                    if (frameCount % 1000 == 0) {
                        long elapsedNanos = System.nanoTime() - startTime;
                        // expected bytes = sampleRate * channels * bytesPerSample * elapsed seconds
                        double elapsedSeconds = elapsedNanos / 1_000_000_000.0;
                        expectedBytes = (long)(AUDIO_CART_SAMPLE_RATE * 2 * 2 * elapsedSeconds);
                        
                        if (expectedBytes > 0) {
                            double dataLossRatio = 1.0 - (double)totalBytesRead / expectedBytes;
                            if (dataLossRatio > 0.01) { // warn when loss exceeds 1%
                                log.warn("任务 {} 检测到音频数据丢失: {}%, 预期:{} 字节, 实际:{} 字节", 
                                        taskId, String.format("%.2f", dataLossRatio * 100), expectedBytes, totalBytesRead);
                            }
                        }
                    }
                } else if (bytesRead < 0) {
                    log.error("任务 {} 音频读取错误: {}", taskId, bytesRead);
                    break;
                }
                
            } catch (InterruptedException ie) {
                // Preserve interrupt status and stop capturing instead of failing the task.
                Thread.currentThread().interrupt();
                break;
            } catch (Exception e) {
                log.error("任务 {} 音频处理出错", taskId, e);
                if (!shouldStop.get()) {
                    throw e;
                }
                break;
            }
        }
        
        // Encode any complete frames still buffered.
        flushAccumulatedSamples(frameCount);
        
        // Drain the encoder's internal packets.
        flushEncoder();
        
        long actualDuration = (System.nanoTime() - startTime) / 1000000000L;
        double finalDataLossRatio = expectedBytes > 0 ? 1.0 - (double)totalBytesRead / expectedBytes : 0.0;
        
        log.info("任务 {} 音频采集完成，实际时长: {}秒，处理帧数: {}, 数据丢失率: {}%",
                taskId, actualDuration, frameCount, String.format("%.2f", finalDataLossRatio * 100));
    }
    
    /**
     * Converts one chunk of interleaved S16 stereo PCM into the encoder's
     * planar sample format via swresample — writing straight into the reusable
     * {@code frame}'s data planes — and hands the converted samples to the
     * accumulator for encoding.
     *
     * @param buffer     raw PCM bytes read from the audio device
     * @param bytesRead  number of valid bytes in {@code buffer}
     * @param frameCount capture-loop iteration counter (passed through for diagnostics)
     */
    private void processPCMToMP3(byte[] buffer, int bytesRead, int frameCount) throws Exception {
        try (PointerScope scope = new PointerScope()) {
            
            // 4 bytes per stereo S16 sample pair.
            // NOTE(review): totalSamples, samplesPerChannel and outputChannels are unused.
            int totalSamples = bytesRead / 4;
            int samplesPerChannel = totalSamples / 2;
            int inputChannels = 2;
            int inputDepth = 2;
            int outputFormat = codecContext.sample_fmt();
            int outputChannels = codecContext.ch_layout().nb_channels();
            int outputDepth = av_get_bytes_per_sample(outputFormat);
            
            // Input is a single interleaved plane.
            BytePointer inputPointer = new BytePointer(buffer).retainReference();
            BytePointer[] samples_in = {inputPointer};
            
            // One output plane per channel for planar formats, otherwise one plane total.
            int planes_out = av_sample_fmt_is_planar(outputFormat) != 0 ? codecContext.ch_layout().nb_channels() : 1;
            BytePointer[] samples_out = new BytePointer[planes_out];
            
            // Wrap each of frame's data planes with its per-plane capacity.
            for (int i = 0; i < planes_out; i++) {
                BytePointer frameDataPtr = new BytePointer(frame.data(i));
                int data_size = av_samples_get_buffer_size((IntPointer)null, codecContext.ch_layout().nb_channels(),
                        codecContext.frame_size(), codecContext.sample_fmt(), 1) / planes_out;
                samples_out[i] = frameDataPtr.capacity(data_size);
            }
            
            PointerPointer input_plane_ptr = new PointerPointer(AVFrame.AV_NUM_DATA_POINTERS).retainReference();
            PointerPointer output_plane_ptr = frame.data();
            
            try {
                // Reset positions/limits so the loop below sees the full chunk and full planes.
                for (int i = 0; i < samples_in.length; i++) {
                    samples_in[i].position(0).limit(bytesRead);
                }
                
                for (int i = 0; i < samples_out.length; i++) {
                    samples_out[i].position(0).limit((int)samples_out[i].capacity());
                }
                
                while (true) {
                    // Whole input samples (per channel) left in this chunk.
                    int availableInputBytes = (int)(samples_in[0].limit() - samples_in[0].position());
                    int inputCount = availableInputBytes / (inputChannels * inputDepth);
                    
                    // Sample slots still free in the output plane.
                    int availableOutputBytes = (int)(samples_out[0].limit() - samples_out[0].position());
                    int outputCount = availableOutputBytes / outputDepth;
                    
                    // NOTE(review): these clamps are no-ops (int cannot exceed Integer.MAX_VALUE).
                    inputCount = Math.min(inputCount, Integer.MAX_VALUE);
                    outputCount = Math.min(outputCount, Integer.MAX_VALUE);
                    
                    if (inputCount <= 0) {
                        break;
                    }
                    if (outputCount <= 0) {
                        break;
                    }
                    
                    // Never feed more input than the remaining output space can hold
                    // (input and output rates are equal here, so this is effectively 1:1).
                    inputCount = Math.min(inputCount,
                            (outputCount * (int)AUDIO_CART_SAMPLE_RATE + codecContext.sample_rate() - 1) / codecContext.sample_rate());
                    
                    // Publish the current plane positions to the native pointer arrays.
                    for (int i = 0; i < samples_in.length; i++) {
                        input_plane_ptr.put(i, samples_in[i]);
                    }
                    for (int i = 0; i < samples_out.length; i++) {
                        output_plane_ptr.put(i, samples_out[i]);
                    }
                    
                    int ret;
                    // Serialize conversions; the SwrContext is shared mutable state.
                    synchronized (swrLock) {
                        ret = swr_convert(swrContext, output_plane_ptr, outputCount, input_plane_ptr, inputCount);
                    }
                    
                    if (ret < 0) {
                        throw new Exception("swr_convert() error " + ret + ": Cannot convert audio samples.");
                    } else if (ret == 0) {
                        break;
                    }
                    
                    // Advance input by bytes consumed and output by bytes produced (ret = samples written).
                    for (int i = 0; i < samples_in.length; i++) {
                        samples_in[i].position(samples_in[i].position() + inputCount * inputChannels * inputDepth);
                    }
                    for (int i = 0; i < samples_out.length; i++) {
                        samples_out[i].position(samples_out[i].position() + ret * outputDepth);
                    }
                    
                    // Hand the freshly converted samples to the accumulator/encoder.
                    accumulateSamplesAndEncode(ret, frameCount);
                    
                    if (samples_out[0].position() >= samples_out[0].limit()) {
                        break;
                    }
                }
            } finally {
                inputPointer.releaseReference();
                input_plane_ptr.releaseReference();
            }
        }
    }
    
    /**
     * Accumulates converted planar samples until at least one full MP3 frame
     * (MP3_FRAME_SIZE samples per channel) is buffered, then encodes it.
     *
     * Fix over the original: the overflow check/drain used to run inside the
     * per-plane copy loop, so a drain triggered while copying a later plane
     * could consume data an earlier plane had just written before this plane
     * received its copy, desynchronizing the planes. The drain now happens
     * once, before any plane is copied.
     *
     * @param convertedSamples number of samples per channel produced by swr_convert
     * @param frameCount       capture-loop iteration counter (diagnostics only)
     */
    private void accumulateSamplesAndEncode(int convertedSamples, int frameCount) throws Exception {
        int outputDepth = av_get_bytes_per_sample(codecContext.sample_fmt());
        int planes_out = av_sample_fmt_is_planar(codecContext.sample_fmt()) != 0 ? codecContext.ch_layout().nb_channels() : 1;
        
        // Reject invalid sample counts before touching any buffers.
        if (convertedSamples <= 0) {
            log.warn("任务 {} 接收到无效样本数: {}", taskId, convertedSamples);
            return;
        }
        
        // Lazily allocate one native accumulation buffer per plane.
        if (accumulatedSamples == null) {
            accumulatedSamples = new BytePointer[planes_out];
            for (int i = 0; i < planes_out; i++) {
                // Oversized (4x frame) buffer to make forced drains rare.
                int bufferSize = MP3_FRAME_SIZE * outputDepth * 4;
                Pointer ptr = av_malloc(bufferSize);
                accumulatedSamples[i] = new BytePointer(ptr).retainReference().capacity(bufferSize);
            }
            accumulatedSampleCount = 0;
            log.debug("任务 {} 初始化累积缓冲区，大小: {} 样本", taskId, MP3_FRAME_SIZE * 4);
        }
        
        int bytesToCopy = convertedSamples * outputDepth;
        int currentPos = accumulatedSampleCount * outputDepth;
        
        // Drain (or reset) BEFORE copying any plane so all planes stay aligned;
        // all planes share the same capacity and fill level.
        if (currentPos + bytesToCopy > accumulatedSamples[0].capacity()) {
            log.warn("任务 {} 累积缓冲区将溢出，强制编码存储的数据", taskId);
            
            if (accumulatedSampleCount >= MP3_FRAME_SIZE) {
                // Encode as many complete frames as we hold to free space.
                while (accumulatedSampleCount >= MP3_FRAME_SIZE) {
                    encodeOneFrame(frameCount);
                }
            } else {
                // Less than one frame buffered yet still overflowing: discard.
                log.warn("任务 {} 累积数据不足，重置缓冲区", taskId);
                accumulatedSampleCount = 0;
            }
            currentPos = accumulatedSampleCount * outputDepth;
        }
        
        // Append this chunk to every plane at the same offset.
        for (int plane = 0; plane < planes_out; plane++) {
            BytePointer srcPointer = new BytePointer(frame.data(plane));
            byte[] tempBuffer = new byte[bytesToCopy];
            srcPointer.position(0).get(tempBuffer);
            accumulatedSamples[plane].position(currentPos).put(tempBuffer);
        }
        
        accumulatedSampleCount += convertedSamples;
        
        // Encode every complete MP3 frame now available.
        while (accumulatedSampleCount >= MP3_FRAME_SIZE) {
            encodeOneFrame(frameCount);
        }
    }
    
    /**
     * Encodes exactly one MP3 frame (MP3_FRAME_SIZE samples per channel) taken
     * from the head of the accumulation buffer, then compacts the buffer by
     * shifting any remaining samples to its start.
     *
     * @param frameCount capture-loop counter — NOTE(review): unused here; PTS
     *                   is derived from totalEncodedFrames for continuity
     */
    private void encodeOneFrame(int frameCount) throws Exception {
        int outputDepth = av_get_bytes_per_sample(codecContext.sample_fmt());
        int planes_out = av_sample_fmt_is_planar(codecContext.sample_fmt()) != 0 ? codecContext.ch_layout().nb_channels() : 1;
        
        AVFrame encodeFrame = av_frame_alloc();
        try {
            encodeFrame.nb_samples(MP3_FRAME_SIZE);
            encodeFrame.format(codecContext.sample_fmt());
            av_channel_layout_copy(encodeFrame.ch_layout(), codecContext.ch_layout());
            encodeFrame.sample_rate(codecContext.sample_rate());
            
            // PTS counted in samples: advances by one full frame per encoded
            // frame, keeping timestamps continuous across the recording.
            long ptsValue = totalEncodedFrames * MP3_FRAME_SIZE;
            encodeFrame.pts(ptsValue);
            
            // Global counter drives the next frame's PTS.
            totalEncodedFrames++;
            
            if (av_frame_get_buffer(encodeFrame, 0) < 0) {
                throw new RuntimeException("Could not allocate encode frame buffer");
            }
            
            if (av_frame_make_writable(encodeFrame) < 0) {
                throw new RuntimeException("Could not make encode frame writable");
            }
            
            // Copy one frame's worth of samples from the head of each plane.
            for (int plane = 0; plane < planes_out; plane++) {
                BytePointer dstPointer = new BytePointer(encodeFrame.data(plane));
                int bytesToCopy = MP3_FRAME_SIZE * outputDepth;
                
                byte[] tempBuffer = new byte[bytesToCopy];
                accumulatedSamples[plane].position(0).get(tempBuffer);
                dstPointer.put(tempBuffer);
            }
            
            // Encode and write the frame's packets.
            encodeAudioFrame(encodeFrame);
            
            // One frame consumed from the accumulator.
            accumulatedSampleCount -= MP3_FRAME_SIZE;
            
            // Compact: shift leftover samples to the buffer head.
            if (accumulatedSampleCount > 0) {
                for (int plane = 0; plane < planes_out; plane++) {
                    int remainingBytes = accumulatedSampleCount * outputDepth;
                    int sourceOffset = MP3_FRAME_SIZE * outputDepth;
                    
                    byte[] tempBuffer = new byte[remainingBytes];
                    accumulatedSamples[plane].position(sourceOffset).get(tempBuffer);
                    accumulatedSamples[plane].position(0).put(tempBuffer);
                }
            }
            
        } finally {
            av_frame_free(encodeFrame);
        }
    }
    
    /**
     * Sends one frame to the MP3 encoder and drains all packets it produces,
     * appending their bytes to the output file.
     *
     * @param frame frame to encode (shadows the class field of the same name)
     */
    private void encodeAudioFrame(AVFrame frame) throws Exception {
        int ret = avcodec_send_frame(codecContext, frame);
        if (ret < 0) {
            // NOTE(review): the frame is dropped silently here and capture continues.
            log.error("任务 {} 发送帧到编码器失败: {}", taskId, ret);
            return;
        }
        
        while (ret >= 0) {
            ret = avcodec_receive_packet(codecContext, packet);
            if (ret == AVERROR_EAGAIN() || ret == AVERROR_EOF) {
                break; // encoder needs more input / is fully drained
            } else if (ret < 0) {
                log.error("任务 {} 编码过程中出错: {}", taskId, ret);
                break;
            }
            
            BytePointer packetData = packet.data();
            int packetSize = packet.size();
            if (packetData != null && packetSize > 0) {
                byte[] mp3Data = new byte[packetSize];
                packetData.get(mp3Data);
                // append=true: packets accumulate into a raw MP3 stream.
                FileUtil.writeBytes(mp3Data, new File(outputFileName), 0, mp3Data.length, true);
            }
            
            av_packet_unref(packet);
        }
    }
    
    /**
     * Encodes any complete MP3 frames still sitting in the accumulation buffer
     * at the end of capture. Remaining partial-frame samples (fewer than
     * MP3_FRAME_SIZE) are discarded, matching the original behavior.
     *
     * Fix over the original: tail frames computed their PTS from the capture
     * loop's frameCount — which counts device reads, not encoded frames —
     * producing a PTS discontinuity at the end of the file. Delegating to
     * encodeOneFrame() keeps the totalEncodedFrames-based PTS sequence
     * continuous and also keeps accumulatedSampleCount consistent (the
     * original never decremented it here).
     *
     * @param frameCount capture-loop iteration counter (diagnostics only)
     */
    private void flushAccumulatedSamples(int frameCount) throws Exception {
        if (accumulatedSamples == null) {
            return;
        }
        while (accumulatedSampleCount >= MP3_FRAME_SIZE) {
            encodeOneFrame(frameCount);
        }
    }
    
    /**
     * Flushes the encoder: a null frame puts it into draining mode, then all
     * remaining packets are received and appended to the output file.
     */
    private void flushEncoder() throws Exception {
        int ret = avcodec_send_frame(codecContext, null);
        while (ret >= 0) {
            ret = avcodec_receive_packet(codecContext, packet);
            if (ret == AVERROR_EAGAIN() || ret == AVERROR_EOF) {
                break; // fully drained
            } else if (ret < 0) {
                break;
            }
            
            BytePointer packetData = packet.data();
            int packetSize = packet.size();
            if (packetData != null && packetSize > 0) {
                byte[] mp3Data = new byte[packetSize];
                packetData.get(mp3Data);
                // append=true: final packets complete the raw MP3 stream.
                FileUtil.writeBytes(mp3Data, new File(outputFileName), 0, mp3Data.length, true);
            }
            av_packet_unref(packet);
        }
    }
    
    /**
     * Releases the audio device and all native FFmpeg resources. Fields are
     * nulled after release so a repeated invocation is a safe no-op.
     *
     * Fix over the original: the native SwrContext was only
     * releaseReference()'d but never swr_free()'d, leaking the resampler's
     * internal native allocations; device close is also guarded so a failing
     * line cannot abort the rest of the cleanup.
     */
    private void cleanup() {
        log.debug("任务 {} 清理资源", taskId);
        
        if (targetDataLine != null) {
            try {
                targetDataLine.stop();
                targetDataLine.close();
            } catch (Exception e) {
                log.warn("任务 {} 关闭音频设备失败", taskId, e);
            }
            targetDataLine = null;
        }
        
        if (accumulatedSamples != null) {
            for (int i = 0; i < accumulatedSamples.length; i++) {
                if (accumulatedSamples[i] != null) {
                    // Allocated with av_malloc; rewind the pointer before freeing.
                    av_free(accumulatedSamples[i].position(0));
                    accumulatedSamples[i].releaseReference();
                }
            }
            accumulatedSamples = null;
        }
        
        if (frame != null) {
            av_frame_free(frame);
            frame = null;
        }
        
        if (codecContext != null) {
            avcodec_free_context(codecContext);
            codecContext = null;
        }
        
        if (swrContext != null) {
            // Free the native resampler state (previously leaked), then drop the Java reference.
            swr_free(swrContext);
            swrContext.releaseReference();
            swrContext = null;
        }
        
        if (packet != null) {
            av_packet_free(packet);
            packet = null;
        }
        
        if (channelLayout != null) {
            channelLayout.releaseReference();
            channelLayout = null;
        }
        
        log.debug("任务 {} 资源清理完成", taskId);
    }
}