package com.hushan.audio;

import cn.hutool.core.io.FileUtil;
import io.quarkus.runtime.annotations.RegisterForReflection;
import lombok.extern.slf4j.Slf4j;
import org.bytedeco.ffmpeg.avcodec.AVCodec;
import org.bytedeco.ffmpeg.avcodec.AVCodecContext;
import org.bytedeco.ffmpeg.avcodec.AVCodecParameters;
import org.bytedeco.ffmpeg.avcodec.AVPacket;
import org.bytedeco.ffmpeg.avformat.AVFormatContext;
import org.bytedeco.ffmpeg.avformat.AVInputFormat;
import org.bytedeco.ffmpeg.avformat.AVStream;
import org.bytedeco.ffmpeg.avutil.AVChannelLayout;
import org.bytedeco.ffmpeg.avutil.AVDictionary;
import org.bytedeco.ffmpeg.avutil.AVFrame;
import org.bytedeco.ffmpeg.avutil.AVRational;
import org.bytedeco.ffmpeg.global.avformat;
import org.bytedeco.ffmpeg.global.avutil;
import org.bytedeco.ffmpeg.swresample.SwrContext;
import org.bytedeco.javacpp.*;

import java.io.File;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.bytedeco.ffmpeg.global.avcodec.*;
import static org.bytedeco.ffmpeg.global.avdevice.avdevice_register_all;
import static org.bytedeco.ffmpeg.global.avformat.*;
import static org.bytedeco.ffmpeg.global.avutil.*;
import static org.bytedeco.ffmpeg.global.swresample.*;

/**
 * 音频采集任务 - 封装音频采集、重采样、MP3编码的完整流程
 * 使用FFmpeg实现，解决native模式兼容性问题
 */
@Slf4j
@RegisterForReflection
public class FFmpegAudioCaptureTask {

    // Target capture/encode sample rate in Hz (44.1 kHz, CD quality)
    private final static float AUDIO_CART_SAMPLE_RATE = 44100f;
    // Guards all swr_convert calls — a SwrContext is not thread-safe
    private final Object swrLock = new Object();

    // Task lifecycle flags: isRunning gates startAsync(), shouldStop asks the loop to exit
    private final AtomicBoolean isRunning = new AtomicBoolean(false);
    private final AtomicBoolean shouldStop = new AtomicBoolean(false);
    private CompletableFuture<Void> captureTask;

    // Immutable task configuration supplied by the constructor
    private final String taskId;
    private final String outputFileName;
    private final int mixerIndex;
    private final long durationSeconds;

    // FFmpeg native components — allocated during initialization, released in cleanup()
    private AVFormatContext inputFormatContext;
    private AVStream audioStream;
    private AVCodecContext inputCodecContext;
    private AVCodecContext outputCodecContext;
    private SwrContext swrContext;
    private AVFrame inputFrame;
    private AVFrame outputFrame;
    private AVPacket inputPacket;
    private AVPacket outputPacket;
    private AVChannelLayout channelLayout;

    // Encoding buffers: one planar byte buffer per channel, plus sample counters
    private BytePointer[] accumulatedSamples = null;
    private int accumulatedSampleCount = 0;
    private long totalEncodedFrames = 0;
    private long totalSamplesProcessed = 0; // samples actually encoded; drives PTS computation
    
    // Device detector used to resolve mixerIndex into an FFmpeg device name
    private FFmpegDeviceDetector deviceDetector = new FFmpegDeviceDetector();

    /**
     * Creates a capture task.
     *
     * @param taskId          logical identifier used in log messages
     * @param outputFileName  path of the MP3 file encoded packets are appended to
     * @param mixerIndex      index of the audio input device to capture from
     * @param durationSeconds maximum capture duration in seconds
     */
    public FFmpegAudioCaptureTask(String taskId, String outputFileName, int mixerIndex, long durationSeconds) {
        this.taskId = taskId;
        this.outputFileName = outputFileName;
        this.mixerIndex = mixerIndex;
        this.durationSeconds = durationSeconds;
    }

    /**
     * Starts the asynchronous audio capture task.
     *
     * @return the future that completes when capture finishes (cleanup included)
     * @throws IllegalStateException if the task is already running
     */
    public CompletableFuture<Void> startAsync() {
        if (isRunning.compareAndSet(false, true)) {
            shouldStop.set(false);
            // Fix: run on a dedicated daemon thread instead of the default
            // ForkJoinPool.commonPool() — the capture loop blocks on device I/O
            // for up to durationSeconds and would starve the shared pool.
            captureTask = CompletableFuture
                    .runAsync(this::runCaptureTask, runnable -> {
                        Thread worker = new Thread(runnable, "audio-capture-" + taskId);
                        worker.setDaemon(true); // commonPool threads are daemon too
                        worker.start();
                    })
                    .whenComplete((result, throwable) -> {
                        isRunning.set(false);
                        cleanup();
                        if (throwable != null) {
                            log.error("音频采集任务 {} 执行失败", taskId, throwable);
                        } else {
                            log.info("音频采集任务 {} 完成", taskId);
                        }
                    });
            log.info("音频采集任务 {} 已启动", taskId);
            return captureTask;
        } else {
            throw new IllegalStateException("任务 " + taskId + " 已在运行中");
        }
    }

    /**
     * Requests the capture loop to stop and blocks until the task has finished.
     * Logs a warning and returns immediately if the task is not running.
     */
    public void stop() {
        if (!isRunning.get()) {
            log.warn("任务 {} 未在运行", taskId);
            return;
        }

        shouldStop.set(true);
        log.info("正在停止音频采集任务 {}", taskId);

        CompletableFuture<Void> task = captureTask;
        if (task == null) {
            return;
        }
        try {
            // Block until the worker observes shouldStop and completes
            task.join();
        } catch (Exception e) {
            log.warn("等待任务 {} 停止时发生异常", taskId, e);
        }
    }

    /**
     * Reports whether the capture task is currently running.
     */
    public boolean isRunning() {
        return isRunning.get();
    }

    /** Returns the logical task identifier. */
    public String getTaskId() {
        return taskId;
    }

    /** Returns the MP3 output file path. */
    public String getOutputFileName() {
        return outputFileName;
    }

    /**
     * Entry point of the capture worker: initializes all FFmpeg components
     * and then runs the capture/encode loop until stopped or the duration
     * elapses. Any failure is wrapped in a RuntimeException so the
     * CompletableFuture completes exceptionally.
     */
    private void runCaptureTask() {
        try {
            // Order matters: the input device must be open before the encoder
            // and resampler can be configured against its sample format/rate.
            initializeFFmpeg();
            initializeInputDevice();
            initializeMP3Encoder();
            initializeResampler();

            performAudioCapture();

        } catch (Exception e) {
            log.error("音频采集任务 {} 执行过程中发生错误", taskId, e);
            throw new RuntimeException("音频采集任务执行失败", e);
        }
    }

    /**
     * Loads the required FFmpeg native libraries and performs one-time global
     * initialization (log level, JVM hookup, network, capture devices).
     */
    private void initializeFFmpeg() {
        log.debug("任务 {} 初始化FFmpeg", taskId);

        // Load order follows native library dependencies: util -> swresample -> codec -> format
        Loader.load(avutil.class);
        Loader.load(org.bytedeco.ffmpeg.global.swresample.class);
        Loader.load(org.bytedeco.ffmpeg.global.avcodec.class);
        Loader.load(avformat.class);
        av_log_set_level(AV_LOG_INFO);
        av_jni_set_java_vm(Loader.getJavaVM(), null);
        avformat_network_init();
        // avdevice must be loaded and registered before capture devices can be opened
        Loader.load(org.bytedeco.ffmpeg.global.avdevice.class);
        avdevice_register_all();
    }

    /**
     * Opens the OS-specific audio capture device, locates its audio stream,
     * and opens a matching decoder.
     *
     * <p>Populates {@code inputFormatContext}, {@code audioStream},
     * {@code inputCodecContext} and {@code inputFrame}; all are released in
     * {@code cleanup()}.</p>
     *
     * @throws RuntimeException on any FFmpeg failure (device open, stream
     *                          probe, decoder setup)
     */
    private void initializeInputDevice() {
        log.debug("任务 {} 初始化音频采集设备", taskId);

        try (PointerScope scope = new PointerScope()) {
            // Pick the capture backend for the current OS (dshow/alsa/avfoundation)
            String inputFormatName = getSystemInputFormat();
            log.info("任务 {} 使用输入格式: {}", taskId, inputFormatName);

            AVInputFormat inputFormat = av_find_input_format(inputFormatName);
            if (inputFormat == null) {
                throw new RuntimeException("无法找到输入格式: " + inputFormatName);
            }

            // Allocate the input format context
            inputFormatContext = avformat_alloc_context();
            if (inputFormatContext == null) {
                throw new RuntimeException("无法分配输入格式上下文");
            }

            // Resolve mixerIndex into a backend-specific device string
            String deviceName = buildDeviceName(inputFormatName);
            log.info("任务 {} 尝试打开设备: {}", taskId, deviceName);

            // Options dictionary passed to avformat_open_input
            AVDictionary options = new AVDictionary(null);

            // Request the target sample rate from the device
            if (AUDIO_CART_SAMPLE_RATE > 0) {
                av_dict_set(options, "sample_rate", String.valueOf((int)AUDIO_CART_SAMPLE_RATE), 0);
            }

            // Backend-specific options
            if ("dshow".equals(inputFormatName)) {
                // Windows DirectShow: small audio buffer, large realtime buffer
                av_dict_set(options, "audio_buffer_size", "50", 0);
                av_dict_set(options, "rtbufsize", "702000k", 0);
                av_dict_set(options, "channels", "2", 0);
            } else if ("alsa".equals(inputFormatName)) {
                // Linux ALSA
                av_dict_set(options, "channels", "2", 0);
            } else if ("avfoundation".equals(inputFormatName)) {
                // macOS AVFoundation
                av_dict_set(options, "channels", "2", 0);
            }

            // Open the capture device
            int result = avformat_open_input(inputFormatContext, deviceName, inputFormat, options);
            if (result < 0) {
                av_dict_free(options);
                throw new RuntimeException(String.format("无法打开音频输入设备: %s, 错误码: %d", deviceName, result));
            }
            av_dict_free(options);

            // -1 = no artificial demuxer delay cap
            inputFormatContext.max_delay(-1);

            // Probe the stream parameters
            result = avformat_find_stream_info(inputFormatContext, (PointerPointer) null);
            if (result < 0) {
                throw new RuntimeException("无法找到音频流信息, 错误码: " + result);
            }

            // Dump format details when verbose logging is on (debug aid)
            if (av_log_get_level() >= AV_LOG_INFO) {
                av_dump_format(inputFormatContext, 0, deviceName, 0);
            }

            // Find the best audio stream
            int audioStreamIndex = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, (AVCodec) null, 0);
            if (audioStreamIndex < 0) {
                throw new RuntimeException("未找到音频流");
            }

            audioStream = inputFormatContext.streams(audioStreamIndex);
            AVCodecParameters codecParameters = audioStream.codecpar();

            // Locate the matching decoder
            AVCodec decoder = avcodec_find_decoder(codecParameters.codec_id());
            if (decoder == null) {
                throw new RuntimeException("未找到音频解码器，codec_id: " + codecParameters.codec_id());
            }

            // Allocate the decoder context
            inputCodecContext = avcodec_alloc_context3(decoder);
            if (inputCodecContext == null) {
                throw new RuntimeException("无法分配音频解码器上下文");
            }

            // Copy stream parameters into the decoder context
            result = avcodec_parameters_to_context(inputCodecContext, codecParameters);
            if (result < 0) {
                throw new RuntimeException("无法复制流参数到解码器上下文, 错误码: " + result);
            }

            // 0 = let FFmpeg pick the decoding thread count
            inputCodecContext.thread_count(0);

            // Open the decoder
            result = avcodec_open2(inputCodecContext, decoder, (AVDictionary) null);
            if (result < 0) {
                throw new RuntimeException("无法打开音频解码器, 错误码: " + result);
            }

            // Reusable frame for decoded audio
            inputFrame = av_frame_alloc();
            if (inputFrame == null) {
                throw new RuntimeException("无法分配音频输入帧");
            }

            log.info("任务 {} 音频输入设备初始化完成，采样率: {}, 声道数: {}, 格式: {}, 设备: {}",
                    taskId,
                    inputCodecContext.sample_rate(),
                    inputCodecContext.ch_layout().nb_channels(),
                    av_get_sample_fmt_name(inputCodecContext.sample_fmt()).getString(),
                    deviceName);
        }
    }

    /**
     * Chooses the FFmpeg capture input format for the current operating system.
     *
     * @return "dshow" on Windows (and as the fallback), "alsa" on Linux,
     *         "avfoundation" on macOS
     */
    private String getSystemInputFormat() {
        // Fix: use Locale.ROOT so the match is not broken by locale-sensitive
        // casing rules (e.g. the Turkish dotless i).
        String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT);

        if (osName.contains("windows")) {
            return "dshow";
        }
        if (osName.contains("linux")) {
            return "alsa";
        }
        if (osName.contains("mac")) {
            return "avfoundation";
        }
        return "dshow"; // unknown OS — default to DirectShow
    }

    /**
     * Builds the FFmpeg device string, preferring the device list reported by
     * {@code FFmpegDeviceDetector} and falling back to hard-coded defaults.
     *
     * @param inputFormat FFmpeg input format name ("dshow", "alsa", "avfoundation")
     * @return a device string accepted by avformat_open_input for that format
     */
    private String buildDeviceName(String inputFormat) {
        try {
            // Try to resolve mixerIndex against the detected device list
            List<AudioMixerInfo> devices = deviceDetector.getAudioInputDevices();

            // Fix: also reject negative indices, which previously reached devices.get()
            if (devices != null && !devices.isEmpty() && mixerIndex >= 0 && mixerIndex < devices.size()) {
                AudioMixerInfo deviceInfo = devices.get(mixerIndex);
                String deviceName = deviceInfo.getName();

                if (deviceName != null && !deviceName.trim().isEmpty()) {
                    log.info("任务 {} 使用FFmpeg检测到的设备 [{}]: {}", taskId, mixerIndex, deviceName);

                    // Format the device string per platform
                    if ("dshow".equals(inputFormat)) {
                        // Windows DirectShow: audio=<device name>
                        return "audio=" + deviceName;
                    } else if ("alsa".equals(inputFormat)) {
                        // Linux ALSA: default device for index 0, else hw:N
                        if (mixerIndex == 0) {
                            return "default";
                        }
                        return deviceName.startsWith("hw:") ? deviceName : "hw:" + mixerIndex;
                    } else if ("avfoundation".equals(inputFormat)) {
                        // macOS AVFoundation: ":<index>"
                        return ":" + mixerIndex;
                    }

                    return deviceName;
                }
            }

            log.warn("任务 {} 无法从FFmpeg设备列表获取设备名称（index: {}），使用默认配置", taskId, mixerIndex);

        } catch (Exception e) {
            log.warn("任务 {} FFmpeg设备检测失败，使用默认配置: {}", taskId, e.getMessage());
        }

        // Fall back to the hard-coded device-name heuristics
        return buildDefaultDeviceName(inputFormat);
    }
    
    /**
     * Builds a fallback device string from hard-coded defaults, used when
     * FFmpeg device detection fails or returns nothing usable.
     *
     * @param inputFormat FFmpeg input format name
     * @return a best-guess device string for the platform
     */
    private String buildDefaultDeviceName(String inputFormat) {
        switch (inputFormat) {
            case "dshow":
                // Windows DirectShow: a common default mixer name, else a GUID path by index
                if (mixerIndex == 0) {
                    return "audio=立体声混音 (Realtek High Definition Audio)";
                }
                return "audio=@device_cm_{33D9A762-90C8-11d0-BD43-00A0C911CE86}\\wave_{" + mixerIndex + "}";
            case "alsa":
                // Linux ALSA: default device or hw:N
                return mixerIndex == 0 ? "default" : "hw:" + mixerIndex;
            case "avfoundation":
                // macOS AVFoundation addresses devices by index (:0, :1, ...)
                return ":" + mixerIndex;
            default:
                return "default";
        }
    }

    /**
     * Creates and opens the MP3 (libmp3lame) encoder: 128 kbps, 44.1 kHz,
     * stereo, planar s16, with a 1/sample_rate time base so PTS values are
     * plain sample counts. Also allocates the reusable output frame/packet.
     *
     * <p>Fix: the results of {@code avcodec_alloc_context3},
     * {@code av_frame_alloc} and {@code av_packet_alloc} were not checked,
     * which would surface later as an opaque NPE or native crash.</p>
     *
     * @throws RuntimeException if any encoder component cannot be allocated or opened
     */
    private void initializeMP3Encoder() {
        log.debug("任务 {} 初始化MP3编码器", taskId);

        AVCodec mp3Codec = avcodec_find_encoder(AV_CODEC_ID_MP3);
        if (mp3Codec == null) {
            throw new RuntimeException("未找到MP3编码器");
        }

        outputCodecContext = avcodec_alloc_context3(mp3Codec);
        if (outputCodecContext == null) {
            throw new RuntimeException("无法分配MP3编码器上下文");
        }
        outputCodecContext.codec_id(mp3Codec.id());
        outputCodecContext.codec_type(AVMEDIA_TYPE_AUDIO);
        outputCodecContext.bit_rate(128000);
        outputCodecContext.sample_fmt(AV_SAMPLE_FMT_S16P);
        outputCodecContext.sample_rate((int)AUDIO_CART_SAMPLE_RATE);

        // retainReference() keeps the wrapper alive until cleanup() releases it
        channelLayout = new AVChannelLayout().retainReference();
        av_channel_layout_default(channelLayout, 2); // stereo
        outputCodecContext.ch_layout(channelLayout);

        // Time base 1/sample_rate so PTS can be expressed directly in samples
        AVRational time_base = new AVRational();
        time_base.num(1);
        time_base.den((int)AUDIO_CART_SAMPLE_RATE);
        outputCodecContext.time_base(time_base);
        outputCodecContext.bits_per_raw_sample(16);

        if ((mp3Codec.capabilities() & AV_CODEC_CAP_EXPERIMENTAL) != 0) {
            outputCodecContext.strict_std_compliance(FF_COMPLIANCE_EXPERIMENTAL);
        }

        int result = avcodec_open2(outputCodecContext, mp3Codec, (AVDictionary) null);
        if (result < 0) {
            throw new RuntimeException("无法打开MP3编码器，错误码: " + result);
        }

        // Reusable output frame sized to the encoder's fixed frame_size
        outputFrame = av_frame_alloc();
        if (outputFrame == null) {
            throw new RuntimeException("无法分配输出音频帧");
        }
        outputFrame.nb_samples(outputCodecContext.frame_size());
        outputFrame.format(outputCodecContext.sample_fmt());
        av_channel_layout_copy(outputFrame.ch_layout(), outputCodecContext.ch_layout());
        outputFrame.sample_rate(outputCodecContext.sample_rate());

        if (av_frame_get_buffer(outputFrame, 0) < 0) {
            throw new RuntimeException("无法分配输出音频帧缓冲区");
        }

        if (av_frame_make_writable(outputFrame) < 0) {
            throw new RuntimeException("无法使输出音频帧可写");
        }

        outputPacket = av_packet_alloc();
        if (outputPacket == null) {
            throw new RuntimeException("无法分配输出音频包");
        }

        log.info("任务 {} MP3编码器初始化完成，采样率: {}Hz, 比特率: {}bps, 格式: {}", 
                taskId, outputCodecContext.sample_rate(), outputCodecContext.bit_rate(), 
                av_get_sample_fmt_name(outputCodecContext.sample_fmt()).getString());
    }

    /**
     * Configures the software resampler (SwrContext) to convert from the
     * input device's sample format/rate/layout to the MP3 encoder's, then
     * allocates the per-channel sample accumulation buffers.
     *
     * @throws RuntimeException if the resampler cannot be configured or initialized
     */
    private void initializeResampler() {
        log.debug("任务 {} 初始化重采样器", taskId);

        // retainReference() keeps the JavaCPP wrapper alive until cleanup()
        swrContext = new SwrContext().retainReference();

        // Output side first, input side second (swr_alloc_set_opts2 convention)
        int ret = swr_alloc_set_opts2(swrContext,
                outputCodecContext.ch_layout(), outputCodecContext.sample_fmt(), outputCodecContext.sample_rate(),
                inputCodecContext.ch_layout(), inputCodecContext.sample_fmt(), inputCodecContext.sample_rate(),
                0, null);

        if (ret < 0) {
            throw new RuntimeException("无法初始化重采样器，错误码: " + ret);
        }

        if (swr_init(swrContext) < 0) {
            throw new RuntimeException("无法启动重采样器");
        }

        // Allocate accumulation buffers sized off the encoder's frame_size
        initializeAccumulationBuffer();

        log.info("任务 {} 重采样器初始化完成，输入采样率: {}Hz, 输出采样率: {}Hz, 输入格式: {}, 输出格式: {}",
                taskId, 
                inputCodecContext.sample_rate(), 
                outputCodecContext.sample_rate(),
                av_get_sample_fmt_name(inputCodecContext.sample_fmt()).getString(),
                av_get_sample_fmt_name(outputCodecContext.sample_fmt()).getString());
    }

    /**
     * Allocates the per-channel planar accumulation buffers. Each channel gets
     * 4x the encoder frame size so several resampler outputs can be batched
     * before a full MP3 frame is encoded.
     */
    private void initializeAccumulationBuffer() {
        final int channels = outputCodecContext.ch_layout().nb_channels();
        final int bytesPerSample = av_get_bytes_per_sample(outputCodecContext.sample_fmt());
        final int perChannelBytes = outputCodecContext.frame_size() * bytesPerSample * 4; // 4x headroom

        accumulatedSamples = new BytePointer[channels];
        for (int ch = 0; ch < channels; ch++) {
            accumulatedSamples[ch] = new BytePointer(perChannelBytes);
        }
        accumulatedSampleCount = 0;

        log.debug("任务 {} 累积缓冲区初始化完成，大小: {} 字节/声道，帧大小: {} 样本", taskId, perChannelBytes, outputCodecContext.frame_size());
    }

    /**
     * Main capture loop: reads packets from the input device, decodes them,
     * and feeds decoded frames into the resample/encode pipeline until the
     * task is stopped or the configured duration elapses. Afterwards drains
     * the decoder, resampler, accumulation buffers and encoder in that order.
     *
     * @throws Exception if the input packet cannot be allocated or processing aborts
     */
    private void performAudioCapture() throws Exception {
        log.info("任务 {} 开始音频采集，预计时长: {}秒", taskId, durationSeconds);

        long startTime = System.nanoTime();
        // durationSeconds * 1e9 — capture deadline in nanoseconds
        long maxDurationNanos = durationSeconds * 1000 * 1000000L;
        int frameCount = 0;
        int packetCount = 0;

        inputPacket = av_packet_alloc();
        if (inputPacket == null) {
            throw new RuntimeException("无法分配输入包");
        }

        // stream_index == -1 serves as a "packet holds no data" sentinel
        inputPacket.stream_index(-1);

        try {
            while (!shouldStop.get() && (System.nanoTime() - startTime) < maxDurationNanos) {
                try (PointerScope scope = new PointerScope()) {
                    // Release the previous packet's buffers before reusing it
                    if (inputPacket.stream_index() != -1) {
                        av_packet_unref(inputPacket);
                        inputPacket.stream_index(-1);
                    }

                    int result = av_read_frame(inputFormatContext, inputPacket);
                    if (result < 0) {
                        if (result == AVERROR_EAGAIN()) {
                            // No data available yet — back off briefly and retry
                            try {
                                Thread.sleep(1);
                                continue;
                            } catch (InterruptedException ex) {
                                Thread.currentThread().interrupt();
                                break;
                            }
                        } else if (result == AVERROR_EOF) {
                            log.info("音频流结束");
                            break;
                        } else {
                            log.warn("读取音频帧失败: {}", result);
                            continue;
                        }
                    }

                    packetCount++;

                    // Skip packets that do not belong to the selected audio stream
                    if (inputPacket.stream_index() != audioStream.index()) {
                        continue;
                    }

                    // Feed the packet to the decoder
                    result = avcodec_send_packet(inputCodecContext, inputPacket);
                    if (result < 0 && result != AVERROR_EAGAIN()) {
                        log.warn("发送解码包失败: {}", result);
                        continue;
                    }

                    // Pull every decoded frame this packet produced
                    while (result >= 0) {
                        result = avcodec_receive_frame(inputCodecContext, inputFrame);
                        if (result == AVERROR_EAGAIN() || result == AVERROR_EOF()) {
                            break;
                        } else if (result < 0) {
                            log.warn("接收解码帧失败: {}", result);
                            break;
                        }

                        // Resample, accumulate and encode
                        processDecodedFrame(inputFrame);
                        frameCount++;

                        // Progress log every 1000 frames
                        if (frameCount % 1000 == 0) {
                            long elapsedNanos = System.nanoTime() - startTime;
                            long elapsedSeconds = elapsedNanos / 1000000000L;
                            log.debug("任务 {} 处理进度: {}秒, 帧数: {}, 包数: {}",
                                    taskId, elapsedSeconds, frameCount, packetCount);
                        }
                    }
                }
            }

            // Drain order matters: decoder -> resampler -> sample buffer -> encoder
            flushDecoder();

            flushResampler();

            flushAccumulatedSamples();

            flushEncoder();

        } finally {
            if (inputPacket != null && inputPacket.stream_index() != -1) {
                av_packet_unref(inputPacket);
            }
        }

        long actualDuration = (System.nanoTime() - startTime) / 1000000000L;
        
        // Gather output-file stats for the summary log
        File outputFile = new File(outputFileName);
        String fileFormat = "MP3"; // encoding is fixed to MP3
        long fileSize = outputFile.exists() ? outputFile.length() : 0;
        long bitRate = (actualDuration > 0) ? (fileSize * 8 / actualDuration) : 0;
        
        log.info("任务 {} 音频采集完成，实际时长: {}秒，处理帧数: {}, 包数: {}, 格式: {}, 采样率: {}Hz, 比特率: {}bps, 路径: {}, 大小: {}字节",
                taskId, actualDuration, frameCount, packetCount, fileFormat, 
                (int)AUDIO_CART_SAMPLE_RATE, bitRate, outputFileName, fileSize);
    }

    /**
     * Drains frames still buffered inside the decoder after capture ends.
     */
    private void flushDecoder() {
        try {
            // A null packet switches the decoder into draining mode
            avcodec_send_packet(inputCodecContext, null);

            for (;;) {
                int ret = avcodec_receive_frame(inputCodecContext, inputFrame);
                if (ret == AVERROR_EOF) {
                    break; // decoder fully drained
                }
                if (ret < 0) {
                    log.debug("刷新解码器时接收帧失败: {}", ret);
                    break;
                }
                processDecodedFrame(inputFrame);
            }
        } catch (Exception e) {
            log.warn("刷新解码器时发生错误", e);
        }
    }
    
    /**
     * Drains samples still buffered inside the resampler after input ends.
     * Passing a null input to {@code swr_convert} asks it to emit queued data.
     *
     * <p>Fix: the original allocated an {@code AVFrame} it never used (only
     * freed) and duplicated the drain call before the loop; this version
     * drains everything with a single loop and no stray allocation.</p>
     */
    private void flushResampler() {
        try {
            synchronized (swrLock) {
                int outputBufferSize = outputFrame.nb_samples();
                int outputSamples;
                do {
                    // null/0 input = emit buffered samples only
                    outputSamples = swr_convert(swrContext,
                            outputFrame.data(), outputBufferSize,
                            null, 0);

                    if (outputSamples > 0) {
                        accumulateSamplesAndEncode(outputFrame, outputSamples);
                    }
                } while (outputSamples > 0);
            }
        } catch (Exception e) {
            log.warn("刷新重采样器时发生错误", e);
        }
    }

    /**
     * Resamples one decoded frame into the encoder's format and feeds the
     * converted samples into the accumulation/encode pipeline.
     *
     * @param decodedFrame frame freshly received from the input decoder
     */
    private void processDecodedFrame(AVFrame decodedFrame) {
        try (PointerScope scope = new PointerScope()) {
            synchronized (swrLock) {
                // Output is capped at outputFrame.nb_samples() per call.
                // NOTE(review): presumably SwrContext queues any excess samples
                // internally until the next call / flush — confirm against
                // swr_convert docs if rates differ significantly.
                int outputBufferSize = outputFrame.nb_samples();
                int outputSamples = swr_convert(swrContext,
                        outputFrame.data(), outputBufferSize,
                        decodedFrame.data(), decodedFrame.nb_samples());

                if (outputSamples < 0) {
                    log.warn("重采样失败: {}", outputSamples);
                    return;
                }

                if (outputSamples > 0) {
                    // Accumulate converted samples and encode full frames
                    accumulateSamplesAndEncode(outputFrame, outputSamples);
                }
            }
        }
    }

    /**
     * Appends resampled planar samples to the accumulation buffers and encodes
     * complete MP3 frames as soon as enough samples are available.
     *
     * <p>Fix: the original called {@code encodeAccumulatedFrame()} from inside
     * the per-channel copy loop. Encoding mutates {@code accumulatedSampleCount},
     * so channels copied after the encode used a different write offset than
     * channels copied before it, corrupting the buffer. Full frames are now
     * drained BEFORE any channel is copied, and progress guards prevent an
     * infinite loop if the encoder stops consuming samples.</p>
     *
     * @param frame       planar frame holding the resampled data
     * @param sampleCount number of valid samples per channel in {@code frame}
     */
    private void accumulateSamplesAndEncode(AVFrame frame, int sampleCount) {
        int channels = frame.ch_layout().nb_channels();
        int sampleSize = av_get_bytes_per_sample(frame.format());
        int chunkBytes = sampleCount * sampleSize;

        // Make room BEFORE copying so every channel sees the same write offset
        while (accumulatedSampleCount >= outputCodecContext.frame_size()
                && (long) accumulatedSampleCount * sampleSize + chunkBytes > accumulatedSamples[0].capacity()) {
            int before = accumulatedSampleCount;
            encodeAccumulatedFrame();
            if (accumulatedSampleCount >= before) {
                break; // encoder made no progress — avoid spinning forever
            }
        }

        for (int channel = 0; channel < channels; channel++) {
            BytePointer sourcePointer = new BytePointer(frame.data(channel));
            BytePointer targetPointer = accumulatedSamples[channel].position((long) accumulatedSampleCount * sampleSize);

            // Copy through a heap array (JavaCPP bulk transfer)
            byte[] tempBuffer = new byte[chunkBytes];
            sourcePointer.get(tempBuffer);
            targetPointer.put(tempBuffer);
        }

        accumulatedSampleCount += sampleCount;

        // Encode as many complete frames as we now have
        while (accumulatedSampleCount >= outputCodecContext.frame_size()) {
            int before = accumulatedSampleCount;
            encodeAccumulatedFrame();
            if (accumulatedSampleCount >= before) {
                break; // no progress — bail out rather than loop forever
            }
        }
    }

    /**
     * Encodes exactly one MP3 frame (frame_size samples per channel) from the
     * head of the accumulation buffers, writes the resulting packets, then
     * shifts any leftover samples to the front of each buffer.
     *
     * <p>Fix: the original computed {@code src} and {@code dst} by calling
     * {@code position()} twice on the SAME {@code BytePointer} — JavaCPP's
     * {@code position()} mutates the pointer and returns {@code this}, so the
     * second call reset the first, and the "remaining" tail was actually read
     * from offset 0, duplicating already-encoded samples. The tail is now read
     * into a heap buffer BEFORE the pointer is repositioned for the write.
     * Also clamps {@code accumulatedSampleCount} at 0 so a final partial frame
     * cannot leave it negative.</p>
     */
    private void encodeAccumulatedFrame() {
        try {
            int frameSize = outputCodecContext.frame_size();
            int sampleSize = av_get_bytes_per_sample(outputFrame.format());
            int channels = outputFrame.ch_layout().nb_channels();

            // Fill the reusable output frame from the head of each channel buffer
            for (int i = 0; i < channels; i++) {
                int frameBytes = frameSize * sampleSize;
                byte[] tempBuffer = new byte[frameBytes];
                accumulatedSamples[i].position(0).get(tempBuffer);
                new BytePointer(outputFrame.data(i)).position(0).put(tempBuffer);
            }

            outputFrame.nb_samples(frameSize);
            // PTS in samples — the encoder time base is 1/sample_rate
            outputFrame.pts(totalSamplesProcessed);

            int result = avcodec_send_frame(outputCodecContext, outputFrame);
            if (result < 0) {
                log.warn("发送编码帧失败: {}", result);
                return; // samples stay buffered; callers guard against no-progress
            }

            // Drain every packet the encoder produced for this frame
            while (result >= 0) {
                result = avcodec_receive_packet(outputCodecContext, outputPacket);
                if (result == AVERROR_EAGAIN() || result == AVERROR_EOF) {
                    break;
                } else if (result < 0) {
                    log.warn("接收编码包失败: {}", result);
                    break;
                }

                writeMP3Data(outputPacket);
                av_packet_unref(outputPacket);
            }

            totalEncodedFrames++;
            totalSamplesProcessed += frameSize;

            // Shift leftover samples to the start of each channel buffer
            int remainingSamples = accumulatedSampleCount - frameSize;
            if (remainingSamples > 0) {
                for (int i = 0; i < channels; i++) {
                    byte[] tempBuffer = new byte[remainingSamples * sampleSize];
                    // Read the tail FIRST, then reposition and write — position()
                    // mutates the pointer, so this ordering is critical.
                    accumulatedSamples[i].position((long) frameSize * sampleSize).get(tempBuffer);
                    accumulatedSamples[i].position(0).put(tempBuffer);
                }
                accumulatedSampleCount = remainingSamples;
            } else {
                accumulatedSampleCount = 0;
            }

        } catch (Exception e) {
            log.error("编码音频帧时发生错误", e);
        }
    }

    /**
     * Encodes whatever samples are still accumulated at the end of capture
     * (the final chunk may be a partial frame, as in the original code).
     *
     * <p>Fix: if {@code encodeAccumulatedFrame()} fails it leaves
     * {@code accumulatedSampleCount} unchanged, which made the original
     * {@code while (count > 0)} loop spin forever; a progress check now
     * breaks out instead.</p>
     */
    private void flushAccumulatedSamples() {
        while (accumulatedSampleCount > 0) {
            int before = accumulatedSampleCount;
            encodeAccumulatedFrame();
            if (accumulatedSampleCount >= before) {
                log.warn("刷新累积样本无进展，丢弃剩余 {} 个样本", accumulatedSampleCount);
                break;
            }
        }
    }

    /**
     * Puts the MP3 encoder into draining mode and writes out every packet it
     * still has buffered.
     */
    private void flushEncoder() {
        try {
            // A null frame signals end-of-stream to the encoder
            avcodec_send_frame(outputCodecContext, null);

            for (;;) {
                int ret = avcodec_receive_packet(outputCodecContext, outputPacket);
                if (ret == AVERROR_EOF) {
                    break; // encoder fully drained
                }
                if (ret < 0) {
                    log.warn("刷新编码器时接收包失败: {}", ret);
                    break;
                }
                writeMP3Data(outputPacket);
                av_packet_unref(outputPacket);
            }
        } catch (Exception e) {
            log.error("刷新编码器时发生错误", e);
        }
    }

    /**
     * Appends one encoded packet's bytes to the output MP3 file.
     *
     * @param packet encoded packet; empty packets are ignored
     */
    private void writeMP3Data(AVPacket packet) {
        try {
            final int size = packet.size();
            if (size <= 0) {
                return;
            }

            byte[] data = new byte[size];
            packet.data().get(data);

            // append = true: successive packets form one continuous MP3 stream
            FileUtil.writeBytes(data, new File(outputFileName), 0, data.length, true);
        } catch (Exception e) {
            log.error("写入MP3数据失败", e);
        }
    }

    /**
     * Releases all FFmpeg native resources and resets the counters, so the
     * instance can be started again safely.
     *
     * <p>Fix: {@code totalSamplesProcessed} was never reset here, so a reused
     * task instance started its second run with a stale PTS base, producing
     * wrong timestamps; it is now reset alongside the other counters.</p>
     */
    private void cleanup() {
        try {
            log.debug("任务 {} 清理FFmpeg资源", taskId);

            // Release accumulation buffers
            if (accumulatedSamples != null) {
                for (BytePointer pointer : accumulatedSamples) {
                    if (pointer != null) {
                        pointer.deallocate();
                    }
                }
                accumulatedSamples = null;
            }

            // Release the resampler
            if (swrContext != null) {
                swr_free(swrContext);
                swrContext.releaseReference();
                swrContext = null;
            }

            // Release the input decoder
            if (inputCodecContext != null) {
                avcodec_free_context(inputCodecContext);
                inputCodecContext = null;
            }

            // Release the output encoder
            if (outputCodecContext != null) {
                avcodec_free_context(outputCodecContext);
                outputCodecContext = null;
            }

            // Close the input device / format context
            if (inputFormatContext != null) {
                avformat_close_input(inputFormatContext);
                inputFormatContext = null;
            }

            // Release frames
            if (inputFrame != null) {
                av_frame_free(inputFrame);
                inputFrame = null;
            }

            if (outputFrame != null) {
                av_frame_free(outputFrame);
                outputFrame = null;
            }

            // Release packets (unref first if the sentinel says data is held)
            if (inputPacket != null) {
                if (inputPacket.stream_index() != -1) {
                    av_packet_unref(inputPacket);
                }
                av_packet_free(inputPacket);
                inputPacket = null;
            }

            if (outputPacket != null) {
                av_packet_free(outputPacket);
                outputPacket = null;
            }

            // Release the channel layout wrapper retained in initializeMP3Encoder()
            if (channelLayout != null) {
                channelLayout.releaseReference();
                channelLayout = null;
            }

            // Reset counters so a restarted task begins from a clean state
            accumulatedSampleCount = 0;
            totalEncodedFrames = 0;
            totalSamplesProcessed = 0;

            log.debug("任务 {} FFmpeg资源清理完成", taskId);

        } catch (Exception e) {
            log.warn("清理FFmpeg资源时发生错误", e);
        }
    }
}