package com.hushan.audio;

import cn.hutool.core.io.FileUtil;
import cn.hutool.core.util.ArrayUtil;
import cn.hutool.core.util.ByteUtil;
import io.quarkus.runtime.annotations.RegisterForReflection;
import jakarta.ws.rs.GET;
import jakarta.ws.rs.Path;
import jakarta.ws.rs.Produces;
import jakarta.ws.rs.core.MediaType;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.bytedeco.ffmpeg.avcodec.AVCodec;
import org.bytedeco.ffmpeg.avcodec.AVCodecContext;
import org.bytedeco.ffmpeg.avcodec.AVCodecParameters;
import org.bytedeco.ffmpeg.avcodec.AVPacket;
import org.bytedeco.ffmpeg.avformat.AVFormatContext;
import org.bytedeco.ffmpeg.avformat.AVInputFormat;
import org.bytedeco.ffmpeg.avformat.AVOutputFormat;
import org.bytedeco.ffmpeg.avformat.AVStream;
import org.bytedeco.ffmpeg.avutil.AVChannelLayout;
import org.bytedeco.ffmpeg.avutil.AVDictionary;
import org.bytedeco.ffmpeg.avutil.AVFrame;
import org.bytedeco.ffmpeg.avutil.AVRational;
import org.bytedeco.ffmpeg.global.avformat;
import org.bytedeco.ffmpeg.global.avutil;
import org.bytedeco.ffmpeg.swresample.SwrContext;
import org.bytedeco.javacpp.*;
import org.jboss.resteasy.util.DateUtil;

import javax.sound.sampled.*;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.nio.ByteOrder;
import java.util.Set;

import static org.bytedeco.ffmpeg.global.avcodec.*;
import static org.bytedeco.ffmpeg.global.avcodec.avcodec_open2;
import static org.bytedeco.ffmpeg.global.avcodec.avcodec_parameters_to_context;
import static org.bytedeco.ffmpeg.global.avdevice.avdevice_register_all;
import static org.bytedeco.ffmpeg.global.avformat.*;
import static org.bytedeco.ffmpeg.global.avutil.*;
import static org.bytedeco.ffmpeg.global.avutil.av_dict_set;
import static org.bytedeco.ffmpeg.global.swresample.*;

/**
 * Captures interleaved 16-bit stereo PCM from a sound card ({@link TargetDataLine}),
 * resamples it to the MP3 encoder's planar S16P format with FFmpeg's swresample,
 * encodes it with libavcodec, and appends the encoded packets to the local file
 * "test111.mp3". Exposed as the JAX-RS endpoint GET /hello2/capture.
 *
 * NOTE(review): this class keeps accumulation buffers as instance state and writes
 * to a fixed file name, so it is not safe for concurrent requests — verify the
 * endpoint is only invoked serially.
 */
@Path("/hello2")
@Slf4j
public class CaptureService {
    // Sound-card sample rate, fixed at 44100 Hz.
    private final static float AUDIO_CART_SAMPLE_RATE = 44100f;

    // Lock object guarding the SwrContext against concurrent access.
    private final Object swrLock = new Object();

    // Buffers that accumulate resampled samples (one per plane) until a full
    // MP3 frame (MP3_FRAME_SIZE samples) is available for encoding.
    private BytePointer[] accumulatedSamples = null;
    private int accumulatedSampleCount = 0;
    private final int MP3_FRAME_SIZE = 1152; // fixed frame size of the MP3 encoder

    /**
     * Converts one captured chunk of interleaved S16 stereo PCM into the encoder's
     * planar format and feeds complete frames to the MP3 encoder, following the
     * JavaCV resampling pattern (position/limit bookkeeping on JavaCPP pointers).
     *
     * @param buffer       raw PCM bytes from the data line (interleaved S16, 2 channels)
     * @param bytesRead    number of valid bytes in {@code buffer}
     * @param codecContext opened MP3 encoder context (planar S16P sample format)
     * @param frame        reusable AVFrame whose data planes are used as the resampler's output buffers
     * @param swrContext   initialized resampler (interleaved S16 -&gt; planar S16P)
     * @param packet       reusable packet receiving encoded output
     * @param frameCount   running chunk counter, used to derive the frame PTS
     * @throws Exception if swr_convert reports an error
     */
    private synchronized void processPCMToMP3(byte[] buffer, int bytesRead, AVCodecContext codecContext,
                                              AVFrame frame, SwrContext swrContext, AVPacket packet,
                                              int frameCount)
            throws Exception {
        try (PointerScope scope = new PointerScope()) {

            // Sample accounting per the JavaCV pattern.
            int totalSamples = bytesRead / 4; // total samples: 2 channels * 2 bytes per sample = 4 bytes per sample pair
            int samplesPerChannel = totalSamples / 2; // interleaved format: per-channel samples = total / channel count
            int inputFormat = AV_SAMPLE_FMT_S16;  // input is interleaved S16
            int inputChannels = 2;
            int inputDepth = 2; // 16-bit samples
            int outputFormat = codecContext.sample_fmt(); // S16P planar format
            int outputChannels = codecContext.ch_layout().nb_channels(); // actual channel count
            int outputDepth = av_get_bytes_per_sample(outputFormat);

            // Input pointer array (JavaCV style): interleaved data needs a single plane.
            BytePointer inputPointer = new BytePointer(buffer).retainReference();
            BytePointer[] samples_in = {inputPointer}; // one pointer for interleaved input

            // Output buffers: one plane per channel for planar formats, otherwise one.
            int planes_out = av_sample_fmt_is_planar(outputFormat) != 0 ? codecContext.ch_layout().nb_channels() : 1;
            BytePointer[] samples_out = new BytePointer[planes_out];

            // Important: the output buffers are the AVFrame's own data planes, not
            // freshly allocated memory — swr_convert writes directly into the frame.
            for (int i = 0; i < planes_out; i++) {
                BytePointer frameDataPtr = new BytePointer(frame.data(i));
                // Per-plane buffer size for one encoder frame's worth of samples.
                int data_size = av_samples_get_buffer_size((IntPointer) null, codecContext.ch_layout().nb_channels(),
                        codecContext.frame_size(), codecContext.sample_fmt(), 1) / planes_out;
                samples_out[i] = frameDataPtr.capacity(data_size);
                log.debug("samples_out[{}] 初始化: capacity={}", i, samples_out[i].capacity());
            }

            // Plane pointer arrays handed to swr_convert.
            PointerPointer input_plane_ptr = new PointerPointer(AVFrame.AV_NUM_DATA_POINTERS).retainReference();
            PointerPointer output_plane_ptr = frame.data();

            try {
                // JavaCV-style position/limit setup on the input pointers.
                for (int i = 0; i < samples_in.length; i++) {
                    // Start at position 0; limit is exactly the number of bytes captured.
                    samples_in[i].position(0).limit(bytesRead); // use the byte count actually read
                    log.debug("samples_in[{}]: position={}, limit={}, capacity={}",
                            i, samples_in[i].position(), samples_in[i].limit(), samples_in[i].capacity());
                }

                // Set limits on the output buffers (essential for the count math below).
                for (int i = 0; i < samples_out.length; i++) {
                    // Position 0, limit = full plane capacity.
                    samples_out[i].position(0).limit((int) samples_out[i].capacity());
                    log.debug("samples_out[{}]: position={}, limit={}, capacity={}",
                            i, samples_out[i].position(), samples_out[i].limit(), samples_out[i].capacity());
                }

                // JavaCV-style conversion loop: repeat until the input is consumed
                // or an output plane fills up.
                while (true) {
                    // Per-channel sample count remaining in the input buffer —
                    // tells swr_convert how many input samples are available.
                    int availableInputBytes = (int)(samples_in[0].limit() - samples_in[0].position());
                    // Interleaved: each sample slot holds all channels, so divide by channels * bytes-per-sample.
                    int inputCount = availableInputBytes / (inputChannels * inputDepth);
                    
                    // Per-channel capacity remaining in the output buffer — caps
                    // the number of output samples to avoid overflowing the plane.
                    int availableOutputBytes = (int)(samples_out[0].limit() - samples_out[0].position());
                    // Planar: one channel per plane, so divide only by bytes-per-sample.
                    int outputCount = availableOutputBytes / outputDepth;
                    
                    inputCount = Math.min(inputCount, Integer.MAX_VALUE);
                    outputCount = Math.min(outputCount, Integer.MAX_VALUE);

                    log.debug("计算结果: inputCount={}, outputCount={}, bytesRead={}, inputChannels={}, outputChannels={}", 
                            inputCount, outputCount, bytesRead, inputChannels, outputChannels);
                    log.debug("samples_in[0]: pos={}, limit={}, cap={}", 
                            samples_in[0].position(), samples_in[0].limit(), samples_in[0].capacity());
                    log.debug("samples_out[0]: pos={}, limit={}, cap={}", 
                            samples_out[0].position(), samples_out[0].limit(), samples_out[0].capacity());
                    
                    // Sanity-check the computed counts.
                    if (inputCount <= 0) {
                        log.debug("inputCount <= 0, 退出循环");
                        break;
                    }
                    if (outputCount <= 0) {
                        log.warn("输出缓冲区不足，outputCount={}", outputCount);
                        break;
                    }

                    // Rescale the input count by the output capacity (rounded up) so
                    // the conversion cannot overflow the output plane.
                    inputCount = Math.min(inputCount,
                            (outputCount * (int) AUDIO_CART_SAMPLE_RATE + codecContext.sample_rate() - 1) / codecContext.sample_rate());

                    log.debug("最终计算: inputCount={}, outputCount={}", inputCount, outputCount);

                    // Load the plane pointer arrays; for interleaved input this is
                    // a single entry pointing at the current read position.
                    for (int i = 0; i < samples_in.length; i++) {
                        input_plane_ptr.put(i, samples_in[i]);
                    }
                    for (int i = 0; i < samples_out.length; i++) {
                        output_plane_ptr.put(i, samples_out[i]);
                    }

                    // Run the format conversion; swr reads/writes through the plane arrays.
                    int ret;
                    synchronized (swrLock) {
                        ret = swr_convert(swrContext, output_plane_ptr, outputCount, input_plane_ptr, inputCount);
                    }

                    if (ret < 0) {
                        log.error("swr_convert() error {}: Cannot convert audio samples", ret);
                        throw new Exception("swr_convert() error " + ret + ": Cannot convert audio samples.");
                    } else if (ret == 0) {
                        log.debug("重采样完成，退出循环");
                        break;
                    }
                    
                    // Log the resampling result to help diagnose playback-speed issues.
                    log.debug("重采样结果: 输入{}=>{}输出, 输入指针前进{}bytes, 输出指针前进{}bytes",
                            inputCount, ret, inputCount * inputChannels * inputDepth, ret * outputDepth);

                    // Advance pointer positions JavaCV-style.
                    for (int i = 0; i < samples_in.length; i++) {
                        // Interleaved input: advance by consumed samples * channels * depth.
                        samples_in[i].position(samples_in[i].position() + inputCount * inputChannels * inputDepth);
                    }
                    for (int i = 0; i < samples_out.length; i++) {
                        // Planar output: advance each plane by produced samples * depth.
                        samples_out[i].position(samples_out[i].position() + ret * outputDepth);
                    }

                    // Dump raw PCM to disk so the resampler output can be inspected.
                    savePCMDataForVerification(samples_out, ret, outputDepth, planes_out);

                    // Accumulate samples until a full MP3 frame is available, then encode.
                    accumulateSamplesAndEncode(codecContext, frame, packet, ret, frameCount);

                    // Stop once the output plane is full.
                    if (samples_out[0].position() >= samples_out[0].limit()) {
                        break;
                    }
                }
            } finally {
                inputPointer.releaseReference();
                input_plane_ptr.releaseReference();
            }
        }
    }

    /**
     * Debug helper: appends up to 8 KiB of the most recently converted PCM of each
     * plane to "resampled_plane&lt;N&gt;.pcm" so resampler output can be verified offline.
     * Errors are logged and swallowed — this must never break the capture loop.
     *
     * @param samples_out      per-plane output pointers (positions already advanced past the new data)
     * @param convertedSamples number of samples produced by the last swr_convert call
     * @param outputDepth      bytes per sample in the output format
     * @param planes_out       number of output planes
     */
    private void savePCMDataForVerification(BytePointer[] samples_out, int convertedSamples,
                                            int outputDepth, int planes_out) {
        try {
            // Save the PCM data of each plane separately.
            for (int plane = 0; plane < planes_out; plane++) {
                int bytesToSave = convertedSamples * outputDepth;
                if (bytesToSave > 0 && bytesToSave <= 8192) { // size cap to avoid huge files
                    // Temporary buffer to pull the bytes out of native memory.
                    byte[] pcmData = new byte[bytesToSave];

                    // Remember the current pointer position so it can be restored.
                    int currentPos = (int) samples_out[plane].position();

                    // Read backwards from the current position (the just-written region).
                    int readPos = Math.max(0, currentPos - bytesToSave);
                    if (readPos + bytesToSave <= samples_out[plane].capacity()) {
                        samples_out[plane].position(readPos);
                        samples_out[plane].get(pcmData);

                        // Restore the pointer position.
                        samples_out[plane].position(currentPos);

                        // Append to a per-plane file.
                        String filename = String.format("resampled_plane%d.pcm", plane);
                        FileUtil.writeBytes(pcmData, new File(filename), 0, pcmData.length, true);

                        log.debug("保存PCM数据到 {}: {} 字节, {} 样本",
                                filename, pcmData.length, convertedSamples);
                    }
                }
            }
        } catch (Exception e) {
            log.warn("保存PCM数据时出错: {}", e.getMessage());
        }
    }

    /**
     * Accumulates resampled samples until at least one full MP3 frame
     * (MP3_FRAME_SIZE = 1152 samples) is buffered, then copies that frame into a
     * freshly allocated AVFrame and encodes it. Remaining samples are shifted to
     * the front of the accumulation buffers for the next call.
     *
     * NOTE(review): the source of the copy is {@code frame.data(plane)} read from
     * offset 0 — if processPCMToMP3 ever loops more than once per chunk, later
     * iterations' data sits at a nonzero offset in the frame; confirm this matches
     * the resample loop's pointer bookkeeping.
     *
     * @param codecContext     opened MP3 encoder context
     * @param frame            AVFrame whose data planes hold the freshly converted samples
     * @param packet           reusable packet for encoded output
     * @param convertedSamples number of samples produced by the last swr_convert call
     * @param frameCount       running chunk counter, used for the PTS
     * @throws Exception if frame allocation or encoding fails
     */
    private void accumulateSamplesAndEncode(AVCodecContext codecContext, AVFrame frame, AVPacket packet,
                                            int convertedSamples,
                                            int frameCount) throws Exception {

        int outputDepth = av_get_bytes_per_sample(codecContext.sample_fmt());
        int planes_out = av_sample_fmt_is_planar(codecContext.sample_fmt()) != 0 ? codecContext.ch_layout().nb_channels() : 1;

        // Lazily allocate the accumulation buffers (native memory via av_malloc).
        if (accumulatedSamples == null) {
            accumulatedSamples = new BytePointer[planes_out];
            for (int i = 0; i < planes_out; i++) {
                int bufferSize = MP3_FRAME_SIZE * outputDepth * 2; // leave some headroom
                Pointer ptr = av_malloc(bufferSize);
                accumulatedSamples[i] = new BytePointer(ptr).retainReference().capacity(bufferSize);
            }
            accumulatedSampleCount = 0;
        }

        // Append the newly converted samples to the accumulation buffers.
        for (int plane = 0; plane < planes_out; plane++) {
            BytePointer srcPointer = new BytePointer(frame.data(plane));
            int bytesToCopy = convertedSamples * outputDepth;

            // Guard against overflowing the accumulation buffer.
            if (currentPosWouldOverflow(plane, bytesToCopy)) { }
            int currentPos = accumulatedSampleCount * outputDepth;
            if (currentPos + bytesToCopy > accumulatedSamples[plane].capacity()) {
                log.warn("累积缓冲区空间不足，重置缓冲区");
                accumulatedSampleCount = 0;
                currentPos = 0;
            }

            // Copy via a temporary Java array (native-to-native copy is avoided here).
            byte[] tempBuffer = new byte[bytesToCopy];
            srcPointer.position(0).get(tempBuffer);
            accumulatedSamples[plane].position(currentPos).put(tempBuffer);
        }

        accumulatedSampleCount += convertedSamples;
        log.debug("累积样本数: {}/{}", accumulatedSampleCount, MP3_FRAME_SIZE);

        // Encode once a full MP3 frame's worth of samples has been accumulated.
        if (accumulatedSampleCount >= MP3_FRAME_SIZE) {
            // Build a dedicated frame for the encoder.
            AVFrame encodeFrame = av_frame_alloc();
            try {
                encodeFrame.nb_samples(MP3_FRAME_SIZE);
                encodeFrame.format(codecContext.sample_fmt());
                av_channel_layout_copy(encodeFrame.ch_layout(), codecContext.ch_layout());
                encodeFrame.sample_rate(codecContext.sample_rate());
                // PTS derived from the chunk counter times the frame size.
                encodeFrame.pts(frameCount * MP3_FRAME_SIZE);

                // Allocate the frame's data buffers.
                if (av_frame_get_buffer(encodeFrame, 0) < 0) {
                    throw new RuntimeException("Could not allocate encode frame buffer");
                }

                // Ensure the buffers are writable (avoids artifacts from shared buffers).
                if (av_frame_make_writable(encodeFrame) < 0) {
                    throw new RuntimeException("Could not make encode frame writable");
                }

                // Copy exactly one frame of accumulated samples into the encode frame.
                for (int plane = 0; plane < planes_out; plane++) {
                    BytePointer dstPointer = new BytePointer(encodeFrame.data(plane));
                    int bytesToCopy = MP3_FRAME_SIZE * outputDepth;

                    // Transfer via a temporary Java array.
                    byte[] tempBuffer = new byte[bytesToCopy];
                    accumulatedSamples[plane].position(0).get(tempBuffer);
                    dstPointer.put(tempBuffer);
                }

                // Encode the frame.
                log.debug("编码MP3帧: {} 样本", MP3_FRAME_SIZE);
                encodeAudioFrame(codecContext, encodeFrame, packet);

                // Consume one frame's worth of accumulated samples.
                accumulatedSampleCount -= MP3_FRAME_SIZE;

                // Shift any leftover samples to the start of the buffers.
                if (accumulatedSampleCount > 0) {
                    for (int plane = 0; plane < planes_out; plane++) {
                        int remainingBytes = accumulatedSampleCount * outputDepth;
                        int sourceOffset = MP3_FRAME_SIZE * outputDepth;

                        // Move via a temporary Java array.
                        byte[] tempBuffer = new byte[remainingBytes];
                        accumulatedSamples[plane].position(sourceOffset).get(tempBuffer);
                        accumulatedSamples[plane].position(0).put(tempBuffer);
                    }
                }
            } finally {
                av_frame_free(encodeFrame);
            }
        }
    }

    /**
     * Flushes whatever is left in the accumulation buffers at end of capture:
     * encodes only complete MP3 frames, discards any tail shorter than
     * MP3_FRAME_SIZE (to avoid encoder error -22), then frees the native buffers.
     *
     * @param codecContext opened MP3 encoder context
     * @param packet       reusable packet for encoded output
     * @param frameCount   chunk counter at end of capture, used for PTS continuity
     * @throws Exception if frame allocation or encoding fails
     */
    private void flushAccumulatedSamples(AVCodecContext codecContext, AVPacket packet, int frameCount) throws Exception {
        if (accumulatedSamples != null && accumulatedSampleCount > 0) {
            log.debug("刷新剩余样本: {} 样本", accumulatedSampleCount);

            // The MP3 encoder can accept a short final frame, but feeding it one
            // here triggers error -22; so encode only complete frames and drop the rest.
            if (accumulatedSampleCount < MP3_FRAME_SIZE) {
                log.warn("剩余样本数 {} 小于 MP3 标准帧大小 {}\uff0c丢弃这些数据以避免编码错误",
                        accumulatedSampleCount, MP3_FRAME_SIZE);
            } else {
                // Encode the complete frames only.
                int completeFrames = accumulatedSampleCount / MP3_FRAME_SIZE;
                log.debug("编码剩余完整帧数: {}", completeFrames);

                for (int f = 0; f < completeFrames; f++) {
                    int outputDepth = av_get_bytes_per_sample(codecContext.sample_fmt());
                    int planes_out = av_sample_fmt_is_planar(codecContext.sample_fmt()) != 0 ? codecContext.ch_layout().nb_channels() : 1;

                    AVFrame encodeFrame = av_frame_alloc();
                    try {
                        encodeFrame.nb_samples(MP3_FRAME_SIZE);
                        encodeFrame.format(codecContext.sample_fmt());
                        av_channel_layout_copy(encodeFrame.ch_layout(), codecContext.ch_layout());
                        encodeFrame.sample_rate(codecContext.sample_rate());
                        // PTS continues from the capture loop's chunk counter.
                        encodeFrame.pts((frameCount + f) * MP3_FRAME_SIZE);

                        // Allocate the frame's data buffers.
                        if (av_frame_get_buffer(encodeFrame, 0) < 0) {
                            throw new RuntimeException("Could not allocate final encode frame buffer");
                        }

                        // Ensure the buffers are writable.
                        if (av_frame_make_writable(encodeFrame) < 0) {
                            throw new RuntimeException("Could not make final encode frame writable");
                        }

                        // Copy the f-th complete frame out of the accumulation buffers.
                        for (int plane = 0; plane < planes_out; plane++) {
                            BytePointer dstPointer = new BytePointer(encodeFrame.data(plane));
                            int bytesToCopy = MP3_FRAME_SIZE * outputDepth;
                            int sourceOffset = f * MP3_FRAME_SIZE * outputDepth;

                            // Transfer via a temporary Java array.
                            byte[] tempBuffer = new byte[bytesToCopy];
                            accumulatedSamples[plane].position(sourceOffset).get(tempBuffer);
                            dstPointer.put(tempBuffer);
                        }

                        // Encode the frame.
                        encodeAudioFrame(codecContext, encodeFrame, packet);

                    } finally {
                        av_frame_free(encodeFrame);
                    }
                }
            }

            // Release the native accumulation buffers.
            for (int i = 0; i < accumulatedSamples.length; i++) {
                if (accumulatedSamples[i] != null) {
                    av_free(accumulatedSamples[i].position(0));
                    accumulatedSamples[i].releaseReference();
                }
            }
            accumulatedSamples = null;
            accumulatedSampleCount = 0;
        }
    }

    /**
     * Sends one frame to the MP3 encoder and appends every packet it produces to
     * "test111.mp3". Send errors are logged and ignored (the capture loop continues).
     *
     * @param codecContext opened MP3 encoder context
     * @param frame        frame to encode (null would flush — callers here always pass a frame)
     * @param packet       reusable packet receiving the encoded data
     * @throws Exception declared for symmetry with callers; encoding errors are logged, not thrown
     */
    private void encodeAudioFrame(AVCodecContext codecContext, AVFrame frame, AVPacket packet) throws Exception {

        // Submit the frame to the encoder.
        int ret = avcodec_send_frame(codecContext, frame);
        if (ret < 0) {
            log.error("Error sending frame to encoder: {}", ret);
            return;
        }

        // Drain all packets the encoder has ready.
        while (ret >= 0) {
            ret = avcodec_receive_packet(codecContext, packet);
            if (ret == AVERROR_EAGAIN() || ret == AVERROR_EOF) {
                break;
            } else if (ret < 0) {
                log.error("Error during encoding: {}", ret);
                break;
            }

            // Append the raw MP3 packet bytes to the output file.
            BytePointer packetData = packet.data();
            int packetSize = packet.size();
            if (packetData != null && packetSize > 0) {
                byte[] mp3Data = new byte[packetSize];
                packetData.get(mp3Data);
                FileUtil.writeBytes(mp3Data, new File("test111.mp3"), 0, mp3Data.length, true);
                log.debug("Wrote MP3 packet: {} bytes", packetSize);
            }

            av_packet_unref(packet);
        }
    }

    /**
     * GET /hello2/capture — records ~15 seconds of audio from a hard-coded mixer,
     * encodes it to MP3 via the pipeline above, flushes the encoder, and releases
     * all native resources.
     *
     * @return a completion message
     * @throws LineUnavailableException if the target data line cannot be opened
     * @throws IOException              declared but not thrown by the visible code
     */
    @GET
    @Produces(MediaType.TEXT_PLAIN)
    @Path("capture")
    public String hello() throws LineUnavailableException, IOException {
        int channel = 0;
        // Enumerate the system mixers.
        Mixer.Info[] mixerInfo = AudioSystem.getMixerInfo();
        // NOTE(review): mixer index 5 is hard-coded — this is machine-specific and
        // will throw ArrayIndexOutOfBoundsException or pick the wrong device elsewhere.
        Mixer mixer = AudioSystem.getMixer(mixerInfo[5]);
        if (mixer == null) {
            throw new RuntimeException("No mixer found");
        }

        // Audio format (should really be chosen from the card's supported formats).
        AudioFormat audioFormat = new AudioFormat(
                AudioFormat.Encoding.PCM_SIGNED,
                AUDIO_CART_SAMPLE_RATE,  // sample rate
                16,                      // bit depth
                2,                       // channel count
                (16 / 8) * 2,           // frame size: bytes per frame
                AUDIO_CART_SAMPLE_RATE, // frame rate equals the sample rate
                false);                  // little-endian (false = not big-endian)

        // Obtain the TargetDataLine for this format.
        DataLine.Info dataLineInfo = new DataLine.Info(TargetDataLine.class, audioFormat);
        TargetDataLine targetDataLine = (TargetDataLine) mixer.getLine(dataLineInfo);
        // One MP3 frame = 1152 samples * 4 bytes = 4608 bytes; buffer slightly larger.
        targetDataLine.open(audioFormat, (int) (4608 * 1.2));
        // Start capturing.
        targetDataLine.start();

        // Initialize the FFmpeg native libraries.
        Loader.load(avutil.class);
        Loader.load(org.bytedeco.ffmpeg.global.swresample.class);
        Loader.load(org.bytedeco.ffmpeg.global.avcodec.class);
        Loader.load(avformat.class);
        Loader.load(org.bytedeco.ffmpeg.global.swscale.class);
        av_log_set_level(AV_LOG_INFO);
        av_jni_set_java_vm(Loader.getJavaVM(), null);
        avformat_network_init();
        Loader.load(org.bytedeco.ffmpeg.global.avdevice.class);
        avdevice_register_all();

        // Locate the MP3 encoder.
        AVCodec mp3Codec = avcodec_find_encoder(AV_CODEC_ID_MP3);
        if (mp3Codec == null) {
            log.error("MP3 codec not found. Available encoders:");
            // Dump the available encoders to aid diagnosis.
            AVCodec codec = null;
            while ((codec = av_codec_iterate(codec)) != null) {
                if (av_codec_is_encoder(codec) != 0) {
                    log.info("Available encoder: {} ({})", codec.name().getString(), codec.long_name().getString());
                }
            }
            throw new RuntimeException("MP3 codec not found");
        }
        log.info("Found MP3 codec: {} ({})", mp3Codec.name().getString(), mp3Codec.long_name().getString());

        AVCodecContext codecContext = avcodec_alloc_context3(mp3Codec);
        codecContext.codec_id(mp3Codec.id());
        codecContext.codec_type(AVMEDIA_TYPE_AUDIO);
        codecContext.bit_rate(128000);  // 128 kbps
        codecContext.sample_fmt(AV_SAMPLE_FMT_S16P);  // 16-bit signed planar
        codecContext.sample_rate((int) AUDIO_CART_SAMPLE_RATE);

        // Channel layout, e.g. av_channel_layout_default(&layout, 2) = stereo (2 channels).
        AVChannelLayout channelLayout = new AVChannelLayout().retainReference();
        av_channel_layout_default(channelLayout, 2);
        codecContext.ch_layout(channelLayout);

        // Time base = 1/sample_rate (e.g. one MP3 frame plays for ~26 ms).
        AVRational sample_rate = av_d2q(AUDIO_CART_SAMPLE_RATE, 1001000);
        AVRational time_base = av_inv_q(sample_rate);
        codecContext.time_base(time_base);
        // Raw sample bit depth.
        codecContext.bits_per_raw_sample(16);

        // If the codec is experimental, opt in to experimental compliance.
        if ((mp3Codec.capabilities() & AV_CODEC_CAP_EXPERIMENTAL) != 0) {
            codecContext.strict_std_compliance(FF_COMPLIANCE_EXPERIMENTAL);
            log.info("启用实验性编码器兼容性");
        }

        log.info("Codec configuration: sample_rate={}, channels={}, bit_rate={}, sample_fmt={}",
                codecContext.sample_rate(), codecContext.ch_layout().nb_channels(),
                codecContext.bit_rate(), codecContext.sample_fmt());

        int codecOpenResult = avcodec_open2(codecContext, mp3Codec, (AVDictionary) null);
        if (codecOpenResult < 0) {
            byte[] errorBuf = new byte[256];
            av_strerror(codecOpenResult, errorBuf, errorBuf.length);
            String errorStr = new String(errorBuf).trim();
            log.error("Could not open MP3 codec. Error code: {}, Error: {}", codecOpenResult, errorStr);
            throw new RuntimeException("Could not open MP3 codec: " + errorStr);
        }
        log.info("MP3 codec opened successfully");
        // Allocate the reusable audio frame used as the resampler's output buffer.
        AVFrame frame = av_frame_alloc();
        if (frame == null) {
            throw new RuntimeException("Could not allocate audio frame");
        }

        frame.nb_samples(codecContext.frame_size());
        frame.format(codecContext.sample_fmt());
        av_channel_layout_copy(frame.ch_layout(), codecContext.ch_layout());
        frame.sample_rate(codecContext.sample_rate());

        int bufferResult = av_frame_get_buffer(frame, 0);
        if (bufferResult < 0) {
            av_frame_free(frame);
            throw new RuntimeException("Could not allocate audio frame buffer");
        }

        // Ensure the frame's data buffers are writable.
        int makeWritableResult = av_frame_make_writable(frame);
        if (makeWritableResult < 0) {
            av_frame_free(frame);
            throw new RuntimeException("Could not make frame writable");
        }

        // Resampling context (interleaved -> planar conversion).
        SwrContext swrContext = new SwrContext().retainReference();

        // Use the modern ch_layout API instead of the deprecated channel_layout.
        AVChannelLayout inputLayout = new AVChannelLayout().retainReference();
        AVChannelLayout outputLayout = new AVChannelLayout().retainReference();
        av_channel_layout_default(inputLayout, 2);  // input: stereo
        av_channel_layout_default(outputLayout, 2); // output: stereo

        // Configure the converter via swr_alloc_set_opts2.
        // Parameter order: out ch_layout, out format, out sample_rate, in ch_layout, in format, in sample_rate.
        int ret = swr_alloc_set_opts2(swrContext,
                outputLayout, AV_SAMPLE_FMT_S16P, (int) AUDIO_CART_SAMPLE_RATE,  // output: planar
                inputLayout, AV_SAMPLE_FMT_S16, (int) AUDIO_CART_SAMPLE_RATE,   // input: interleaved (card capture)
                0, null);

        log.info("SwrContext配置: 输入=交错S16 {}Hz, 输出=平面S16P {}Hz",
                (int) AUDIO_CART_SAMPLE_RATE, (int) AUDIO_CART_SAMPLE_RATE);

        inputLayout.releaseReference();
        outputLayout.releaseReference();

        if (ret < 0) {
            swrContext.releaseReference();
            throw new RuntimeException("swr_alloc_set_opts2() error " + ret + ": Cannot allocate the conversion context");
        }

        // Initialize the converter.
        int swrInitResult = swr_init(swrContext);
        if (swrInitResult < 0) {
            swrContext.releaseReference();
            throw new RuntimeException("swr_init() error " + swrInitResult + ": Cannot initialize the conversion context");
        }

        AVPacket packet = av_packet_alloc();
        long startTime = System.nanoTime();
        int frameCount = 0;

        // Capture loop — JavaCV-style processing for ~15 seconds.
        while (System.nanoTime() - startTime < 15L * 1000 * 1000000) { // record for 15 seconds
            try (PointerScope scope = new PointerScope()) {
                byte[] buffer = new byte[4608];
                // NOTE(review): busy-waits when no data is available — consider a short sleep.
                if (targetDataLine.available() == 0) {
                    continue;
                }
                int bytesRead = targetDataLine.read(buffer, 0, buffer.length);
                if (bytesRead > 0) {
                    // Resample + encode this PCM chunk.
                    processPCMToMP3(buffer, bytesRead, codecContext, frame, swrContext,
                            packet, frameCount);
                    frameCount++;
                }
            } catch (Exception e) {
                log.error("音频处理出错", e);
                break;
            }
        }

        // Flush any samples still sitting in the accumulation buffers.
        try {
            flushAccumulatedSamples(codecContext, packet, frameCount);
        } catch (Exception e) {
            log.error("刷新累积样本时出错", e);
        }

        // Flush the encoder (null frame enters drain mode).
        ret = avcodec_send_frame(codecContext, null);
        while (ret >= 0) {
            ret = avcodec_receive_packet(codecContext, packet);
            if (ret == AVERROR_EAGAIN() || ret == AVERROR_EOF) {
                break;
            } else if (ret < 0) {
                break;
            }

            // Append the final MP3 packets.
            BytePointer packetData = packet.data();
            int packetSize = packet.size();
            if (packetData != null && packetSize > 0) {
                byte[] mp3Data = new byte[packetSize];
                packetData.get(mp3Data);
                FileUtil.writeBytes(mp3Data, new File("test111.mp3"), 0, mp3Data.length, true);
            }
            av_packet_unref(packet);
        }

        // Release all resources.
        targetDataLine.stop();
        targetDataLine.close();
        av_frame_free(frame);
        avcodec_free_context(codecContext);
        // releaseReference used instead of swr_free — NOTE(review): this drops the
        // JavaCPP wrapper reference but may leak the native SwrContext; verify.
        swrContext.releaseReference();
        av_packet_free(packet);
        channelLayout.releaseReference();
        // NOTE(review): the message says "output.mp3" but the file actually written
        // throughout this class is "test111.mp3" — the message is misleading.
        return "MP3 encoding completed! File saved as output.mp3";
    }
}
