#include "audiorecorder.h"

#include <string>
#include <thread>

extern "C"{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/imgutils.h"
#include "libswresample/swresample.h"
#include <libavutil/avassert.h>
};


#include <QDebug>
using namespace std;
int g_aCollectFrameCnt = 0;	// number of PCM frames captured/decoded from the microphone
int g_aEncodeFrameCnt = 0;	// number of compressed packets successfully written to the muxer

/// Constructs an idle recorder. All FFmpeg objects are created lazily in
/// OpenAudio()/OpenOutput(); pointers are nulled here so Release() — which
/// tests each pointer — is safe even if recording never starts.
AudioRecorder::AudioRecorder(QObject *parent)
    : QObject{parent}
    , m_swrCtx(nullptr)
    , m_state(RecordState::NotStarted)
    , m_aCurPts(0)
{
    // NOTE(review): the original left these members uninitialized; Release()
    // reads them, which is UB if Start() was never called. Harmless if the
    // header already has in-class initializers — confirm against the header.
    m_aFmtCtx = nullptr;
    m_oFmtCtx = nullptr;
    m_aDecodeCtx = nullptr;
    m_aEncodeCtx = nullptr;
    m_aFifoBuf = nullptr;
    m_vOutFrame = nullptr;
    m_nbSamples = 0;
    m_aIndex = -1;
    m_aOutIndex = -1;
}

void AudioRecorder::Init(const QVariantMap &map)
{
    // 输出文件的路径
    m_filePath = map["filePath"].toString();

    // 音频的码率
    m_audioBitrate = map["audioBitrate"].toInt();
}

void AudioRecorder::Start()
{
    if (m_state == RecordState::NotStarted)
    {
        qDebug() << "start record";
        m_state = RecordState::Started;
        //c++11:thread, detach
        std::thread muxThread(&AudioRecorder::MuxThreadProc, this);
        muxThread.detach();
    }
}

/// Requests a stop. The worker threads poll m_state, drain the FIFO and
/// flush the codecs before exiting, so this returns immediately.
void AudioRecorder::Stop()
{
    qDebug() << "stop record";
    m_state = RecordState::Stopped;
}

void AudioRecorder::MuxThreadProc()
{
    int ret = -1;
    bool done = false;
    int aFrameIndex = 0;


    avdevice_register_all();


    // 打开麦克风
    if (OpenAudio() < 0)
        return;

    // 打开输出文件或推流地址
    if (OpenOutput() < 0)
        return;

    /// 初始共享队列
    InitAudioBuffer();

    //启动音视频数据采集线程：生产者线程
    std::thread soundRecord(&AudioRecorder::SoundRecordThreadProc, this);
    soundRecord.detach();

    while (1)
    {
        if (m_state == RecordState::Stopped && !done)
            done = true;

        if (done)
        {
            lock_guard<mutex> lk(m_mtxABuf);
            if (av_audio_fifo_size(m_aFifoBuf) < m_nbSamples)
            {
                qDebug() << "audio write done";
                break;
            }
        }
        else
        {

            /// 只要不空，就拿数据
            /// 如果为空，就wait。此时就阻塞了，该线程停止往下运行
            ///
            unique_lock<mutex> lk(m_mtxABuf);
            m_cvABufNotEmpty.wait(lk,
                                  [this]
                                  { return av_audio_fifo_size(m_aFifoBuf) >= m_nbSamples; });
        }

        /// 已经从 共享队列中拿出了PCM数据，然后开始编码、封装、存储
        int ret = -1;
        AVFrame *aFrame = av_frame_alloc(); //分配AVFrame大结构的数据内存：浅拷贝
        aFrame->nb_samples = m_nbSamples;
        aFrame->channel_layout = m_aEncodeCtx->channel_layout;
        aFrame->format = m_aEncodeCtx->sample_fmt;//编码器的采样格式：(mp3:s16p,aac:fltp)
        aFrame->sample_rate = m_aEncodeCtx->sample_rate;
        aFrame->pts = m_nbSamples * aFrameIndex++;
        //分配data buf
        ret = av_frame_get_buffer(aFrame, 0); //分配AVFrame相关的音视频数据的内存，深拷贝

        //真正从Audiofifo中拿出PCM数据
        av_audio_fifo_read(m_aFifoBuf, (void **)aFrame->data, m_nbSamples);

        m_cvABufNotFull.notify_one();

        AVPacket pkt = { 0 };
        av_init_packet(&pkt);
        /// 发给编码器：aac、mp3
        ret = avcodec_send_frame(m_aEncodeCtx, aFrame);
        if (ret != 0)
        {
            qDebug() << "audio avcodec_send_frame failed, ret: " << ret;
            av_frame_free(&aFrame);
            av_packet_unref(&pkt);
            continue;
        }
        ret = avcodec_receive_packet(m_aEncodeCtx, &pkt);
        if (ret != 0)
        {
            qDebug() << "audio avcodec_receive_packet failed, ret: " << ret;
            av_frame_free(&aFrame);
            av_packet_unref(&pkt);
            continue;
        }
        pkt.stream_index = m_aOutIndex;

        /// 时间基：将 编码器的时间基 调整为 封装格式的时间基
        av_packet_rescale_ts(&pkt,
                             m_aEncodeCtx->time_base,
                             m_oFmtCtx->streams[m_aOutIndex]->time_base);

        /// 打时间戳
        m_aCurPts = pkt.pts;
        qDebug() << "aCurPts: " << m_aCurPts;

        // 交替写 音频或视频 数据（AVPacket）
        ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
        if (ret == 0)
            qDebug() << "Write audio packet id: " << ++g_aEncodeFrameCnt;
        else
            qDebug() << "audio av_interleaved_write_frame failed, ret: " << ret;

        av_frame_free(&aFrame);
        av_packet_unref(&pkt);

    }
    FlushEncoders();

    /// 完毕后，一定要写“尾巴”
    av_write_trailer(m_oFmtCtx);
    Release();
    qDebug() << "parent thread exit";
}

void AudioRecorder::SoundRecordThreadProc()
{
    int ret = -1;
    AVPacket pkt = { 0 };
    av_init_packet(&pkt);
    int nbSamples = m_nbSamples;
    int dstNbSamples, maxDstNbSamples;
    AVFrame *rawFrame = av_frame_alloc();
    AVFrame *newFrame = AllocAudioFrame(m_aEncodeCtx, nbSamples);

    maxDstNbSamples = dstNbSamples = av_rescale_rnd(nbSamples,
                                                    m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);

    while (m_state != RecordState::Stopped)
    {
        ///// 暂停
        //        if (m_state == RecordState::Paused)
        //        {
        //            unique_lock<mutex> lk(m_mtxPause);
        //            m_cvNotPause.wait(lk, [this] { return m_state != RecordState::Paused; });
        //        }
        if (av_read_frame(m_aFmtCtx, &pkt) < 0)
        {
            qDebug() << "audio av_read_frame < 0";
            continue;
        }
        if (pkt.stream_index != m_aIndex)
        {
            qDebug() << "not a audio packet";
            av_packet_unref(&pkt);
            continue;
        }

        /// 发给 解码器（麦克风出来时是pcm，需要用ffmpeg解码
        ret = avcodec_send_packet(m_aDecodeCtx, &pkt);
        if (ret != 0)
        {
            qDebug() << "audio avcodec_send_packet failed, ret: " << ret;
            av_packet_unref(&pkt);
            continue;
        }
        /// 这个是原始的 麦克风的 PCM数据
        ret = avcodec_receive_frame(m_aDecodeCtx, rawFrame);
        if (ret != 0)
        {
            qDebug() << "audio avcodec_receive_frame failed, ret: " << ret;
            av_packet_unref(&pkt);
            continue;
        }
        ++g_aCollectFrameCnt;

        /// 计算 目标采样点数
        dstNbSamples = av_rescale_rnd(swr_get_delay(m_swrCtx, m_aDecodeCtx->sample_rate) + rawFrame->nb_samples,
                                      m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);
        if (dstNbSamples > maxDstNbSamples)
        {
            qDebug() << "audio newFrame realloc";
            av_freep(&newFrame->data[0]);
            //nb_samples*nb_channels*Bytes_sample_fmt
            ret = av_samples_alloc(newFrame->data, newFrame->linesize, m_aEncodeCtx->channels,
                                   dstNbSamples, m_aEncodeCtx->sample_fmt, 1);
            if (ret < 0)
            {
                qDebug() << "av_samples_alloc failed";
                return;
            }

            maxDstNbSamples = dstNbSamples;
            m_aEncodeCtx->frame_size = dstNbSamples;
            m_nbSamples = newFrame->nb_samples;

        }

        /// 重采样，是为了 适配 编码器或者播放器
        newFrame->nb_samples = swr_convert(m_swrCtx, newFrame->data, dstNbSamples,
                                           (const uint8_t **)rawFrame->data, rawFrame->nb_samples);

        //m_nbSamples = newFrame->nb_samples;
        if (newFrame->nb_samples < 0)
        {
            qDebug() << "swr_convert error";
            return;
        }
        {

            /// 判断共享队列是否已满
            /// 如果已满，那就等待
            /// 如果未满，则将刚才重采样后的PCM数据，放到 fifo中。
            unique_lock<mutex> lk(m_mtxABuf);
            m_cvABufNotFull.wait(lk,
                                 [newFrame, this]  //捕获列表
                                 /// "不空"的判断条件：{ return av_audio_fifo_size(m_aFifoBuf) >= m_nbSamples; });
                                 /// "不满"的判断条件：队列中剩余的空间大于“采样点数”
                                 { return av_audio_fifo_space(m_aFifoBuf) >= newFrame->nb_samples; });
        }
        /// 重采样完毕，将PCM放到 共享队列中
        if (av_audio_fifo_write(m_aFifoBuf, (void **)newFrame->data, newFrame->nb_samples) < newFrame->nb_samples)
        {
            qDebug() << "av_audio_fifo_write";
            return;
        }


        m_cvABufNotEmpty.notify_one();
    }
    FlushAudioDecoder();
    av_frame_free(&rawFrame);
    av_frame_free(&newFrame);
    qDebug() << "sound record thread exit";
}

/// Allocates an AVFrame configured for the given codec context (format,
/// channel layout, sample rate) holding nbSamples samples.
/// Returns nullptr on allocation failure; the caller owns the frame and
/// must release it with av_frame_free().
AVFrame *AudioRecorder::AllocAudioFrame(AVCodecContext *c, int nbSamples)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)     // original dereferenced a potential nullptr here
        return nullptr;

    // Frame parameters mirror the codec context.
    frame->format = c->sample_fmt;
    frame->ch_layout = c->ch_layout;
    frame->sample_rate = c->sample_rate;
    frame->nb_samples = nbSamples;

    if (nbSamples)
    {
        // Allocate the underlying sample buffers.
        int ret = av_frame_get_buffer(frame, 0);
        if (ret < 0)
        {
            qDebug() << "av_frame_get_buffer failed";
            av_frame_free(&frame);  // was leaked in the original
            return nullptr;
        }
    }
    return frame;
}


/// 检测采样格式
/// Returns 1 if the encoder supports sample_fmt, 0 otherwise.
/// Scans the codec's AV_SAMPLE_FMT_NONE-terminated format list.
static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    for (const enum AVSampleFormat *fmt = codec->sample_fmts; *fmt != AV_SAMPLE_FMT_NONE; ++fmt)
    {
        if (*fmt == sample_fmt)
            return 1;
    }
    return 0;
}

/// Opens the microphone via the dshow input device ("audio=<m_micro>"),
/// finds the audio stream and opens a decoder for it.
/// On success sets m_aFmtCtx, m_aDecodeCtx and m_aIndex and returns 0;
/// returns -1 on any failure.
int AudioRecorder::OpenAudio()
{
    m_aFmtCtx = avformat_alloc_context();
    int ret = -1;
    AVCodec *decoder = nullptr;

    // dshow: Windows DirectShow capture devices.
    AVInputFormat *ifmt = (AVInputFormat *)av_find_input_format("dshow");

    QString audioDeviceName = "audio=" + m_micro;
    qDebug() << audioDeviceName;

    if (avformat_open_input(&m_aFmtCtx, audioDeviceName.toStdString().c_str(), ifmt, nullptr) < 0)
    {
        qDebug() << "Can not open audio input stream";
        return -1;
    }
    if (avformat_find_stream_info(m_aFmtCtx, nullptr) < 0)
        return -1;

    for (int i = 0; i < m_aFmtCtx->nb_streams; ++i)
    {
        AVStream * stream = m_aFmtCtx->streams[i];
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            decoder = (AVCodec *)avcodec_find_decoder(stream->codecpar->codec_id);
            if (decoder == nullptr)
            {
                printf("Codec not found.（没有找到解码器）\n");
                return -1;
            }
            // Copy stream parameters into the decoder context.
            m_aDecodeCtx = avcodec_alloc_context3(decoder);
            if ((ret = avcodec_parameters_to_context(m_aDecodeCtx, stream->codecpar)) < 0)
            {
                qDebug() << "Audio avcodec_parameters_to_context failed,error code: " << ret;
                return -1;
            }
            m_aIndex = i;
            break;
        }
    }
    // Guard against inputs with no audio stream at all: the original fell
    // through to avcodec_open2 with a null context and decoder.
    if (decoder == nullptr || m_aDecodeCtx == nullptr)
    {
        qDebug() << "no audio stream found in input";
        return -1;
    }
    if (0 > avcodec_open2(m_aDecodeCtx, decoder, NULL))
    {
        printf("can not find or open audio decoder!\n");
        return -1;
    }
    return 0;
}

int AudioRecorder::OpenOutput()
{
    int ret = -1;
    AVStream *aStream = nullptr;
    /// 根据文件名的后缀来判断类型
    const char *outFileName = m_filePath.toStdString().c_str(); //"test2.mp3";
    bool bIsRtmp = false;   //是否RTMP直播推流
    if(m_filePath.indexOf("rtmp://") >= 0){
        bIsRtmp = true;
    }
    /////ret = avformat_alloc_output_context2(&m_oFmtCtx, nullptr, nullptr, outFileName);
    ret = avformat_alloc_output_context2(&m_oFmtCtx, nullptr, bIsRtmp ? "flv" : nullptr, outFileName);
    if (ret < 0)
    {
        qDebug() << "avformat_alloc_output_context2 failed";
        return -1;
    }


    if (m_aFmtCtx->streams[m_aIndex]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
    {
        /// 创建一路新流：音频流或视频流
        aStream = avformat_new_stream(m_oFmtCtx, NULL);
        if (!aStream)
        {
            printf("can not new audio stream for output!\n");
            return -1;
        }
        m_aOutIndex = aStream->index;


        AVCodec *encoder = (AVCodec *)avcodec_find_encoder(m_oFmtCtx->oformat->audio_codec);
        if(m_oFmtCtx->oformat->audio_codec == AV_CODEC_ID_MP3){
            ///如果是mp3的话，需要用第三方的libmp3lame
            /// 此时，需要根据  编码器名称 来查找
            encoder = (AVCodec *)avcodec_find_encoder_by_name("libmp3lame");
        }

        if (!encoder)
        {
            qDebug() << "Can not find audio encoder, id: " << m_oFmtCtx->oformat->audio_codec;
            return -1;
        }
        else{
            qDebug() << "Can find audio encoder : libmp3lame";
        }

        /// 分配编码器的上下文
        m_aEncodeCtx = avcodec_alloc_context3(encoder);
        if (nullptr == m_aEncodeCtx)
        {
            qDebug() << "audio avcodec_alloc_context3 failed";
            return -1;
        }
        /// 设置编码器参数
        m_aEncodeCtx->sample_fmt = encoder->sample_fmts ? encoder->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        m_aEncodeCtx->bit_rate = m_audioBitrate;
        m_aEncodeCtx->sample_rate = 44100; //这里是写死的，同学们可以自己设置类的成员变量，传递进来
        if (encoder->supported_samplerates)
        {
            /// ffmpeg -h  encoder=mp3
            /// Supported sample rates: 44100 48000 32000 22050 24000 16000 11025 12000 8000
            /// Supported sample formats: s32p fltp s16p
            ///
            m_aEncodeCtx->sample_rate = encoder->supported_samplerates[0];
            for (int i = 0; encoder->supported_samplerates[i]; ++i)
            {
                if (encoder->supported_samplerates[i] == 44100)
                    m_aEncodeCtx->sample_rate = 44100;
            }
        }
        m_aEncodeCtx->channels = av_get_channel_layout_nb_channels(m_aEncodeCtx->channel_layout);
        m_aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
        if (encoder->channel_layouts)
        {
            m_aEncodeCtx->channel_layout = encoder->channel_layouts[0];
            for (int i = 0; encoder->channel_layouts[i]; ++i)
            {
                if (encoder->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    m_aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        m_aEncodeCtx->channels = av_get_channel_layout_nb_channels(m_aEncodeCtx->channel_layout);
        aStream->time_base = AVRational{ 1, m_aEncodeCtx->sample_rate };

        m_aEncodeCtx->codec_tag = 0;
        m_aEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

        if (!check_sample_fmt(encoder, m_aEncodeCtx->sample_fmt))
        {
            qDebug() << "Encoder does not support sample format " << av_get_sample_fmt_name(m_aEncodeCtx->sample_fmt);
            return -1;
        }

        //打开音频编码器，打开后frame_size被设置
        ret = avcodec_open2(m_aEncodeCtx, encoder, 0);
        if (ret < 0)
        {
            qDebug() << "Can not open the audio encoder, id: " << encoder->id << "error code: " << ret;
            return -1;
        }
        else{
            printf(" open audio encoder okok!\n");
        }
        //将codecCtx中的参数传给音频输出流
        ret = avcodec_parameters_from_context(aStream->codecpar, m_aEncodeCtx);
        if (ret < 0)
        {
            qDebug() << "Output audio avcodec_parameters_from_context,error code:" << ret;
            return -1;
        }

        m_swrCtx = swr_alloc();
        if (!m_swrCtx)
        {
            qDebug() << "swr_alloc failed";
            return -1;
        }
        av_opt_set_int(m_swrCtx, "in_channel_count", m_aDecodeCtx->channels, 0);	//2
        av_opt_set_int(m_swrCtx, "in_sample_rate", m_aDecodeCtx->sample_rate, 0);	//44100
        av_opt_set_sample_fmt(m_swrCtx, "in_sample_fmt", m_aDecodeCtx->sample_fmt, 0);	//AV_SAMPLE_FMT_S16
        av_opt_set_int(m_swrCtx, "out_channel_count", m_aEncodeCtx->channels, 0);	//2
        av_opt_set_int(m_swrCtx, "out_sample_rate", m_aEncodeCtx->sample_rate, 0);	//44100
        av_opt_set_sample_fmt(m_swrCtx, "out_sample_fmt", m_aEncodeCtx->sample_fmt, 0);	//AV_SAMPLE_FMT_FLTP

        if ((ret = swr_init(m_swrCtx)) < 0)
        {
            qDebug() << "swr_init failed";
            return -1;
        }
    }

    //打开输出文件
    if (!(m_oFmtCtx->oformat->flags & AVFMT_NOFILE))
    {
        if (avio_open(&m_oFmtCtx->pb, outFileName, AVIO_FLAG_WRITE) < 0)
        {
            printf("can not open output file handle!\n");
            return -1;
        }
    }
    else{
        printf(" open output file handle okok!\n");
    }
    //写文件头:“封装格式的头”：flv,mp4,aac,......
    if (avformat_write_header(m_oFmtCtx, nullptr) < 0)
    {
        printf("can not write the header of the output file!\n");
        return -1;
    }
    return 0;
}

/// Allocates the PCM FIFO shared between the capture and mux threads,
/// sized to hold 30 encoder frames.
void AudioRecorder::InitAudioBuffer()
{
    // frame_size is only valid after avcodec_open2(); fall back to 1024
    // samples when the encoder reports 0.
    m_nbSamples = m_aEncodeCtx->frame_size;
    if (m_nbSamples == 0)
    {
        qDebug() << "m_nbSamples==0";
        m_nbSamples = 1024;
    }
    m_aFifoBuf = av_audio_fifo_alloc(m_aEncodeCtx->sample_fmt, m_aEncodeCtx->channels, 30 * m_nbSamples);
    if (m_aFifoBuf == nullptr)
        qDebug() << "av_audio_fifo_alloc failed";
}

void AudioRecorder::FlushAudioDecoder()
{
    int ret = -1;
    AVPacket pkt = { 0 };
    av_init_packet(&pkt);
    int dstNbSamples, maxDstNbSamples;
    AVFrame *rawFrame = av_frame_alloc();
    AVFrame *newFrame = AllocAudioFrame(m_aEncodeCtx, m_nbSamples);
    maxDstNbSamples = dstNbSamples = av_rescale_rnd(m_nbSamples,
                                                    m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);

    ret = avcodec_send_packet(m_aDecodeCtx, nullptr);
    if (ret != 0)
    {
        qDebug() << "flush audio avcodec_send_packet  failed, ret: " << ret;
        return;
    }
    while (ret >= 0)
    {
        ret = avcodec_receive_frame(m_aDecodeCtx, rawFrame);
        if (ret < 0)
        {
            if (ret == AVERROR(EAGAIN))
            {
                qDebug() << "flush audio EAGAIN avcodec_receive_frame";
                ret = 1;
                continue;
            }
            else if (ret == AVERROR_EOF)
            {
                qDebug() << "flush audio decoder finished";
                break;
            }
            qDebug() << "flush audio avcodec_receive_frame error, ret: " << ret;
            return;
        }
        ++g_aCollectFrameCnt;

        dstNbSamples = av_rescale_rnd(swr_get_delay(m_swrCtx, m_aDecodeCtx->sample_rate) + rawFrame->nb_samples,
                                      m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);
        if (dstNbSamples > maxDstNbSamples)
        {
            qDebug() << "flush audio newFrame realloc";
            av_freep(&newFrame->data[0]);
            ret = av_samples_alloc(newFrame->data, newFrame->linesize, m_aEncodeCtx->channels,
                                   dstNbSamples, m_aEncodeCtx->sample_fmt, 1);
            if (ret < 0)
            {
                qDebug() << "flush av_samples_alloc failed";
                return;
            }
            maxDstNbSamples = dstNbSamples;
            m_aEncodeCtx->frame_size = dstNbSamples;
            m_nbSamples = newFrame->nb_samples;
        }
        newFrame->nb_samples = swr_convert(m_swrCtx, newFrame->data, dstNbSamples,
                                           (const uint8_t **)rawFrame->data, rawFrame->nb_samples);
        if (newFrame->nb_samples < 0)
        {
            qDebug() << "flush swr_convert failed";
            return;
        }

        {
            unique_lock<mutex> lk(m_mtxABuf);
            m_cvABufNotFull.wait(lk, [newFrame, this] { return av_audio_fifo_space(m_aFifoBuf) >= newFrame->nb_samples; });
        }
        if (av_audio_fifo_write(m_aFifoBuf, (void **)newFrame->data, newFrame->nb_samples) < newFrame->nb_samples)
        {
            qDebug() << "av_audio_fifo_write";
            return;
        }
        m_cvABufNotEmpty.notify_one();
    }
    qDebug() << "audio collect frame count: " << g_aCollectFrameCnt;
}

/// Drains the audio encoder after the FIFO is empty: sends the flush
/// (nullptr) frame once, then writes every remaining packet to the muxer
/// until the encoder signals AVERROR_EOF.
void AudioRecorder::FlushEncoders()
{
    int ret = -1;
    bool aBeginFlush = false;   // send the nullptr flush frame only once

    m_aCurPts = 0;

    // NOTE(review): nFlush appears to be a leftover from an audio+video
    // variant. After the first AVERROR_EOF the loop runs once more (setting
    // m_aCurPts = INT_MAX), receives EOF again and exits. Kept as-is to
    // preserve behavior; the unused vBeginFlush flag was removed.
    int nFlush = 2;

    while (1)
    {
        AVPacket pkt = { 0 };
        av_init_packet(&pkt);

        if (!aBeginFlush)
        {
            aBeginFlush = true;
            // nullptr frame puts the encoder into draining mode.
            ret = avcodec_send_frame(m_aEncodeCtx, nullptr);
            if (ret != 0)
            {
                qDebug() << "flush audio avcodec_send_frame failed, ret: " << ret;
                return;
            }
        }
        ret = avcodec_receive_packet(m_aEncodeCtx, &pkt);
        if (ret < 0)
        {
            av_packet_unref(&pkt);
            if (ret == AVERROR(EAGAIN))
            {
                qDebug() << "flush EAGAIN avcodec_receive_packet";
                ret = 1;
                continue;
            }
            else if (ret == AVERROR_EOF)
            {
                qDebug() << "flush audio encoder finished";
                if (!(--nFlush))
                    break;
                m_aCurPts = INT_MAX;
                continue;
            }
            qDebug() << "flush audio avcodec_receive_packet failed, ret: " << ret;
            return;
        }
        pkt.stream_index = m_aOutIndex;
        // Rescale pts from the encoder time base to the muxer time base.
        av_packet_rescale_ts(&pkt, m_aEncodeCtx->time_base, m_oFmtCtx->streams[m_aOutIndex]->time_base);
        m_aCurPts = pkt.pts;
        qDebug() << "m_aCurPts: " << m_aCurPts;
        ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
        if (ret == 0)
            qDebug() << "flush write audio packet id: " << ++g_aEncodeFrameCnt;
        else
            qDebug() << "flush audio av_interleaved_write_frame failed, ret: " << ret;
        av_packet_unref(&pkt);
    }
}

void AudioRecorder::Release()
{
    if (m_vOutFrame)
    {
        av_frame_free(&m_vOutFrame);
        m_vOutFrame = nullptr;
    }

    if (m_oFmtCtx)
    {
        avio_close(m_oFmtCtx->pb);
        avformat_free_context(m_oFmtCtx);
        m_oFmtCtx = nullptr;
    }
    //if (m_vDecodeCtx)
    //{
    //  // FIXME: 为什么这里会崩溃
    //	avcodec_free_context(&m_vDecodeCtx);
    //	m_vDecodeCtx = nullptr;
    //}
    if (m_aDecodeCtx)
    {
        avcodec_free_context(&m_aDecodeCtx);
        m_aDecodeCtx = nullptr;
    }

    if (m_aEncodeCtx)
    {
        avcodec_free_context(&m_aEncodeCtx);
        m_aEncodeCtx = nullptr;
    }

    if (m_aFifoBuf)
    {
        av_audio_fifo_free(m_aFifoBuf);
        m_aFifoBuf = nullptr;
    }

    if (m_aFmtCtx)
    {
        avformat_close_input(&m_aFmtCtx);
        m_aFmtCtx = nullptr;
    }
}

/// Stores the microphone device name used by OpenAudio() to build the
/// dshow input URL "audio=<name>".
void AudioRecorder::setMicro(QString mic)
{
    m_micro = std::move(mic);
}
