﻿#include "FmpegPlayer.h"

// Maximum believable frame duration in seconds; pts gaps larger than this
// are treated as timestamp discontinuities by VpDuration()/ComputeTargetDelay().
int max_frame_duration = 3600;

/**
 * @brief Default-construct a decoder with all state cleared.
 *        DecoderInit() must be called before DecoderStart().
 */
Decoder::Decoder()
{
    m_Queue = nullptr;
    m_Avctx = nullptr;
    m_DecoderThread = nullptr;
    m_PktSerial = 0;
    m_DecoderReorderPts = -1;
    m_PacketPending = 0;
    // The members below are read in DecoderDecodeFrame() but were previously
    // never initialized anywhere visible — start them in a defined state.
    m_Finished = 0;                 // serial of the last fully drained sequence
    m_NextPts = AV_NOPTS_VALUE;     // audio pts prediction (none yet)
    m_StartPts = AV_NOPTS_VALUE;    // copied into m_NextPts on a flush packet
    // m_NextPtsTb / m_StartPtsTb are only read when the matching pts is
    // valid, so they need no explicit initialization here.
}

/**
 * @brief Join and release the decoder thread if one was started.
 * @note The packet queue must already have been aborted
 *       (packet_queue_abort) before destruction, otherwise join()
 *       can block forever inside packet_queue_get().
 */
Decoder::~Decoder()
{
    // m_DecoderThread starts as NULL (set in the constructor), so this is
    // safe even when DecoderStart() was never called.
    if (m_DecoderThread)
    {
        if (m_DecoderThread->joinable())
        {
            m_DecoderThread->join();
        }
        delete m_DecoderThread;     // BUGFIX: previously leaked
        m_DecoderThread = nullptr;
    }
}

/// Bind this decoder to its codec context and the packet queue it will drain.
/// Must be called before DecoderStart().
void Decoder::DecoderInit(AVCodecContext *avctx, PacketQueue *queue)
{
    m_Avctx = avctx;
    m_Queue = queue;
}

/**
 * @brief Enable the packet queue and spawn the matching decode thread.
 * @param codecType AVMEDIA_TYPE_VIDEO or AVMEDIA_TYPE_AUDIO.
 * @param arg       opaque pointer handed to the thread (the FmpegPlayer*).
 * @return 0 on success, -1 for unsupported media types.
 */
int Decoder::DecoderStart(AVMediaType codecType, void *arg)
{
    // The queue must be accepting packets before the worker starts pulling.
    packet_queue_start(m_Queue);
    switch (codecType)
    {
    case AVMEDIA_TYPE_VIDEO:
        m_DecoderThread = new std::thread(&Decoder::VideoThread, this, arg);
        return 0;
    case AVMEDIA_TYPE_AUDIO:
        m_DecoderThread = new std::thread(&Decoder::AudioThread, this, arg);
        return 0;
    default:
        return -1;   // no decode loop exists for this media type
    }
}

/**
 * @brief Audio decoder thread entry: drains decoded frames from the codec
 *        and pushes them into the sample FrameQueue.
 * @param arg  FmpegPlayer* owning the queues (passed via DecoderStart).
 * @return 0 normally, or a negative AVERROR (e.g. ENOMEM).
 */
int Decoder::AudioThread(void *arg)
{
    FmpegPlayer *is = (FmpegPlayer *)arg;
    AVFrame *frame = av_frame_alloc();  // holder for decoded frames
    Frame *af;
    int got_frame = 0;  // whether a frame was produced this iteration
    AVRational tb;      // timebase
    int ret = 0;
    if (!frame)
    {
        return AVERROR(ENOMEM);
    }
    do {
        // Pull the next decoded frame (blocks on the packet queue as needed).
        if ((got_frame = DecoderDecodeFrame(frame)) < 0)
        {
            goto the_end;
        }
        if (got_frame)
        {
            tb = (AVRational) {1, frame->sample_rate};   // use 1/sample_rate as the timebase
            // Wait for a writable slot in the sample queue.
            if (!(af = frame_queue_peek_writable(&is->m_Sampq)))
            {
                // Queue aborted while waiting.
                goto the_end;
            }
            // Fill the slot and commit it into the FrameQueue.
            af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);  // pts in seconds
            af->pos = frame->pkt_pos;
            af->serial = is->m_Auddec.m_PktSerial;
            af->duration = av_q2d((AVRational)
            {
                frame->nb_samples, frame->sample_rate
            });
            av_frame_move_ref(af->frame, frame);
            frame_queue_push(&is->m_Sampq);  // commit: one frame actually enqueued
        }
        // NOTE(review): ret is never reassigned in this loop (it stays 0), so
        // the condition below is always true and the loop only exits via the
        // gotos above — this mirrors ffplay's audio_thread without filters.
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
the_end:
    av_frame_free(&frame);
    return ret;
}

int Decoder::VideoThread(void *arg)
{
    FmpegPlayer *is = (FmpegPlayer *)arg;
    AVFrame *frame = av_frame_alloc();              // 分配解码帧
    double pts;                                     // pts
    double duration;                                // 帧持续时间
    int ret;
    //获取stream timebase
    AVRational tb = is->m_VideoSt->time_base;       // 获取stream timebase
    //获取帧率，以便计算每帧picture的duration
    AVRational frame_rate = av_guess_frame_rate(is->m_Ic, is->m_VideoSt, NULL);
    if (!frame)
    {
        return AVERROR(ENOMEM);
    }
    // 循环取出视频解码的帧数据
    for (;;)
    {
        // 获取解码后的视频帧
        //返回值-1: 请求退出，0: 解码已经结束了，不再有数据可以读取，1: 获取到解码后的frame
        ret = GetVideoFrame(frame);
        if (ret < 0)
        {
            goto the_end;    //解码结束, 什么时候会结束
        }
        if (!ret)
        {
            //没有解码得到画面, 持续循环他
            continue;
        }
        // 计算帧持续时间和换算pts值为秒
        // 1/帧率 = duration 单位秒, 没有帧率时则设置为0, 有帧率帧计算出帧间隔
        duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational) {
            frame_rate.den, frame_rate.num
        }) : 0);
        // 根据AVStream timebase计算出pts值, 单位为秒
        pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);       // 单位为秒
        // 将解码后的视频帧插入队列
        ret = QueuePicture(&is->m_Pictq, frame, pts, duration, frame->pkt_pos, m_PktSerial);
        // 释放frame对应的数据
        av_frame_unref(frame);
        if (ret < 0) { // 返回值小于0则退出线程
            goto the_end;
        }
    }
the_end:
    av_frame_free(&frame);
    return 0;
}

/**
 * @brief Fetch one decoded video frame.
 * @return -1 when the decode loop must exit, otherwise DecoderDecodeFrame's
 *         verdict (0: no frame / finished, 1: frame obtained).
 */
int Decoder::GetVideoFrame(AVFrame *frame)
{
    const int got_picture = DecoderDecodeFrame(frame);
    // A negative result propagates the shutdown request to the caller.
    return (got_picture < 0) ? -1 : got_picture;
}

/**
 * @brief Core decode pump shared by the audio and video threads.
 *        First drains decoded frames from the codec; when the codec is
 *        starved it pulls the next packet from the PacketQueue and feeds it.
 * @return -1: shutdown requested, 0: decode finished (EOF drained),
 *          1: got a decoded frame.
 */
int Decoder::DecoderDecodeFrame(AVFrame *frame)
{
    int ret = AVERROR(EAGAIN);
    for (;;)
    {
        AVPacket pkt;
        // While the stream is continuous, try to receive decoded frames.
        if (m_Queue->serial == m_PktSerial)
        {
            // Only drain frames that belong to the current play sequence.
            do
            {
                if (m_Queue->abort_request)
                {
                    return -1;    // shutdown requested
                }
                // Receive a decoded frame from the codec.
                switch (m_Avctx->codec_type)
                {
                case AVMEDIA_TYPE_VIDEO:
                    ret = avcodec_receive_frame(m_Avctx, frame);
                    if (ret >= 0)
                    {
                        if (m_DecoderReorderPts == -1)
                        {
                            // Let FFmpeg pick the best timestamp (stream->time_base units).
                            frame->pts = frame->best_effort_timestamp;
                        }
                        else if (!m_DecoderReorderPts)
                        {
                            frame->pts = frame->pkt_dts;
                        }
                    }
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    ret = avcodec_receive_frame(m_Avctx, frame);
                    if (ret >= 0)
                    {
                        AVRational tb = {1, frame->sample_rate};
                        if (frame->pts != AV_NOPTS_VALUE)
                        {
                            // Valid pts: rescale from pkt_timebase (which is
                            // effectively stream->time_base) into {1, sample_rate}.
                            frame->pts = av_rescale_q(frame->pts, m_Avctx->pkt_timebase, tb); //stream->time_base
                        } else if (m_NextPts != AV_NOPTS_VALUE)
                        {
                            // Missing pts: predict from the previous frame's
                            // m_NextPts/m_NextPtsTb, rescaled to {1, sample_rate}.
                            frame->pts = av_rescale_q(m_NextPts, m_NextPtsTb, tb);
                        }
                        if (frame->pts != AV_NOPTS_VALUE)
                        {
                            // Predict the next frame's pts from this one's sample count.
                            m_NextPts = frame->pts + frame->nb_samples;
                            m_NextPtsTb = tb; // remember the timebase used
                        }
                    }
                    break;
                }
                // Decoder fully drained after a flush: report end-of-stream.
                if (ret == AVERROR_EOF)
                {
                    m_Finished = m_PktSerial;
                    avcodec_flush_buffers(m_Avctx);
                    return 0;
                }
                // A frame was produced.
                if (ret >= 0)
                {
                    return 1;
                }
            } while (ret != AVERROR(EAGAIN));   // EAGAIN: codec wants another packet
        }
        // Fetch a packet; drop "stale" packets from old play sequences.
        do {
            // Reuse a pending packet the codec previously refused (EAGAIN).
            if (m_PacketPending)
            {
                av_packet_move_ref(&pkt, &m_Pkt);
                m_PacketPending = 0;
            }
            else
            {
                // Blocking read from the packet queue.
                if (packet_queue_get(m_Queue, &pkt, 1, &m_PktSerial) < 0)
                {
                    return -1;
                }
            }
            if(m_Queue->serial != m_PktSerial)
            {
                av_packet_unref(&pkt); // discard packets from an old sequence
            }
        } while (m_Queue->serial != m_PktSerial);           // keep reading until serials match
        // Feed the packet to the decoder.
        if (pkt.data == flush_pkt.data)
        {
            avcodec_flush_buffers(m_Avctx);     // drop any buffered frames
            m_Finished = 0;             // reset the finished marker
            m_NextPts = m_StartPts;                 // restart audio pts prediction
            m_NextPtsTb = m_StartPtsTb;             // restart audio pts prediction
        }
        else
        {
            if (m_Avctx->codec_type == AVMEDIA_TYPE_SUBTITLE)
            {

            }
            else
            {
                // Codec not ready for input (EAGAIN): stash the packet and
                // retry after draining more frames.
                if (avcodec_send_packet(m_Avctx, &pkt) == AVERROR(EAGAIN))
                {
                    m_PacketPending = 1;
                    av_packet_move_ref(&m_Pkt, &pkt);
                }
            }
            av_packet_unref(&pkt);	// always release — the packet data is ours to free
        }
    }
}

/**
 * @brief Move one decoded picture into the frame queue.
 * @return 0 on success, -1 when the queue was aborted while waiting for space.
 */
int Decoder::QueuePicture(FrameQueue *fq, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
{
    Frame *slot = frame_queue_peek_writable(fq);
    if (slot == NULL)
    {
        // No writable slot means an abort was requested while blocked.
        return -1;
    }
    // A writable slot is available — describe the picture.
    slot->width    = src_frame->width;
    slot->height   = src_frame->height;
    slot->format   = src_frame->format;
    slot->pts      = pts;
    slot->duration = duration;
    slot->pos      = pos;
    slot->serial   = serial;
    // Transfer ownership of the frame data into the slot (src is reset).
    av_frame_move_ref(slot->frame, src_frame);
    frame_queue_push(fq);   // commit: advance the write index
    return 0;
}

/**
 * @brief Pull one decoded audio frame from the sample queue, resample it to
 *        the SDL device format when needed, and expose the PCM data through
 *        is->m_AudioBuf. Also advances is->m_AudioClock to the end of the
 *        consumed frame.
 * @param is the player instance (owns queues, swr context and buffers).
 * @return number of bytes now available in is->m_AudioBuf, or a negative
 *         value on failure / abort.
 */
static int AudioDecodeFrame(FmpegPlayer *is)
{
    int data_size, resampled_data_size;
    int64_t dec_channel_layout;
    int wanted_nb_samples;
    Frame *af;
    int ret = 0;
    // Pop frames until one matching the current play serial is found.
    do {
        if (!(af = frame_queue_peek_readable(&is->m_Sampq)))
        {
            return -1;  // queue aborted
        }
        frame_queue_next(&is->m_Sampq);  // releases frames from stale serials
    } while (af->serial != is->m_Audioq.serial); // beware: can spin if serials never match
    // Bytes the source frame occupies: channels * nb_samples * bytes_per_sample.
    data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
                                           af->frame->nb_samples,
                                           (enum AVSampleFormat)af->frame->format, 1);
    // Channel layout: trust the frame's own layout only when it agrees with
    // its channel count; otherwise derive the default layout for that count.
    dec_channel_layout =  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
                          af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
    if(dec_channel_layout == 0)
    {
        return -1; // no usable layout — abnormal frame (dead "= 3" assignment removed)
    }
    // Sample-count correction (synchronize_audio) would go here when video is
    // the master clock; for now the frame's own count is used unchanged.
    wanted_nb_samples = af->frame->nb_samples;
    // m_AudioTgt is the SDL device format obtained in AudioOpen(); m_AudioSrc
    // starts equal to it. If the incoming frame deviates from m_AudioSrc (or
    // the sample count was corrected without an swr context), rebuild the
    // resampler frame(source) -> m_AudioTgt(sink) and record the new source.
    if (af->frame->format           != is->m_AudioSrc.fmt            || // sample format
        dec_channel_layout      != is->m_AudioSrc.channel_layout || // channel layout
        af->frame->sample_rate  != is->m_AudioSrc.freq  ||        // sample rate
        (wanted_nb_samples != af->frame->nb_samples && !is->m_SwrCtx) ) {
        swr_free(&is->m_SwrCtx);
        is->m_SwrCtx = swr_alloc_set_opts(NULL,
                                         is->m_AudioTgt.channel_layout,  // sink (device) format
                                         is->m_AudioTgt.fmt,
                                         is->m_AudioTgt.freq,
                                         dec_channel_layout,            // source (frame) format
                                         (enum AVSampleFormat)af->frame->format,
                                         af->frame->sample_rate,
                                         0, NULL);
        // BUGFIX: the original declared a second `int ret = 0;` here which
        // shadowed the outer one — the fail path then returned 0 (success)
        // whenever swr_init() failed.
        if (!is->m_SwrCtx || (ret = swr_init(is->m_SwrCtx)) < 0)
        {
            // BUGFIX: the error text was sprintf'd into a local buffer that
            // was never logged (and it clobbered av_strerror's output); log
            // the message directly instead.
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                   af->frame->sample_rate, av_get_sample_fmt_name((enum AVSampleFormat)af->frame->format), af->frame->channels,
                   is->m_AudioTgt.freq, av_get_sample_fmt_name(is->m_AudioTgt.fmt), is->m_AudioTgt.channels);
            swr_free(&is->m_SwrCtx);
            ret = -1;
            goto fail;
        }
        is->m_AudioSrc.channel_layout = dec_channel_layout;
        is->m_AudioSrc.channels       = af->frame->channels;
        is->m_AudioSrc.freq = af->frame->sample_rate;
        is->m_AudioSrc.fmt = (enum AVSampleFormat)af->frame->format;
    }
    if (is->m_SwrCtx)
    {
        // Resample input: the frame's sample buffers (planar or interleaved).
        const uint8_t **in = (const uint8_t **)af->frame->extended_data; // data[0] data[1]
        // Resample output: m_AudioBuf1 (grown on demand below).
        uint8_t **out = &is->m_AudioBuf1;
        // Output sample budget: input count scaled by the rate ratio
        // (e.g. 1024 * 48000 / 96000 = 512), +256 slack because swr keeps
        // internal history that can be flushed together with this call.
        int out_count = (int64_t)wanted_nb_samples * is->m_AudioTgt.freq / af->frame->sample_rate
                        + 256;
        // Bytes needed for that many samples in the target format/channels.
        int out_size  = av_samples_get_buffer_size(NULL, is->m_AudioTgt.channels,
                        out_count, is->m_AudioTgt.fmt, 0);
        int len2;
        if (out_size < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
            ret = -1;
            goto fail;
        }
        // Reallocate m_AudioBuf1 (and update m_AudioBuf1Size) only when the
        // current buffer is smaller than out_size.
        av_fast_malloc(&is->m_AudioBuf1, &is->m_AudioBuf1Size, out_size);
        if (!is->m_AudioBuf1)
        {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        // Convert; len2 is the per-channel sample count actually produced.
        len2 = swr_convert(is->m_SwrCtx, out, out_count, in, af->frame->nb_samples);
        if (len2 < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
            ret = -1;
            goto fail;
        }
        if (len2 == out_count)
        { // The over-allocated budget was fully used — output may be truncated.
            av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
            if (swr_init(is->m_SwrCtx) < 0)
            {
                swr_free(&is->m_SwrCtx);
            }
        }
        // Publish the resampled data and its size in bytes.
        is->m_AudioBuf = is->m_AudioBuf1;
        resampled_data_size = len2 * is->m_AudioTgt.channels * av_get_bytes_per_sample(is->m_AudioTgt.fmt);
    }
    else
    {
        // No resampling needed: point directly at the frame's data.
        is->m_AudioBuf = af->frame->data[0]; // s16 interleaved: data[0]; fltp: data[0]/data[1]
        resampled_data_size = data_size;
    }
    // Audio clock: end-of-frame time = pts + nb_samples / sample_rate.
    if (!std::isnan(af->pts))
    {
        is->m_AudioClock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
    }
    else
    {
        is->m_AudioClock = NAN;
    }
    is->m_AudioClockSerial = af->serial;    // remember the consumed frame's serial
    ret = resampled_data_size;
fail:
    return ret;
}


/* prepare a new audio buffer */
/**
 * @brief SDL audio pull callback: fills `stream` with `len` bytes of PCM.
 * @param opaque    FmpegPlayer* (set as userdata in AudioOpen).
 * @param stream    destination buffer SDL plays from.
 * @param len       number of bytes SDL needs this callback.
 */
static void SdlAudioCallback(void *opaque, Uint8 *stream, int len)
{
    // e.g. 2ch * 2 bytes * 1024 samples = 4096 -> each callback pulls ~2 frames
    FmpegPlayer *is = (FmpegPlayer *)opaque;
    int audio_size, len1;
    is->m_AudioCallbackTime = av_gettime_relative();
    while (len > 0)
    {
        // Keep copying until `len` bytes have been produced.
        /* (1) If m_AudioBufIndex < m_AudioBufSize there is leftover data from
         *     the previous decode — copy that to stream first.
         * (2) Once m_AudioBuf is exhausted, refill it via AudioDecodeFrame().
         */
        if (is->m_AudioBufIndex >= is->m_AudioBufSize)
        {
            audio_size = AudioDecodeFrame(is);
            if (audio_size < 0)
            {
                // No data available: emit a block of silence, rounded to
                // whole device frames.
                is->m_AudioBuf = NULL;
                is->m_AudioBufSize = SDL_AUDIO_MIN_BUFFER_SIZE / is->m_AudioTgt.frame_size
                                     * is->m_AudioTgt.frame_size;
                is->m_AudioNoData  = 1;      // nothing left to read
                if(is->m_EOF)
                {
                    // Input fully read: check whether playback is over and
                    // the UI should be told to stop.
                    is->CheckPlayFinish();
                }
            }
            else
            {
                is->m_AudioBufSize = audio_size;
                is->m_AudioNoData = 0;
            }
            is->m_AudioBufIndex = 0;
            // Placeholder: playback-rate (tempo) processing would go here.

        }
        if(is->m_AudioBufSize == 0)
        {
            // NOTE(review): if AudioDecodeFrame() ever returns 0 this
            // busy-spins inside the SDL callback — consider writing silence
            // for the remaining bytes instead.
            continue;
        }
        // Copy no more than what remains in the buffer.
        len1 = is->m_AudioBufSize - is->m_AudioBufIndex;
        if (len1 > len)
        {
            len1 = len;
        }
        if (is->m_AudioBuf && is->m_AudioVolum == SDL_MIX_MAXVOLUME)
        {
            // Full volume: plain copy.
            memcpy(stream, (uint8_t *)is->m_AudioBuf + is->m_AudioBufIndex, len1);
        }
        else
        {
            // Attenuated (or silent when m_AudioBuf is NULL): mix into zeros.
            memset(stream, 0, len1);
            if (is->m_AudioBuf)
            {
                SDL_MixAudio(stream, (uint8_t *)is->m_AudioBuf + is->m_AudioBufIndex, len1, is->m_AudioVolum);
            }
        }
        /* Advance past the bytes just copied; m_AudioBufIndex now points at
         * the first byte of m_AudioBuf not yet sent to SDL. */
        len -= len1;
        stream += len1;
        is->m_AudioBufIndex += len1;
    }
    is->m_AudioWriteBufSize = is->m_AudioBufSize - is->m_AudioBufIndex;
    // Update the audio clock: subtract the latency of data still pending in
    // SDL's double buffer plus our own unconsumed bytes.
    if (!std::isnan(is->m_AudioClock))
    {
        double audio_clock = is->m_AudioClock;
        SetClockAt(&is->m_Audclk,
                     audio_clock  - (double)(2 * is->m_AudioHwBufSize + is->m_AudioWriteBufSize) / is->m_AudioTgt.bytes_per_sec,
                     is->m_AudioClockSerial,
                     is->m_AudioCallbackTime / 1000000.0);
    }
}



/**
 * @brief Construct the player widget: reset all playback state and embed the
 *        render surface in a zero-margin vertical layout.
 * @param width  initial render width in pixels.
 * @param height initial render height in pixels.
 */
FmpegPlayer::FmpegPlayer(int width, int height)
{
    m_Width = width;
    m_Height = height;
    m_SeekByBytes = -1;         // -1: decided from the container in ReadThread()
    m_Realtime = 0;
    m_AbortRequest = 0;
    m_VideoNoData = 0;
    m_SwrCtx = nullptr;
    m_AudioHwBufSize = 0;
    m_AudioBuf = nullptr;
    m_AudioBuf1 = nullptr;
    m_AudioBufSize = 0;
    m_AudioBuf1Size = 0;
    m_AudioBufIndex = 0;
    m_AudioVolum = 50;
    m_StartupVolume = 50;
    m_AudioCallbackTime = 0;
    // BUGFIX: the members below are read elsewhere (GetMasterSyncType,
    // VideoRefresh, ReadThread, the destructor) but were previously left
    // uninitialized — give them defined defaults.
    m_Ic = nullptr;
    m_VideoSt = nullptr;
    m_AudioSt = nullptr;
    m_VideoStream = -1;
    m_AudioStream = -1;
    m_ReadThread = nullptr;
    m_VideoRefreshThread = nullptr;
    m_EOF = 0;
    m_AudioNoData = 0;
    m_AudioWriteBufSize = 0;
    m_ForceRefresh = 0;
    m_FrameTimer = 0.0;
    m_Step = 0;
    m_Framedrop = 0;            // 0: never drop late frames
    m_FrameDropsLate = 0;
    m_AvSyncType = AV_SYNC_AUDIO_MASTER;    // default: audio drives sync

    m_Render = RenderManager::GetInstance()->GetRender(RENDER_TYPE::QT_RENDER, width, height);

    // Zero-margin vertical layout hosting only the render widget.
    m_MainLayout = new QVBoxLayout();
    m_MainLayout->setSpacing(0);
    m_MainLayout->setContentsMargins(QMargins());
    setLayout(m_MainLayout);

    m_MainLayout->addWidget(m_Render);
}

// Destructor.
// NOTE(review): currently a no-op — m_ReadThread and m_VideoRefreshThread
// are never joined or deleted, and StreamClose() is never called, so threads
// and resources leak on destruction. Once the constructor guarantees the
// thread pointers start out null, this should set m_AbortRequest = 1, join
// both threads, delete them, and call StreamClose().
FmpegPlayer::~FmpegPlayer()
{

}

/**
 * @brief Start playback of the URL set via SetUrl().
 * @return 0 on success, -1 when the stream pipeline failed to open.
 */
int FmpegPlayer::Play()
{
    // StreamOpen() spins up the demux and refresh threads.
    return (StreamOpen() < 0) ? -1 : 0;
}

/// Remember the media location that the next Play() call will open.
void FmpegPlayer::SetUrl(QString url)
{
    m_Url = url;    // QString is implicitly shared — assignment is cheap
}

/**
 * @brief Bring up the playback pipeline: SDL, frame/packet queues, clocks,
 *        volume, then the demux and video-refresh threads.
 * @return 0 on success, -1 on any initialization failure (after StreamClose).
 */
int FmpegPlayer::StreamOpen()
{
    // SDL must be initialized before any audio device or timer work.
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
    {
        av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
        return -1;
    }
    // Frame queues (decoded pictures/samples) then packet queues (demuxed
    // data); && short-circuits in the same order the original checks ran.
    const bool queues_ok =
        frame_queue_init(&m_Pictq, &m_Videoq, VIDEO_PICTURE_QUEUE_SIZE_DEFAULT, 1) >= 0 &&
        frame_queue_init(&m_Sampq, &m_Audioq, SAMPLE_QUEUE_SIZE, 1) >= 0 &&
        packet_queue_init(&m_Videoq) >= 0 &&
        packet_queue_init(&m_Audioq) >= 0;
    if (!queues_ok)
    {
        StreamClose();
        return -1;
    }
    // Clocks track their queue's serial so seeks invalidate old timestamps.
    InitClock(&m_Vidclk, &m_Videoq.serial);
    InitClock(&m_Audclk, &m_Audioq.serial);
    m_AudioClockSerial = -1;
    // Map the 0..100 startup volume onto SDL's 0..SDL_MIX_MAXVOLUME range.
    m_StartupVolume = av_clip(m_StartupVolume, 0, 100);
    m_StartupVolume = av_clip(SDL_MIX_MAXVOLUME * m_StartupVolume / 100, 0, SDL_MIX_MAXVOLUME);
    m_AudioVolum = m_StartupVolume;
    // Demux thread feeds the packet queues...
    m_ReadThread = new std::thread(&FmpegPlayer::ReadThread, this);
    // ...and the refresh thread drains the picture queue onto the screen.
    m_VideoRefreshThread = new std::thread(&FmpegPlayer::VideoRefreshThread, this);
    return 0;
}

// Tear-down counterpart of StreamOpen().
// NOTE(review): currently a no-op — queues, clocks, codec contexts and SDL
// are never released; implement before relying on repeated open/close cycles.
void FmpegPlayer::StreamClose()
{

}

int FmpegPlayer::ReadThread()
{
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];      // AVMEDIA_TYPE_VIDEO/ AVMEDIA_TYPE_AUDIO 等，用来保存stream index
    AVPacket *pkt = av_packet_alloc();
    //初始化为-1,如果一直为-1说明没相应steam
    memset(st_index, -1, sizeof(st_index));
    m_VideoStream = -1;
    m_AudioStream = -1;
    m_EOF = 0;
    //创建上下文结构体，这个结构体是最上层的结构体，表示输入上下文
    m_Ic = avformat_alloc_context();
    if (!m_Ic)
    {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    //打开文件，主要是探测协议类型，如果是网络文件则创建网络链接等
    err = avformat_open_input(&m_Ic, m_Url.toStdString().c_str(), NULL, NULL);
    if (err < 0)
    {
        ret = -1;
        goto fail;
    }
    //判断是否需要通过字节进行seek
    if (m_SeekByBytes < 0)
    {
        m_SeekByBytes = !!(m_Ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", m_Ic->iformat->name);
    }
    /*
     * 探测媒体类型，可得到当前文件的封装格式，音视频编码参数等信息
     * 调用该函数后得多的参数信息会比只调用avformat_open_input更为详细，
     * 其本质上是去做了decdoe packet获取信息的工作
     * codecpar, filled by libavformat on stream creation or
     * in avformat_find_stream_info()
     */
    err = avformat_find_stream_info(m_Ic, NULL);
    if (err < 0)
    {
        ret = -1;
        goto fail;
    }
    m_Realtime = IsRealtime(m_Ic);
    //利用av_find_best_stream选择流，
    st_index[AVMEDIA_TYPE_VIDEO] =
        av_find_best_stream(m_Ic, AVMEDIA_TYPE_VIDEO,
                            st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    st_index[AVMEDIA_TYPE_AUDIO] =
        av_find_best_stream(m_Ic, AVMEDIA_TYPE_AUDIO,
                            st_index[AVMEDIA_TYPE_AUDIO],
                            st_index[AVMEDIA_TYPE_VIDEO],
                            NULL, 0);
    //打开视频、音频解码器。在此会打开相应解码器，并创建相应的解码线程
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0)
    {
        // 如果有音频流则打开音频流
        StreamComponentOpen(st_index[AVMEDIA_TYPE_AUDIO]);
    }
    ret = -1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0)
    {
        // 如果有视频流则打开视频流
        ret = StreamComponentOpen( st_index[AVMEDIA_TYPE_VIDEO]);
    }
    if (m_VideoStream < 0 && m_AudioStream < 0)
    {
        ret = -1;
        goto fail;
    }
    while (1)
    {
        if(m_AbortRequest)
        {
            break;
        }
        //如果队列已满则不需要阅读更多
        if ((m_Audioq.size + m_Videoq.size  > MAX_QUEUE_SIZE
             || (StreamHasEnoughPackets(m_AudioSt, m_AudioStream, &m_Audioq) &&
                 StreamHasEnoughPackets(m_VideoSt, m_VideoStream, &m_Videoq) ))) {
            // 等待10ms
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            continue;
        }
        //读取媒体数据，得到的是音视频分离后、解码前的数据
        ret = av_read_frame(m_Ic, pkt); // 调用不会释放pkt的数据，需要我们自己去释放packet的数据
        if(ret < 0)
        {
            // 出错或者已经读取完毕了
            if ((ret == AVERROR_EOF || avio_feof(m_Ic->pb)) && !m_EOF)
            {
                // 读取完毕了
                // 刷空包给队列
                if (m_VideoStream >= 0)
                {
                    packet_queue_put_nullpacket(&m_Videoq, m_VideoStream);
                }
                if (m_AudioStream >= 0)
                {
                    packet_queue_put_nullpacket(&m_Audioq, m_AudioStream);
                }
                m_EOF = 1;
            }
            if (m_Ic->pb && m_Ic->pb->error)
            {
                // io异常 // 退出循环
                break;
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(10));     // 读取完数据了，这里可以使用timeout的方式休眠等待下一步的检测
            continue;		// 继续循环
        }
        else
        {
            m_EOF = 0;
        }
        // 插入队列先只处理音频包
        if (pkt->stream_index == m_AudioStream)
        {
            packet_queue_put(&m_Audioq, pkt);
        }
        else if (pkt->stream_index == m_VideoStream)
        {
            packet_queue_put(&m_Videoq, pkt);
        }
        else
        {
            av_packet_unref(pkt);       //不入队列则直接释放数据
        }
    }
fail:
    return 0;
}

#define REFRESH_RATE 0.01  // 每帧休眠10ms
/**
 * @brief Video refresh thread: paces VideoRefresh() calls. VideoRefresh()
 *        can shrink remaining_time below the REFRESH_RATE tick when the next
 *        frame is due sooner.
 * @return 0 once m_AbortRequest stops the loop.
 */
int FmpegPlayer::VideoRefreshThread()
{
    double remaining_time = 0.0;
    while (!m_AbortRequest)
    {
        if (remaining_time > 0.0)
        {
            av_usleep((int)(int64_t)(remaining_time * 1000000.0));
        }
        remaining_time = REFRESH_RATE;
        VideoRefresh(&remaining_time);
    }
    return 0;   // BUGFIX: the original fell off the end of a non-void function (UB)
}

/**
 * @brief Display at most one video frame, pacing by the computed target delay.
 * @param remaining_time [in/out] sleep budget for the caller; reduced when
 *        the next frame is due sooner than the default tick.
 */
void FmpegPlayer::VideoRefresh(double *remaining_time)
{
    Frame *vp = nullptr, *lastvp = nullptr;
    // Only meaningful when a video stream exists; frames are currently shown
    // as soon as pacing allows.
    if (m_VideoSt) {
retry:
        if (frame_queue_nb_remaining(&m_Pictq) == 0)
        {
            // Picture queue is empty.
            m_VideoNoData = 1;  // nothing to display
            if(m_EOF == 1)
            {
                CheckPlayFinish();
            }
        }
        else
        {
            m_VideoNoData = 0;  // frames available
            double last_duration, duration, delay;
            lastvp = frame_queue_peek_last(&m_Pictq);   // frame currently shown
            vp = frame_queue_peek(&m_Pictq);            // next frame to show
            if (vp->serial != m_Videoq.serial)
            {
                // Stale serial (e.g. after a seek): drop it and retry.
                frame_queue_next(&m_Pictq);
                goto retry;
            }
            if (lastvp->serial != vp->serial)
            {
                // New play sequence: restart the frame timer from "now".
                m_FrameTimer = av_gettime_relative() / 1000000.0;
            }
            last_duration = VpDuration(lastvp, vp);
            delay = ComputeTargetDelay(last_duration);

            double time = av_gettime_relative() / 1000000.0;
            if (time <  m_FrameTimer + delay)
            {
                // Too early for vp: shrink the caller's sleep budget and keep
                // showing the current picture.
                *remaining_time = FFMIN( m_FrameTimer + delay - time, *remaining_time);
                goto display;
            }
            m_FrameTimer += delay;
            if (delay > 0 && time -  m_FrameTimer > AV_SYNC_THRESHOLD_MAX) {
                // Timer drifted too far behind real time: resync it.
                m_FrameTimer = time;
            }
            SDL_LockMutex(m_Pictq.mutex);
            if (!std::isnan(vp->pts))
            {
                UpdateVideoPts(vp->pts, vp->pos, vp->serial);
            }
            SDL_UnlockMutex(m_Pictq.mutex);
            if (frame_queue_nb_remaining(&m_Pictq) > 1)
            {
                // Frame-drop check: if the frame after vp is already due,
                // drop vp instead of displaying it late.
                Frame *nextvp = frame_queue_peek_next(&m_Pictq);
                duration = VpDuration(vp, nextvp);
                if (!m_Step && (m_Framedrop > 0 || (m_Framedrop && GetMasterSyncType() != AV_SYNC_VIDEO_MASTER))
                    && time >  m_FrameTimer + duration) {
                    m_FrameDropsLate++;
                    frame_queue_next(&m_Pictq);
                    goto retry;
                }
            }

            frame_queue_next(&m_Pictq);
            m_ForceRefresh = 1;
        }
display:
        // Render the pending picture.
        if (m_ForceRefresh/* && m_Pictq.rindex_shown*/)
        {
            if(vp)
            {
                if(m_Render != NULL)
                {
                    m_Render->StartPlay();
                    m_Render->Draw(vp);
                    // NOTE(review): frame_queue_next was already called on
                    // the non-drop path above; this second advance may skip
                    // a frame — verify against FrameQueue's rindex_shown
                    // semantics.
                    frame_queue_next(&m_Pictq);
                }
            }
        }
    }
    m_ForceRefresh = 0;
}

/**
 * @brief Heuristic realtime detection: streaming demuxers (rtp/rtsp/sdp/rtmp)
 *        or rtp:/udp: URLs count as realtime sources.
 * @return 1 when the input looks like a live stream, 0 otherwise.
 */
int FmpegPlayer::IsRealtime(AVFormatContext * s)
{
    const char *fmt_name = s->iformat->name;
    if (!strcmp(fmt_name, "rtp")  ||
        !strcmp(fmt_name, "rtsp") ||
        !strcmp(fmt_name, "sdp")  ||
        !strcmp(fmt_name, "rtmp"))
    {
        return 1;
    }
    if (s->pb)
    {
        // Demuxer name was inconclusive — check the URL scheme instead.
        if (!strncmp(s->filename, "rtp:", 4) || !strncmp(s->filename, "udp:", 4))
        {
            return 1;
        }
    }
    return 0;
}

/**
 * @brief Open the decoder for one stream and start its decode thread.
 *        For audio this also opens the SDL audio device.
 * @param streamIndex index into m_Ic->streams.
 * @return >= 0 on success (audio keeps AudioOpen's buffer size in ret),
 *         a negative AVERROR or -1 on failure.
 */
int FmpegPlayer::StreamComponentOpen(int streamIndex)
{
    AVCodecContext *avctx;
    AVCodec *codec;
    int sample_rate;
    int nb_channels;
    int64_t channel_layout;
    int ret = 0;
    // Validate the index; nb_streams is unsigned, so compare safely.
    if (streamIndex < 0 || streamIndex >= (int)m_Ic->nb_streams)
    {
        return -1;
    }
    /* Allocate a fresh codec context for this stream's decoder. */
    avctx = avcodec_alloc_context3(NULL);
    if (!avctx)
    {
        return AVERROR(ENOMEM);
    }
    /* Copy the demuxer's codec parameters into the new context. */
    ret = avcodec_parameters_to_context(avctx, m_Ic->streams[streamIndex]->codecpar);
    if (ret < 0)
    {
        goto fail;
    }
    // Packet timestamps arrive in the stream's timebase.
    avctx->pkt_timebase = m_Ic->streams[streamIndex]->time_base;
    /* Look up a decoder for this codec id. */
    codec = (AVCodec *)avcodec_find_decoder(avctx->codec_id);
    if (!codec)
    {
        av_log(NULL, AV_LOG_WARNING,
               "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
        ret = AVERROR(EINVAL);
        goto fail;
    }
    if ((ret = avcodec_open2(avctx, codec, NULL)) < 0)
    {
        goto fail;
    }
    switch (avctx->codec_type)
    {
    case AVMEDIA_TYPE_AUDIO:
        // Audio format parameters from the opened codec context.
        sample_rate = avctx->sample_rate;           // sample rate
        nb_channels = avctx->channels;              // channel count
        channel_layout = avctx->channel_layout;     // channel layout
        // Open SDL audio output: the device's actual parameters land in
        // m_AudioTgt; the return value is the device buffer size in bytes.
        if ((ret = AudioOpen( channel_layout, nb_channels, sample_rate, &m_AudioTgt)) < 0)
        {
            goto fail;
        }
        m_AudioHwBufSize = ret;
        m_AudioSrc = m_AudioTgt;  // assume source == target until a frame says otherwise
        // Reset the playback buffer state.
        m_AudioBufSize  = 0;
        m_AudioBufIndex = 0;
        m_AudioStream = streamIndex;             // audio stream index
        m_AudioSt = m_Ic->streams[streamIndex];  // audio stream pointer
        // Bind the decoder wrapper to this context and its packet queue.
        m_Auddec.DecoderInit(avctx, &m_Audioq);
        // Launch the audio decode thread.
        m_Auddec.DecoderStart(AVMEDIA_TYPE_AUDIO, this);
        // Unpause SDL so the audio callback starts pulling data.
        SDL_PauseAudio(0);
        break;

    case AVMEDIA_TYPE_VIDEO:
        m_VideoStream = streamIndex;                // video stream index
        m_VideoSt = m_Ic->streams[streamIndex];     // video stream pointer
        // Bind the decoder wrapper and launch the video decode thread.
        m_Viddec.DecoderInit(avctx, &m_Videoq);
        if ((ret = m_Viddec.DecoderStart(AVMEDIA_TYPE_VIDEO, this)) < 0)
        {
            goto out;
        }
        break;
    default:
        // BUGFIX: unsupported media types (e.g. subtitles) used to leak the
        // opened codec context.
        avcodec_free_context(&avctx);
        break;
    }
    goto out;
fail:
    avcodec_free_context(&avctx);
out:
    return ret;
}

/**
 * @brief True when demuxing can pause for this stream: stream absent, queue
 *        aborted, attached picture, or enough packets/duration buffered.
 * @return 1 when the queue needs no more packets, 0 otherwise.
 */
int FmpegPlayer::StreamHasEnoughPackets(AVStream *st, int streamId, PacketQueue *queue)
{
    if (streamId < 0 || queue->abort_request)
    {
        return 1;   // no such stream, or the queue is shutting down
    }
    if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
    {
        return 1;   // cover art: a single picture, never needs buffering
    }
    // Enough packets, and either no duration info or more than 1s buffered.
    return queue->nb_packets > MIN_FRAMES &&
           (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
}

/**
 * @brief Decide whether playback has fully finished: the demuxer reached
 *        EOF and every present stream has drained its data.
 */
void FmpegPlayer::CheckPlayFinish()
{
    // Only meaningful once av_read_frame has reported AVERROR_EOF.
    if (m_EOF != 1)
    {
        return;
    }
    const bool has_audio = (m_AudioStream >= 0);
    const bool has_video = (m_VideoStream >= 0);
    if (has_audio && has_video)
    {
        // Both streams present: finished only when both queues ran dry.
        if (m_AudioNoData == 1 && m_VideoNoData == 1)
        {
            // TODO: post a stop notification to the UI message queue.
        }
        return;
    }
    if (has_audio)
    {
        // Audio-only input.
        if (m_AudioNoData == 1)
        {
            // TODO: post a stop notification.

        }
        return;
    }
    if (has_video)
    {
        // Video-only input.
        if (m_VideoNoData == 1)
        {
            // TODO: post a stop notification.

        }
        return;
    }
}

/**
 * @brief Open the SDL audio device and report the parameters actually used.
 * @param wantedChannelLayout desired FFmpeg channel layout (0 = derive from count).
 * @param wantedNbChannels    desired channel count.
 * @param wantedSampleRate    desired sample rate in Hz.
 * @param audioHwParams [out] FFmpeg-side description of the opened device;
 *                            resampling in AudioDecodeFrame targets these values.
 * @return SDL's internal buffer size in bytes, or -1 on failure.
 */
int FmpegPlayer::AudioOpen(int64_t wantedChannelLayout, int wantedNbChannels, int wantedSampleRate, AudioParams *audioHwParams)
{
    SDL_AudioSpec wanted_spec;
    // Describe the requested format to SDL.
    wanted_spec.freq = wantedSampleRate;          // sample rate
    wanted_spec.format = AUDIO_S16SYS; // sample format: signed 16-bit, native endian
    wanted_spec.channels = wantedNbChannels;          // channel count
    wanted_spec.silence = 0;
    wanted_spec.samples = 2048;       // samples per callback (~23-46 ms between pulls)
    wanted_spec.callback = SdlAudioCallback; // data-pull callback
    wanted_spec.userdata = this;
    //    SDL_OpenAudioDevice
    // Open the audio device.
    // NOTE(review): 'obtained' is NULL, so SDL converts internally to the
    // requested format — confirm wanted_spec.size is populated in this mode.
    if(SDL_OpenAudio(&wanted_spec, NULL) != 0)
    {
        return -1;
    }
    // wanted_spec holds the SDL-side request; audioHwParams exports the
    // FFmpeg-side view of it for the caller (the resampling target format).
    audioHwParams->fmt = AV_SAMPLE_FMT_S16;
    audioHwParams->freq = wanted_spec.freq;
    audioHwParams->channel_layout = wantedChannelLayout;
    audioHwParams->channels =  wanted_spec.channels;
    if(audioHwParams->channel_layout == 0)
    {
        // No explicit layout requested: derive the default for the channel count.
        audioHwParams->channel_layout = av_get_default_channel_layout(audioHwParams->channels);
    }
    /* frame_size: bytes occupied by ONE sample across all channels */
    audioHwParams->frame_size = av_samples_get_buffer_size(NULL, audioHwParams->channels,1,audioHwParams->fmt, 1);
    audioHwParams->bytes_per_sec = av_samples_get_buffer_size(NULL, audioHwParams->channels,audioHwParams->freq,audioHwParams->fmt, 1);
    if (audioHwParams->bytes_per_sec <= 0 || audioHwParams->frame_size <= 0)
    {
        av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
        return -1;
    }
    // e.g. 2 frames of 1024 samples: 1024 * 2ch * 2 bytes * 2 = 8192 bytes
    return wanted_spec.size;	/* SDL's internal buffer: samples * channels * bytes_per_sample */

}

double FmpegPlayer::VpDuration(Frame *vp, Frame *nextvp)
{
    // Estimate how long frame vp should stay on screen, from the pts gap to
    // the following frame. Falls back to vp's own nominal duration when the
    // gap is unusable, and to 0.0 across a serial (seek/flush) boundary.
    if (vp->serial != nextvp->serial)
    {
        return 0.0;  // different playback sequence — the gap is meaningless
    }
    const double gap = nextvp->pts - vp->pts;
    const bool unusable = std::isnan(gap) || gap <= 0 || gap > max_frame_duration;
    return unusable ? vp->duration : gap;
}

double FmpegPlayer::ComputeTargetDelay(double delay)
{
    // Adjust the nominal inter-frame delay so video tracks the master clock.
    // When video itself is the master, the delay is returned unchanged.
    double diff = 0;
    if (GetMasterSyncType() != AV_SYNC_VIDEO_MASTER)
    {
        // diff > 0: video ahead of the master clock; diff < 0: video behind.
        diff = GetClock(&m_Vidclk) - GetMasterClock();
        // Tolerance window clamped to [THRESHOLD_MIN, THRESHOLD_MAX] around
        // the nominal delay.
        const double sync_threshold =
            FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
        if (!std::isnan(diff) && fabs(diff) < max_frame_duration)
        {
            if (diff <= -sync_threshold)
            {
                // Video lags: shrink the delay (never below 0) to catch up.
                delay = FFMAX(0, delay + diff);
            }
            else if (diff >= sync_threshold)
            {
                // Video leads: stretch the delay. For frames already longer
                // than the dup threshold add the full drift; otherwise just
                // double the delay.
                delay = (delay > AV_SYNC_FRAMEDUP_THRESHOLD) ? delay + diff
                                                             : 2 * delay;
            }
        }
    }
    av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
           delay, -diff);
    return delay;
}

int FmpegPlayer::GetMasterSyncType()
{
    // Resolve the configured sync type against the streams actually present.
    if (m_AvSyncType == AV_SYNC_VIDEO_MASTER)
    {
        // Video master only makes sense when a video stream exists;
        // otherwise fall back to audio master.
        return m_VideoSt ? AV_SYNC_VIDEO_MASTER : AV_SYNC_AUDIO_MASTER;
    }
    if (m_AvSyncType == AV_SYNC_AUDIO_MASTER)
    {
        if (m_AudioSt)
        {
            return AV_SYNC_AUDIO_MASTER;
        }
        if (m_VideoSt)
        {
            return AV_SYNC_VIDEO_MASTER;   // video-only stream
        }
        return AV_SYNC_UNKNOW_MASTER;      // neither stream is present
    }
    // BUGFIX: the function previously fell off the end (undefined behavior in
    // a non-void function) for any other m_AvSyncType value — report unknown
    // explicitly instead.
    return AV_SYNC_UNKNOW_MASTER;
}

double FmpegPlayer::GetMasterClock()
{
    // Current position of the master clock, in seconds.
    switch (GetMasterSyncType())
    {
        case AV_SYNC_VIDEO_MASTER:
            return GetClock(&m_Vidclk);
        case AV_SYNC_AUDIO_MASTER:
        default:
            // External-clock sync is not supported here, so anything else
            // also falls back to the audio clock.
            return GetClock(&m_Audclk);
    }
}

// Stamp the video clock with the pts of the frame just displayed.
// NOTE(review): pos is not used here (the signature matches ffplay's
// update_video_pts) — presumably kept for interface compatibility; confirm.
void FmpegPlayer::UpdateVideoPts(double pts, int64_t pos, int serial)
{
    SetClock(&m_Vidclk, pts, serial);
}

double GetClock(Clock *c)
{
    // Current value of clock c, in seconds.
    if (*c->queue_serial != c->serial)
    {
        return NAN;     // stale serial (after seek/flush): the clock is invalid
    }
    if (c->paused)
    {
        return c->pts;  // frozen at the pts recorded when playback paused
    }
    // Extrapolate from the last update: pts_drift + now, corrected for
    // playback speed over the time elapsed since last_updated.
    const double now = av_gettime_relative() / 1000000.0;
    return c->pts_drift + now - (now - c->last_updated) * (1.0 - c->speed);
}

void SetClockAt(Clock *c, double pts, int serial, double time)
{
    // Anchor clock c: store the current frame's pts, the system time of this
    // update, and their difference. During normal playback pts_drift stays
    // roughly constant, since pts and system time advance at the same rate.
    c->pts = pts;
    c->pts_drift = pts - time;
    c->last_updated = time;
    c->serial = serial;
}

void SetClock(Clock *c, double pts, int serial)
{
    // Set the clock, anchoring it at the current system time.
    const double now = av_gettime_relative() / 1000000.0;
    SetClockAt(c, pts, serial, now);
}

void InitClock(Clock *c, int *queue_serial)
{
    // Prepare clock c: normal speed, not paused, tied to the given packet
    // queue serial pointer, and with no valid pts yet (NAN, serial -1).
    c->speed = 1.0;
    c->paused = 0;
    c->queue_serial = queue_serial;
    SetClock(c, NAN, -1);
}
