#include "ffplay.h"
#include <iostream>
#include <cmath>
#include <string.h>
#include "ffmsg_global.h"
#include "sonic.h"
#include "screenshot.h"
#include "globalresource.h"

#ifdef _WIN32
#include <windows.h>
#endif


/* Minimum SDL audio buffer size, in samples. */
#define SDL_AUDIO_MIN_BUFFER_SIZE 512
/* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
int infinite_buffer = 0;             // NOTE(review): not referenced in this chunk — presumably "don't limit demuxer queues"; confirm against read_thread
static int decoder_reorder_pts = -1; // -1 = auto; NOTE(review): not referenced in this chunk
static int seek_by_bytes = -1;       // -1 = auto; used as a plain boolean in ffp_forward_or_back_to_l()
/**
 * @brief Log a human-readable description of an FFmpeg error code.
 * @param filename context string prefixed to the message (typically the file that failed)
 * @param err      FFmpeg error code (negative AVERROR value)
 */
void print_error(const char *filename, int err)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    char errbuf[128];
    const char *description;
    // Prefer FFmpeg's own error text; fall back to the C library message.
    if (av_strerror(err, errbuf, sizeof(errbuf)) >= 0) {
        description = errbuf;
    } else {
        description = strerror(AVUNERROR(err));
    }
    av_log(NULL, AV_LOG_ERROR, "%s: %s\n", filename, description);
}

// Constructor: start at normal (1.0x) speed and zero the playback statistics.
FFPlayer::FFPlayer()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    pf_playback_rate = 1.0;
    // Reset the statistics block to a known state.
    ffp_reset_statistic(&stat);
}

// Create the player's messaging infrastructure. Returns 0.
int FFPlayer::ffp_create()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    msg_queue_init(&msg_queue_);// initialize the player->UI message queue
    return 0;
}

// Tear the player down: close all streams/threads, then destroy the message queue.
void FFPlayer::ffp_destroy()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    stream_close();
    // Destroy the message queue last so stream_close() may still post messages.
    msg_queue_destroy(&msg_queue_);
}

/**
 * @brief Prepare the given file for playback (kicks off stream_open).
 * @param file_name path/URL of the media to open; copied internally.
 * @return stream_open()'s result: 0 on success, negative on failure.
 */
int FFPlayer::ffp_prepare_async_l(char *file_name)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Keep a private copy of the file name. Release any copy left over from a
    // previous prepare first — the original code leaked it on repeated calls.
    if (input_filename_) {
        free(input_filename_);
        input_filename_ = NULL;
    }
    input_filename_ = strdup(file_name);
    int reval = stream_open(file_name);
    return reval;
}

// Start playback, or resume it if currently paused. Returns 0.
int FFPlayer::ffp_start_l()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // 0 = un-pause (see toggle_pause / toggle_pause_l).
    toggle_pause( 0);
    return 0;
}

// Request playback stop: flag all loops to exit and shut the message queue.
int FFPlayer::ffp_stop_l()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    abort_request = 1;  // ask the worker threads to exit
    msg_queue_abort(&msg_queue_);  // refuse any further message posts
    return 0;
}

/*
 * Initialize the SDL library, create the frame and packet queues, set up the
 * clocks and initial volume, then start the demuxer read thread and the
 * video refresh thread.
 *
 * Returns 0 on success; -1 on failure (queues already created are released
 * via stream_close()).
 */
int FFPlayer::stream_open(const char *file_name)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Bring up SDL with the video, audio and timer subsystems enabled.
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
        av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
        return -1;
    }

    // Video frame queue, VIDEO_PICTURE_QUEUE_SIZE_DEFAULT entries deep.
    // The final argument (keep_last = 1) keeps the last shown frame readable.
    if (frame_queue_init(&pictq, &videoq, VIDEO_PICTURE_QUEUE_SIZE_DEFAULT, 1) < 0) {
        goto fail;
    }
    // Audio frame queue, SAMPLE_QUEUE_SIZE entries deep.
    // keep_last = 1 matters here as well.
    if (frame_queue_init(&sampq, &audioq, SAMPLE_QUEUE_SIZE, 1) < 0) {
        goto fail;
    }
    
    // Packet queues feeding the video and audio decoders.
    if (packet_queue_init(&videoq) < 0 ||
        packet_queue_init(&audioq) < 0 ) {
        goto fail;
    }

    // Video clock, tied to the serial of the video packet queue.
    init_clock(&vidclk, &videoq.serial);
    // Audio clock, tied to the serial of the audio packet queue.
    init_clock(&audclk, &audioq.serial);
    // No audio frame has been decoded yet.
    audio_clock_serial = -1;

    /*
     * Clamp the startup volume to [0, 100] so the input is legal, rescale it
     * to [0, SDL_MIX_MAXVOLUME], then use it as the initial audio volume.
     */
    startup_volume = av_clip(startup_volume, 0, 100);
    startup_volume = av_clip(SDL_MIX_MAXVOLUME *  startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
    audio_volume =  startup_volume;

    // Start the demuxer read thread...
    read_thread_ = new std::thread(&FFPlayer::read_thread, this);
    // ...and the video refresh (display) thread.
    video_refresh_thread_ = new std::thread(&FFPlayer::video_refresh_thread, this);
    return 0;
fail:
    stream_close();
    return -1;
}

// Shut the whole pipeline down: stop the read thread, close both stream
// components (which stop their decode threads), then release the queues
// and the saved file name. Safe to call when streams were never opened.
void FFPlayer::stream_close()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    abort_request = 1; // ask the worker threads to exit
    if(read_thread_ && read_thread_->joinable()) {
        read_thread_->join();       // wait for the read thread to finish
    }
    /* close each stream */
    if (audio_stream >= 0) {
        stream_component_close(audio_stream);    // decoder abort path also calls packet_queue_abort
    }
    if (video_stream >= 0) {
        stream_component_close(video_stream);
    }
    // demuxer close would go here: avformat_close_input(&ic);
    // release the packet queues
    packet_queue_destroy(&videoq);
    packet_queue_destroy(&audioq);
    // release the frame queues
    frame_queue_destory(&pictq);
    frame_queue_destory(&sampq);
    if(input_filename_) {
        free(input_filename_);
        input_filename_ = NULL;
    }
}

// TODO(review): no way to force a specific decoder by name yet.
/*
 * For the given stream index: allocate and initialize a codec context, find
 * and open the matching decoder, then run media-type-specific setup —
 * open the SDL audio output and start the audio decode thread, or start the
 * video decode thread.
 *
 * Returns: audio path — the SDL buffer size (>= 0) on success; video path —
 * 0 on success; a negative AVERROR on failure.
 */
int FFPlayer::stream_component_open(int stream_index)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    AVCodecContext *avctx;  // codec context for this stream
    AVCodec *codec;         // decoder implementation (e.g. the H.264 decoder)
    int sample_rate;
    int nb_channels;
    int64_t channel_layout;
    int ret = 0;
    // Validate the stream index.
    if (stream_index < 0 || stream_index >= ic->nb_streams) {
        return -1;
    }
    /*
       Allocate a fresh AVCodecContext populated with default values
       (no codec bound yet).
    */
    avctx = avcodec_alloc_context3(NULL);
    if (!avctx) {
        return AVERROR(ENOMEM);
    }
    /*
       Copy the stream parameters (codec id, resolution, sample rate, ...)
       from AVCodecParameters into the context instead of setting them by hand.
    */
    ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
    if (ret < 0) {
        goto fail;
    }
    // Time base used to interpret the timestamps of incoming packets.
    avctx->pkt_timebase = ic->streams[stream_index]->time_base;
    /*
       Locate the registered decoder for this codec id.
    */
    codec = (AVCodec *)avcodec_find_decoder(avctx->codec_id);
    if (!codec) {
        av_log(NULL, AV_LOG_WARNING,
               "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
        ret = AVERROR(EINVAL);
        goto fail;
    }
    /*
       Bind the context to the decoder implementation and open it.
     */
    if ((ret = avcodec_open2(avctx, codec, NULL)) < 0) {
        goto fail;
    }
    switch (avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            // Audio format parameters taken from the opened context.
            sample_rate = avctx->sample_rate;        // sample rate
            nb_channels = avctx->channels;           // channel count
            channel_layout = avctx->channel_layout;  // channel layout
            /* prepare audio output */
            // Open the SDL audio output. The device parameters actually
            // negotiated land in audio_tgt; the return value is the device
            // buffer size in bytes.
            if ((ret = audio_open( channel_layout, nb_channels, sample_rate, &audio_tgt)) < 0) {
                goto fail;
            }
            audio_hw_buf_size = ret;
            audio_src = audio_tgt;  // source params start out equal to the output params
            // Reset the output buffer bookkeeping.
            audio_buf_size  = 0;
            audio_buf_index = 0;
            audio_stream = stream_index;           // remember the audio stream index
            audio_st = ic->streams[stream_index];  // and the stream pointer
            // Bind avctx to the audio Decoder wrapper and start its thread.
            auddec.decoder_init(avctx, &audioq);
            auddec.decoder_start(AVMEDIA_TYPE_AUDIO, "audio_thread", this);
            // Un-pause the device so SDL starts invoking the callback.
            SDL_PauseAudio(0);
            break;
        case AVMEDIA_TYPE_VIDEO:
            video_stream = stream_index;          // remember the video stream index
            video_st = ic->streams[stream_index]; // and the stream pointer
            // Bind avctx to the video Decoder wrapper...
            viddec.decoder_init(avctx, &videoq);
            // ...and start the video decode thread.
            if ((ret = viddec.decoder_start(AVMEDIA_TYPE_VIDEO, "video_decoder", this)) < 0) {
                goto out;
            }
            break;
        default:
            // Stream types we do not play (subtitles, data, ...): release the
            // context we just opened — the original code leaked it here.
            avcodec_free_context(&avctx);
            break;
    }
    goto out;
fail:
    avcodec_free_context(&avctx);
out:
    return ret;
}

// Close one stream component: stop its decode thread, release its decoder
// resources, and clear the stream bookkeeping fields.
void FFPlayer::stream_component_close(int stream_index)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    if (stream_index < 0 || stream_index >= ic->nb_streams) {
        return;
    }
    AVCodecParameters *codecpar = ic->streams[stream_index]->codecpar;
    switch (codecpar->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            auddec.decoder_abort(&sampq);  // ask the audio decode thread to stop
            audio_close();                 // shut down the SDL audio device
            auddec.decoder_destroy();      // tear down the decoder wrapper
            swr_free(&swr_ctx);            // drop the resampler
            av_freep(&audio_buf1);         // release the resample buffer
            audio_buf1_size = 0;
            audio_buf = NULL;
            // clear stream bookkeeping
            audio_st = NULL;
            audio_stream = -1;
            break;
        case AVMEDIA_TYPE_VIDEO:
            // Stop the refresh thread before tearing the decoder down.
            if (video_refresh_thread_ && video_refresh_thread_->joinable()) {
                video_refresh_thread_->join();
            }
            viddec.decoder_abort(&pictq);  // ask the video decode thread to stop
            viddec.decoder_destroy();      // tear down the decoder wrapper
            // clear stream bookkeeping
            video_st = NULL;
            video_stream = -1;
            break;
        default:
            break;
    }
    //    ic->streams[stream_index]->discard = AVDISCARD_ALL;
}

/**
 * Decode one audio frame and return its uncompressed size.
 *
 * The processed audio frame is taken from the sample queue, resampled to the
 * device format if required, and stored in is->audio_buf; the return value is
 * its size in bytes, or -1 on error / while paused.
 */
static int audio_decode_frame(FFPlayer *is)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    int data_size, resampled_data_size;
    int64_t dec_channel_layout;
    int wanted_nb_samples;
    Frame *af;
    int ret = 0;
    if(is->paused) {
        return -1;  // consume nothing while paused
    }
    // Pull one frame, skipping frames whose serial no longer matches the
    // packet queue (stale frames left over from a seek/flush).
    do {
        if (!(af = frame_queue_peek_readable(&is->sampq))) {
            return -1;
        }
        frame_queue_next(&is->sampq);
    } while (af->serial != is->audioq.serial);
    // Buffer size implied by the frame: channels * nb_samples * bytes/sample.
    data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
                                           af->frame->nb_samples,
                                           (enum AVSampleFormat)af->frame->format, 1);
    // Channel layout: trust the frame's layout only when it agrees with its
    // channel count, otherwise derive a default layout from the count.
    dec_channel_layout =  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
                          af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
    if(dec_channel_layout == 0) {
        return -1; // no usable layout — treat the frame as bad
    }
    // No A/V-sync sample-count correction here:
    //    wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
    wanted_nb_samples = af->frame->nb_samples;
    // audio_tgt holds the device parameters negotiated in audio_open(), and
    // audio_src starts out equal to audio_tgt. If the frame already matches
    // audio_src there is nothing to convert (swr_ctx stays NULL); otherwise
    // (re)build swr_ctx from the frame (source) and audio_tgt (destination)
    // and remember the frame's parameters in audio_src.
    av_log(NULL, AV_LOG_DEBUG,
           "audio_src: freq=%d layout=%lld; frame: rate=%d layout=%lld\n",
           is->audio_src.freq, (long long)is->audio_src.channel_layout,
           af->frame->sample_rate, (long long)dec_channel_layout);
    if (af->frame->format           != is->audio_src.fmt            || // sample format
        dec_channel_layout      != is->audio_src.channel_layout || // channel layout
        af->frame->sample_rate  != is->audio_src.freq  ||        // sample rate
        (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx) ) {
        swr_free(&is->swr_ctx);
        is->swr_ctx = swr_alloc_set_opts(NULL,
                                         is->audio_tgt.channel_layout,  // destination
                                         is->audio_tgt.fmt,
                                         is->audio_tgt.freq,
                                         dec_channel_layout,            // source
                                         (enum AVSampleFormat)af->frame->format,
                                         af->frame->sample_rate,
                                         0, NULL);
        // NOTE: uses the outer `ret`. The original declared a shadowing local
        // here, so this failure path reached `fail:` with the outer ret still
        // 0 and reported success to the caller.
        if (!is->swr_ctx || (ret = swr_init(is->swr_ctx)) < 0) {
            char errstr[256] = { 0 };
            av_strerror(ret, errstr, sizeof(errstr));
            // The original built this message but never logged it (and
            // overwrote the av_strerror text while doing so).
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels: %s\n",
                   af->frame->sample_rate, av_get_sample_fmt_name((enum AVSampleFormat)af->frame->format), af->frame->channels,
                   is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels, errstr);
            swr_free(&is->swr_ctx);
            ret = -1;
            goto fail;
        }
        is->audio_src.channel_layout = dec_channel_layout;
        is->audio_src.channels       = af->frame->channels;
        is->audio_src.freq = af->frame->sample_rate;
        is->audio_src.fmt = (enum AVSampleFormat)af->frame->format;
    }
    if (is->swr_ctx) {
        // Conversion input: the frame's samples (planar or interleaved).
        const uint8_t **in = (const uint8_t **)af->frame->extended_data;
        // Conversion output: audio_buf1 owns the storage; audio_buf points at it.
        uint8_t **out = &is->audio_buf1;
        // Output capacity in samples. Downsampling yields fewer samples
        // (e.g. 1024 * 48000 / 96000 = 512); +256 leaves headroom for samples
        // the resampler buffered internally from the previous call.
        int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate
                        + 256;
        // Bytes needed for out_count samples in the target format/channels.
        int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels,
                        out_count, is->audio_tgt.fmt, 0);
        int len2;
        if (out_size < 0) {
            av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
            ret = -1;
            goto fail;
        }
        // Grow audio_buf1 to out_size if needed (no-op when already large enough).
        av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
        if (!is->audio_buf1) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        // Resample; len2 is the per-channel sample count actually produced.
        len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
        if (len2 < 0) {
            av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
            ret = -1;
            goto fail;
        }
        if (len2 == out_count) { // output filled the whole headroom — buffer likely too small
            av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
            if (swr_init(is->swr_ctx) < 0) {
                swr_free(&is->swr_ctx);
            }
        }
        // Size of the converted audio, in bytes.
        is->audio_buf = is->audio_buf1;
        resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
    } else {
        // No conversion needed: point straight at the frame's data.
        is->audio_buf = af->frame->data[0]; // interleaved s16 lives in data[0]
        resampled_data_size = data_size;
    }
    if (!std::isnan(af->pts)) {
        // Clock value at the END of this frame.
        is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
    } else {
        is->audio_clock = NAN;
    }
    is->audio_clock_serial = af->serial;    // remember the serial of this frame
    ret = resampled_data_size;
fail:
    return ret;
}



/* prepare a new audio buffer */
/**
 * @brief sdl_audio_callback  SDL audio callback: fill `stream` with `len` bytes of PCM.
 * @param opaque    user data: the FFPlayer instance
 * @param stream    destination buffer the PCM is copied into
 * @param len       number of bytes SDL asks for
 */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // e.g. 2 ch * 2 bytes * 1024 samples = 4096 bytes -> ~2 frames per callback
    FFPlayer *is = (FFPlayer *)opaque;
    int audio_size, len1;
    is->audio_callback_time = av_gettime_relative();
    while (len > 0) {   // keep copying until SDL's request is fully satisfied
        /* (1) if audio_buf_index < audio_buf_size, data from the previous
         * decode is still pending — copy that to stream first.
         * (2) once audio_buf is drained, refill it via audio_decode_frame().
         */
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = NULL;
                is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size
                                     * is->audio_tgt.frame_size;
                is->audio_no_data  = 1;      // no data available right now
                if(is->eof) {
                    // Demuxing reached EOF: check whether anything is still
                    // queued and, if not, notify the UI to stop playback.
                    is->check_play_finish();
                }
            } else {
                is->audio_buf_size = audio_size; // size in bytes
                is->audio_no_data = 0;
            }
            is->audio_buf_index = 0;
            // Re-create the sonic converter whenever the playback rate changed.
            if(is->ffp_get_playback_rate_change()) {
                is->ffp_set_playback_rate_change(0);
                // release the previous converter first
                if(is->audio_speed_convert) {
                    sonicDestroyStream(is->audio_speed_convert);
                }
                // then create a new one for the output frequency/channel count
                is->audio_speed_convert = sonicCreateStream(is->get_target_frequency(),
                                          is->get_target_channels());
                // configure the speed factor; pitch and rate stay at 1.0
                sonicSetSpeed(is->audio_speed_convert, is->ffp_get_playback_rate());
                sonicSetPitch(is->audio_speed_convert, 1.0);
                sonicSetRate(is->audio_speed_convert, 1.0);
            }
            if(!is->is_normal_playback_rate() && is->audio_buf) {
                // Non-1.0x speed: run the PCM through sonic, then replace
                // audio_buf / audio_buf_size / audio_buf_index with the result.
                int actual_out_samples = is->audio_buf_size /
                                         (is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt));
                // bookkeeping for the converted sample count
                int out_ret = 0;
                int out_size = 0;
                int num_samples = 0;
                int sonic_samples = 0;
                if(is->audio_tgt.fmt == AV_SAMPLE_FMT_FLT) {
                    out_ret = sonicWriteFloatToStream(is->audio_speed_convert,
                                                      (float *)is->audio_buf,
                                                      actual_out_samples);
                } else  if(is->audio_tgt.fmt == AV_SAMPLE_FMT_S16) {
                    out_ret = sonicWriteShortToStream(is->audio_speed_convert,
                                                      (short *)is->audio_buf,
                                                      actual_out_samples);
                } else {
                    av_log(NULL, AV_LOG_ERROR, "sonic unspport ......\n");
                }
                num_samples =  sonicSamplesAvailable(is->audio_speed_convert);
                // NOTE(review): only stereo output is handled here
                out_size = (num_samples) * av_get_bytes_per_sample(is->audio_tgt.fmt) * is->audio_tgt.channels;
                av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
                if(out_ret) {
                    // read the speed-adjusted samples back from the sonic stream
                    if(is->audio_tgt.fmt == AV_SAMPLE_FMT_FLT) {
                        sonic_samples = sonicReadFloatFromStream(is->audio_speed_convert,
                                        (float *)is->audio_buf1,
                                        num_samples);
                    } else  if(is->audio_tgt.fmt == AV_SAMPLE_FMT_S16) {
                        sonic_samples = sonicReadShortFromStream(is->audio_speed_convert,
                                        (short *)is->audio_buf1,
                                        num_samples);
                    } else {
                    }
                    is->audio_buf = is->audio_buf1;
                    is->audio_buf_size = sonic_samples * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
                    is->audio_buf_index = 0;
                }
            }
        }
        if(is->audio_buf_size == 0) {
            continue;
        }
        // copy no more than what is left in audio_buf
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len) {
            len1 = len;
        }
        if (is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME) {
            memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        } else {
            // scaled volume (or silence when audio_buf is NULL)
            memset(stream, 0, len1);
            if (is->audio_buf) {
                SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
            }
        }
        /* advance audio_buf_index past the bytes just copied to stream */
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
    is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
    /* Let's assume the audio driver that is used by SDL has two periods. */
    if (!std::isnan(is->audio_clock)) {
        double audio_clock = is->audio_clock / is->ffp_get_playback_rate();
        set_clock_at(&is->audclk,
                     audio_clock  - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec,
                     is->audio_clock_serial,
                     is->audio_callback_time / 1000000.0);
    }
}


// 先参考我们之前讲的06-sdl-pcm范例
int FFPlayer::audio_open(int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, AudioParams *audio_hw_params)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    SDL_AudioSpec wanted_spec;
    // 音频参数设置SDL_AudioSpec
    wanted_spec.freq = wanted_sample_rate;          // 采样频率
    wanted_spec.format = AUDIO_S16SYS; // 采样点格式
    wanted_spec.channels = wanted_nb_channels;          // 2通道
    wanted_spec.silence = 0;
    wanted_spec.samples = 2048;       // 23.2ms -> 46.4ms 每次读取的采样数量，多久产生一次回调和 samples
    wanted_spec.callback = sdl_audio_callback; // 回调函数
    wanted_spec.userdata = this;
    //    SDL_OpenAudioDevice
    //打开音频设备
    if(SDL_OpenAudio(&wanted_spec, NULL) != 0) {
        return -1;
    }
    // wanted_spec是期望的参数，spec是实际的参数，wanted_spec和spec都是SDL中的结构。
    // 此处audio_hw_params是FFmpeg中的参数，输出参数供上级函数使用
    // audio_hw_params保存的参数，就是在做重采样的时候要转成的格式。
    audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
    audio_hw_params->freq = wanted_spec.freq;
    audio_hw_params->channel_layout = wanted_channel_layout;
    audio_hw_params->channels =  wanted_spec.channels;
    if(audio_hw_params->channel_layout == 0) {
        audio_hw_params->channel_layout =
            av_get_default_channel_layout(audio_hw_params->channels);
    }
    /* audio_hw_params->frame_size这里只是计算一个采样点占用的字节数 */
    audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels,
                                  1,
                                  audio_hw_params->fmt, 1);
    audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels,
                                     audio_hw_params->freq,
                                     audio_hw_params->fmt, 1);
    if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
        av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
        return -1;
    }
    // 比如2帧数据，一帧就是1024个采样点， 1024*2*2 * 2 = 8192字节
    return wanted_spec.size;	/* SDL内部缓存的数据字节, samples * channels *byte_per_sample */
}

// Close the SDL audio device opened by audio_open().
void FFPlayer::audio_close()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    SDL_CloseAudio();  // counterpart of SDL_OpenAudio (cf. SDL_CloseAudioDevice)
}

long FFPlayer::ffp_get_duration_l()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    if(!ic) {
        return 0;
    }
    int64_t duration = fftime_to_milliseconds(ic->duration);
    if (duration < 0) {
        return 0;
    }
    return (long)duration;
}

// Current playback position, in milliseconds.
long FFPlayer::ffp_get_current_position_l()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    if(!ic) {
        return 0;
    }
    int64_t start_time = ic->start_time;    // stream start time; usually 0
    int64_t start_diff = 0;
    if (start_time > 0 && start_time != AV_NOPTS_VALUE) {
        start_diff = fftime_to_milliseconds(start_time);    // ms precision is enough here
    }
    int64_t pos = 0;
    double pos_clock = get_master_clock();  // master clock, in seconds
    if (std::isnan(pos_clock)) {
        // Clock not valid yet (e.g. right after a seek): fall back to seek_pos.
        pos = fftime_to_milliseconds(seek_pos);
    } else {
        pos = pos_clock * 1000;     // seconds -> ms
    }
    if (pos < 0 || pos < start_diff) {
        return 0;
    }
    int64_t adjust_pos = pos - start_diff;
    // NOTE(review): the position is multiplied by the playback-rate factor —
    // confirm callers expect speed-scaled time rather than raw media time.
    return (long)adjust_pos * pf_playback_rate; // playback-speed factor
}

// Request a pause. Returns 0.
int FFPlayer::ffp_pause_l()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    toggle_pause(1);  // 1 = pause (see toggle_pause_l)
    return 0;
}

// Thin wrapper around toggle_pause_l(); pause_on: 1 = pause, 0 = resume.
void FFPlayer::toggle_pause(int pause_on)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    toggle_pause_l(pause_on);
}

// Apply a pause/resume request and propagate it to the playback state.
// pause_on: 1 = pause, 0 = resume.
void FFPlayer::toggle_pause_l(int pause_on)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Transition pause -> resume: re-anchor both clocks so paused wall-clock
    // time is not counted as playback time.
    if (pause_req && !pause_on) {
        set_clock(&vidclk, get_clock(&vidclk), vidclk.serial);
        set_clock(&audclk, get_clock(&audclk), audclk.serial);
    }
    pause_req = pause_on;
    auto_resume = !pause_on;   // resuming clears any deferred auto-resume
    stream_update_pause_l();   // push the new state into the pipeline
    step = 0;                  // any single-step mode ends here
}

// Recompute whether playback should actually be paused and apply it:
// paused when not single-stepping and either a pause was requested or
// buffering is in progress; running otherwise.
void FFPlayer::stream_update_pause_l()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    const int want_pause = (!step && (pause_req || buffering_on)) ? 1 : 0;
    stream_toggle_pause_l(want_pause);
}

// Low-level pause switch: adjust the clocks and the paused flags.
// pause_on: 1 = pause, 0 = run.
void FFPlayer::stream_toggle_pause_l(int pause_on)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Resuming from a real pause: account for the time spent paused in
    // frame_timer and re-anchor both clocks.
    if (paused && !pause_on) {
        frame_timer += av_gettime_relative() / 1000000.0 - vidclk.last_updated;
        set_clock(&vidclk, get_clock(&vidclk), vidclk.serial);
        set_clock(&audclk, get_clock(&audclk), audclk.serial);
    } else {
    }
    if (step && (pause_req || buffering_on)) {
        // Single-step while a pause is pending: only the video clock pauses.
        paused = vidclk.paused = pause_on;
    } else {
        // Normal case: pause/run both clocks together.
        paused = audclk.paused = vidclk.paused =  pause_on;
        //        SDL_AoutPauseAudio(ffp->aout, pause_on);
    }
}

// Seek to an absolute position given in milliseconds. Returns 0.
int FFPlayer::ffp_seek_to_l(long msec)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    int64_t start_time = 0;
    // NOTE(review): this local intentionally shadows the member seek_pos used
    // elsewhere (e.g. ffp_get_current_position_l) — confirm that is intended.
    int64_t seek_pos = milliseconds_to_fftime(msec);
    int64_t duration = milliseconds_to_fftime(ffp_get_duration_l());
    if (duration > 0 && seek_pos >= duration) {
        // Target is beyond the end: report completion without seeking.
        ffp_notify_msg1(this, FFP_MSG_SEEK_COMPLETE);
        return 0;
    }
    // Offset by the stream's start time when it does not begin at 0.
    start_time =  ic->start_time;
    if (start_time > 0 && start_time != AV_NOPTS_VALUE) {
        seek_pos += start_time;
    }
    stream_seek(seek_pos, 0, 0);
    return 0;
}

// Relative seek forward by `incr` (sign carried by the caller). Returns 0.
int FFPlayer::ffp_forward_to_l(long incr)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    ffp_forward_or_back_to_l(incr);
    return 0;
}

// Relative seek backward by `incr` (sign carried by the caller). Returns 0.
int FFPlayer::ffp_back_to_l(long incr)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    ffp_forward_or_back_to_l(incr);
    return 0;
}

// Relative seek by `incr` — seconds in time mode, converted to bytes in
// byte-seek mode. Returns 0.
int FFPlayer::ffp_forward_or_back_to_l(long incr)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    double pos;
    // NOTE(review): seek_by_bytes is initialized to -1 ("auto"), which is
    // truthy here, so byte-seeking is taken unless something sets it to 0 —
    // confirm this matches the intended default.
    if (seek_by_bytes) {
        // Byte mode: find a current byte position, preferring the last
        // displayed frame, then the last audio frame, then the I/O position.
        pos = -1;
        if (pos < 0 &&  video_stream >= 0) {
            pos = frame_queue_last_pos(&pictq);
        }
        if (pos < 0 && audio_stream >= 0) {
            pos = frame_queue_last_pos(&sampq);
        }
        if (pos < 0) {
            pos = avio_tell(ic->pb);
        }
        // Convert the increment from seconds to bytes via the bitrate
        // (fallback: assume 180 kB/s).
        if (ic->bit_rate) {
            incr *= ic->bit_rate / 8.0;
        } else {
            incr *= 180000.0;
        }
        pos += incr;
        stream_seek(pos, incr, 1);
    } else {
        pos = get_master_clock();       // current position in seconds
        if (std::isnan(pos)) {
            // Clock invalid (e.g. mid-seek): fall back to the pending seek target.
            pos = (double)seek_pos / AV_TIME_BASE;
        }
        pos += incr;   // increment is already in seconds
        // Clamp to the stream's start time.
        if (ic->start_time != AV_NOPTS_VALUE && pos < ic->start_time / (double)AV_TIME_BASE) {
            pos = ic->start_time / (double)AV_TIME_BASE;
        }
        // Convert to AV_TIME_BASE units for the demuxer.
        stream_seek((int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
    }
    return 0;
}

/**
 * @brief Record a seek request for the read thread to perform.
 * @param pos           target position (AV_TIME_BASE units, or bytes)
 * @param rel           relative offset of the request
 * @param seek_by_bytes non-zero to seek by byte offset (AVSEEK_FLAG_BYTE)
 *
 * Ignored while a previous request is still pending.
 */
void FFPlayer::stream_seek(int64_t pos, int64_t rel, int seek_by_bytes)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    if (seek_req) {
        return; // a previous seek has not been consumed yet — drop this one
    }
    seek_pos = pos;
    seek_rel = rel;
    seek_flags &= ~AVSEEK_FLAG_BYTE;
    if (seek_by_bytes) {
        seek_flags |= AVSEEK_FLAG_BYTE;
    }
    seek_req = 1;
    // originally: SDL_CondSignal(continue_read_thread);
}

/**
 * @brief Request a screenshot to be written to `screen_path`.
 * Only honored when a video stream exists and no request is pending.
 * @return always 0.
 */
int FFPlayer::ffp_screenshot_l(char *screen_path)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    if (!video_st || req_screenshot_) {
        return 0; // no video, or a request is already in flight
    }
    // Replace any previously stored target path.
    if (screen_path_) {
        free(screen_path_);
        screen_path_ = NULL;
    }
    screen_path_ = strdup(screen_path);
    req_screenshot_ = true;
    return 0;
}

// Service a pending screenshot request with the given decoded frame,
// then notify the UI (ret = 0 on success, < 0 on failure).
void FFPlayer::screenshot(AVFrame *frame)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    if (!req_screenshot_) {
        return;
    }
    ScreenShot shot;
    int ret = -1;
    if (frame) {
        ret = shot.SaveJpeg(frame, screen_path_, 70);
    }
    ffp_notify_msg4(this, FFP_MSG_SCREENSHOT_COMPLETE, ret, 0, screen_path_, strlen(screen_path_) + 1);
    req_screenshot_ = false; // allow the next screenshot request
}

// Return the target (SDL output) audio sample rate in Hz.
int FFPlayer::get_target_frequency()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    return audio_tgt.freq;
}

// Return the target (SDL output) audio channel count.
int FFPlayer::get_target_channels()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    return audio_tgt.channels;
}

// Set the playback speed factor (1.0 = normal speed) and raise the
// change flag so the audio path can reconfigure on its next pass.
void FFPlayer::ffp_set_playback_rate(float rate)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    pf_playback_rate = rate;
    pf_playback_rate_changed = 1;
}

// Return the current playback speed factor (1.0 = normal speed).
float FFPlayer::ffp_get_playback_rate()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    return pf_playback_rate;
}

// Whether the playback speed is effectively normal (within +/-1% of 1.0),
// so speed-change processing can be skipped.
bool FFPlayer::is_normal_playback_rate()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Return the comparison directly instead of the redundant
    // if/else { return true; } else { return false; } form.
    return pf_playback_rate > 0.99 && pf_playback_rate < 1.01;
}

// Return the "playback rate changed" flag (non-zero when a new rate is pending).
int FFPlayer::ffp_get_playback_rate_change()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    return pf_playback_rate_changed;
}

// Set or clear the "playback rate changed" flag (used to acknowledge a rate change).
void FFPlayer::ffp_set_playback_rate_change(int change)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    pf_playback_rate_changed = change;
}

// Set the output volume from a 0-100 percentage.
void FFPlayer::ffp_set_playback_volume(int value)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Clamp the caller's value to a legal 0-100 percentage, then map it
    // proportionally onto SDL's 0..SDL_MIX_MAXVOLUME range.
    const int percent = av_clip(value, 0, 100);
    audio_volume = av_clip(SDL_MIX_MAXVOLUME * percent / 100, 0, SDL_MIX_MAXVOLUME);
}

// Once the demuxer has hit EOF (eof == 1), emit FFP_MSG_PLAY_FNISH when every
// stream that exists has also drained its decoded data (the *_no_data flags).
void FFPlayer::check_play_finish()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    if(eof != 1) {
        return; // av_read_frame has not reported AVERROR_EOF yet
    }
    // A stream counts as "done" when it is absent or its data has run out.
    const bool audio_done = (audio_stream < 0) || (audio_no_data == 1);
    const bool video_done = (video_stream < 0) || (video_no_data == 1);
    const bool has_stream = (audio_stream >= 0) || (video_stream >= 0);
    if (has_stream && audio_done && video_done) {
        // Everything that exists has drained: report playback finished.
        ffp_notify_msg1(this, FFP_MSG_PLAY_FNISH);
    }
}
// Look up an int64 player property by id; unknown ids return the
// caller-supplied default value.
int64_t FFPlayer::ffp_get_property_int64(int id, int64_t default_value)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    if (id == FFP_PROP_INT64_AUDIO_CACHED_DURATION) {
        return stat.audio_cache.duration;
    }
    if (id == FFP_PROP_INT64_VIDEO_CACHED_DURATION) {
        return stat.video_cache.duration;
    }
    return default_value;
}

/**
 * Sample a track's packet queue into its cache statistics.
 * st    the AVStream the queue belongs to (may be NULL)
 * q     the packet queue to sample (may be NULL)
 * cache destination statistics (bytes, packet count, cached duration)
 */
void FFPlayer::ffp_track_statistic_l(AVStream * st, PacketQueue * q, FFTrackCacheStatistic * cache)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    if (!q) {
        return; // nothing to sample without a queue
    }
    // Total queued bytes and packet count come straight from the queue.
    cache->bytes   = q->size;
    cache->packets = q->nb_packets;
    // Duration needs a valid stream time base to convert ticks to time.
    if (st && st->time_base.den > 0 && st->time_base.num > 0) {
        cache->duration = q->duration * av_q2d(st->time_base) * 1000;  // milliseconds
    }
}
// Called from the audio decode thread:
// update the audio cache statistics from the audio packet queue.
void FFPlayer::ffp_audio_statistic_l()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    /*
    audio_st: audio stream, set by stream_component_open() in the read thread
    audioq:   packet queue, initialized in stream_open() (main thread)
    stat:     statistics struct, initialized in the FFPlayer constructor
     */
    ffp_track_statistic_l(audio_st, &audioq, &stat.audio_cache);
}
// Called from the video decode thread:
// update the video cache statistics from the video packet queue.
// (The original comment said "audio" here — a copy-paste slip; this
// function clearly operates on the video stream and queue.)
void FFPlayer::ffp_video_statistic_l()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    /*
    video_st: video stream, set by stream_component_open() in the read thread
    videoq:   packet queue, initialized in stream_open() (main thread)
    stat:     statistics struct, initialized in the FFPlayer constructor
     */
    ffp_track_statistic_l(video_st, &videoq, &stat.video_cache);
}

/*
Decide whether a stream's packet queue already holds enough data, so the
read thread can stop pulling more packets from the input for a while.
*/
int FFPlayer::stream_has_enough_packets(AVStream * st, int stream_id, PacketQueue * queue)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    /*
        The queue is considered "full enough" when any of these hold:
        1. stream_id < 0: no such stream was selected, nothing to buffer.
        2. queue->abort_request: the queue was asked to abort; stop reading.
        3. The stream is an attached picture (cover art) — a single packet.
        4. More than MIN_FRAMES packets are queued AND either the queue
           reports no duration, or the queued duration exceeds 1 second.
    */
    // Fix: parenthesize the `&&` term inside the `||` chain. The operator
    // precedence already grouped it this way, but the unparenthesized mix
    // triggers -Wlogical-op-parentheses and obscures the intent (upstream
    // ffplay writes it with explicit parentheses).
    return stream_id < 0 ||
           queue->abort_request ||
           (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
           (queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0));
}

// Return 1 if the opened input looks like a realtime stream
// (by demuxer name or by URL scheme), 0 otherwise.
static int is_realtime(AVFormatContext * s)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Realtime demuxers are identified by their format name.
    const char *fmt_name = s->iformat->name;
    if (!strcmp(fmt_name, "rtp") || !strcmp(fmt_name, "rtsp")
        || !strcmp(fmt_name, "sdp") || !strcmp(fmt_name, "rtmp")) {
        return 1;
    }
    // Otherwise fall back to the URL scheme of the opened I/O context.
    if (s->pb
        && (!strncmp(s->filename, "rtp:", 4) || !strncmp(s->filename, "udp:", 4))) {
        return 1;
    }
    return 0;
}
// Demux (read) thread.
// Opens the input, probes its streams, opens the audio/video decoders, then
// loops: service seek requests, throttle when the packet queues are full,
// pull packets with av_read_frame() and route them into audioq/videoq.
// Progress and errors are reported through ffp_notify_* messages.
// Always returns 0.
int FFPlayer::read_thread()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
#ifdef _WIN32
    CoInitializeEx(NULL, COINITBASE_MULTITHREADED);
#endif
    int err, ret;
    // Best stream index per media type (AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, ...)
    int st_index[AVMEDIA_TYPE_NB];      // AVMEDIA_TYPE_VIDEO / AVMEDIA_TYPE_AUDIO etc., holds each stream index
    /*
    pkt1 lives on this thread's stack, so it is released automatically
    when the thread returns.
    */
    AVPacket pkt1;
    AVPacket *pkt = &pkt1;  //
    // Initialize every entry to -1; an entry left at -1 means that stream type is absent.
    memset(st_index, -1, sizeof(st_index));
    video_stream = -1;// video stream index; -1 until a video stream is found
    audio_stream = -1;// audio stream index; -1 until an audio stream is found
    eof = 0;// 0: end of file not reached yet
    // 1. Allocate the format context that manages the file being played.
    ic = avformat_alloc_context();
    if (!ic) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    /* 3. Open the input: probes the protocol type; for network sources this
       also establishes the connection. */
    err = avformat_open_input(&ic, input_filename_, NULL, NULL);
    if (err < 0) {
        print_error(input_filename_, err);
        ret = -1;
        goto fail;
    }
    // Tells listeners avformat_open_input succeeded; no other use for now.
    ffp_notify_msg1(this, FFP_MSG_OPEN_INPUT);
    if (seek_by_bytes < 0) {
        // Seek by bytes for containers with discontinuous timestamps, except ogg.
        seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
    }
    /*
     * 4. Probe the media: fills AVFormatContext/AVStream with container and
     * codec parameters. Yields far more detail than avformat_open_input
     * alone — it essentially decodes a few packets to gather the info.
     */
    err = avformat_find_stream_info(ic, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_WARNING,
               "%s: could not find codec parameters\n", input_filename_);
        ret = -1;
        goto fail;
    }
    // Tells listeners avformat_find_stream_info succeeded; the UI callback
    // loop uses this to query the media duration.
    ffp_notify_msg1(this, FFP_MSG_FIND_STREAM_INFO);
    realtime = is_realtime(ic);// whether the input is a realtime stream

    /*
    Dump the media details in a human-readable form. av_dump_format prints
    directly (no qDebug/printf needed); the two qDebug lines just bracket its
    output so it is easy to spot in the log.
    */
    qDebug() << "============this is av_dump_format's information:============";
    av_dump_format(ic, 0, input_filename_, 0);
    qDebug() << "============ av_dump_format's information is end ============";

    // 6.2 Select the best video and audio streams via av_find_best_stream.
    st_index[AVMEDIA_TYPE_VIDEO] =
        av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                            st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    st_index[AVMEDIA_TYPE_AUDIO] =
        av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                            st_index[AVMEDIA_TYPE_AUDIO],
                            st_index[AVMEDIA_TYPE_VIDEO],
                            NULL, 0);
    /* open the streams */
    /* 8. Open the audio/video decoders; this also spawns the decode threads. */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {// open the audio stream if present
        stream_component_open(st_index[AVMEDIA_TYPE_AUDIO]);
    }
    ret = -1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) { // open the video stream if present
        ret = stream_component_open( st_index[AVMEDIA_TYPE_VIDEO]);
    }
    // Marks that stream_component_open was called; no other use for now.
    ffp_notify_msg1(this, FFP_MSG_COMPONENT_OPEN);
    if (video_stream < 0 && audio_stream < 0) {
        av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
               input_filename_);
        ret = -1;
        goto fail;
    }
    /*
    The core player is ready. FFP_MSG_PREPARED (rather than FFP_REQ_START
    directly) decouples the layers: the UI module is notified first, then it
    asks the mediaplayer module to call start(). This indirection keeps the
    modules reusable and lets more steps be inserted later without touching
    the others.
    */
    ffp_notify_msg1(this, FFP_MSG_PREPARED);
    // The UI would answer with a start request via the mediaplayer module:
    // ffp_notify_msg1(this, FFP_REQ_START);
    
    while (1) {
        // (early bring-up code simulated the loop with a sleep)
        //        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        // Stop reading when an abort was requested.
        if(abort_request) {
            break;
        }
        // Service a pending seek request.
        if (seek_req) {
            // seek_pos is the requested target position, in the media's
            // timestamp units (AV_TIME_BASE based unless seeking by bytes).
            int64_t seek_target = seek_pos;
            /*
                Lower bound for the seek:
                when seek_rel > 0 (seeking forward), allow landing anywhere
                from just after the previous position (target - rel + 2; the
                +2 leaves slack so an exact-boundary keyframe isn't missed);
                otherwise there is no lower bound (INT64_MIN).
            */
            int64_t seek_min    = seek_rel > 0 ? seek_target - seek_rel + 2 : INT64_MIN;
            /*
                Upper bound for the seek:
                when seek_rel < 0 (seeking backward), allow landing anywhere
                up to just before the previous position (target - rel - 2;
                the -2 leaves similar slack); otherwise there is no upper
                bound (INT64_MAX).
            */
            int64_t seek_max    =  seek_rel < 0 ? seek_target -  seek_rel - 2 : INT64_MAX;
            // Perform the actual seek within [seek_min, seek_max].
            ret = avformat_seek_file(ic, -1, seek_min, seek_target, seek_max,  seek_flags);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR,
                       "%s: error while seeking\n",  ic->filename);
            } else {
                if (audio_stream >= 0) {    // audio stream present
                    // Drop all queued audio packets and queue a flush packet.
                    packet_queue_flush(&audioq);
                    packet_queue_put(&audioq, &flush_pkt);
                }
                if (video_stream >= 0) { // video stream present
                    // Drop all queued video packets and queue a flush packet.
                    packet_queue_flush(&videoq);
                    packet_queue_put(&videoq, &flush_pkt);
                }
            }
            seek_req = 0;
            eof = 0;
            // Tell listeners the seek finished.
            ffp_notify_msg1(this, FFP_MSG_SEEK_COMPLETE);
        }
        /* if the queue are full, no need to read more */
        // Throttle reading to keep the packet queues from overfilling.
        // infinite_buffer < 1 means the bounded-buffer mode is active,
        // so check total queue size and per-stream packet counts.
        if (infinite_buffer < 1 &&
            // combined audio+video queue size over the hard cap, or ...
            (audioq.size + videoq.size  > MAX_QUEUE_SIZE
             || (
                // ... both streams already buffered enough packets
                stream_has_enough_packets(audio_st, audio_stream, &audioq) &&
                 stream_has_enough_packets(video_st, video_stream, &videoq) 
                 ))) {
            /*
                Queues are full enough: sleep 10 ms before re-checking
                instead of reading more packets.
            */
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            continue;
        }
        // 7. Read one demuxed (still encoded) packet.
        ret = av_read_frame(ic, pkt); // the call does not free pkt; we must unref it ourselves
        if(ret < 0) { // error or end of input
            if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !eof) {        // reached end of input
                // Push a null packet into each queue to flush the decoders.
                if (video_stream >= 0) {
                    packet_queue_put_nullpacket(&videoq, video_stream);
                }
                if (audio_stream >= 0) {
                    packet_queue_put_nullpacket(&audioq, audio_stream);
                }
                eof = 1;
            }
            if (ic->pb && ic->pb->error) { // I/O error: leave the loop
                break;
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(10));     // nothing to read; sleep before the next poll
            continue;		// keep looping
        } else {
            eof = 0;
        }
        // Route the packet to the matching queue; anything else is dropped.
        if (pkt->stream_index == audio_stream) {
            packet_queue_put(&audioq, pkt);
        } else if (pkt->stream_index == video_stream) {
            packet_queue_put(&videoq, pkt);
        } else {
            av_packet_unref(pkt);// not queued: release the packet data now
        }
    }
fail:
    // NOTE(review): ic is not closed here on the failure paths; presumably
    // stream_close() (called from ffp_destroy) releases it — confirm.
    return 0;
}
/* polls for possible required screen refresh at least this often, should be less than 1/fps */
#define REFRESH_RATE 0.01  // sleep ceiling per iteration: 10 ms
// Video refresh thread: periodically calls video_refresh(), sleeping between
// iterations for whatever time video_refresh() says remains (capped at
// REFRESH_RATE seconds). Runs until abort_request is set; returns 0.
int FFPlayer::video_refresh_thread()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    double remaining_time = 0.0; // seconds left to sleep before the next refresh
    for (; !abort_request; ) {
        if (remaining_time > 0.0) {
            // Convert seconds to microseconds for av_usleep.
            av_usleep((int)(int64_t)(remaining_time * 1000000.0));
        }
        // Reset to the polling ceiling; video_refresh may shrink it.
        remaining_time = REFRESH_RATE;
        video_refresh(&remaining_time);
    }
    return 0;
}
// Duration of frame vp given its successor nextvp, scaled by the playback
// rate. Falls back to vp's own nominal duration when the pts difference is
// implausible; returns 0 across play-sequence boundaries.
double FFPlayer::vp_duration(Frame * vp, Frame * nextvp)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    if (vp->serial != nextvp->serial) {
        return 0.0; // different play sequences: no meaningful duration
    }
    const double duration = nextvp->pts - vp->pts;
    const bool implausible =
        std::isnan(duration) || duration <= 0 || duration > max_frame_duration;
    return (implausible ? vp->duration : duration) / pf_playback_rate;
}
// Adjust the nominal frame delay to keep video in sync with the master clock.
// delay: the frame's nominal display duration (seconds).
// Returns the possibly-adjusted delay: shortened (or zeroed) when video is
// behind the master clock, stretched/doubled when it is ahead.
double FFPlayer::compute_target_delay(double delay)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    double sync_threshold, diff = 0;
    /* update delay to follow master synchronisation source */
    if (get_master_sync_type() != AV_SYNC_VIDEO_MASTER) {
        /* if video is slave, we try to correct big delays by
        duplicating or deleting a frame */
        // diff > 0: video clock ahead of master; diff < 0: video behind.
        diff = get_clock(&vidclk) - get_master_clock();
        /* skip or repeat frame. We take into account the
        delay to compute the threshold. I still don't know
        if it is the best guess */
        // Tolerance window, clamped to [AV_SYNC_THRESHOLD_MIN, AV_SYNC_THRESHOLD_MAX].
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
        // Only correct when diff is a sane value (NaN or huge diffs mean the
        // clocks are not comparable, e.g. right after a seek).
        if (! std::isnan(diff) && fabs(diff) <  max_frame_duration) {
            if (diff <= -sync_threshold) {
                // Video lags: shrink the delay (never below 0) to catch up.
                delay = FFMAX(0, delay + diff);
            } else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD) {
                // Video ahead and the frame is long: extend by the full diff.
                delay = delay + diff;
            } else if (diff >= sync_threshold) {
                // Video ahead with a short frame: just double the delay.
                delay = 2 * delay;
            }
            // |diff| inside the threshold: leave the delay untouched.
        }
    }
    av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
           delay, -diff);
    return delay;
}
// Set the video clock to the pts of the frame about to be displayed.
// NOTE(review): pts is divided by pf_playback_rate — presumably to keep the
// video clock consistent with a rate-scaled audio clock; confirm against the
// audio clock update path.
void FFPlayer::update_video_pts(double pts, int64_t pos, int serial)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    /* update current video pts */
    set_clock(&vidclk, pts / pf_playback_rate, serial);
}
/*
 * Video refresh: pop frames from the picture queue, compute each frame's
 * display delay against the master clock, drop frames that are already late,
 * and hand the frame to the display callback.
 * remaining_time (in/out): how long the caller may sleep before calling
 * again; shrunk here when the next frame is due sooner.
 */
void FFPlayer::video_refresh(double * remaining_time)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    Frame *vp = nullptr, *lastvp = nullptr;
    // For now: as soon as the queue holds a displayable frame, show it.
    // Only run the display logic when a video stream exists.
    if (video_st) {
retry:
        // Is the picture queue empty?
        if (frame_queue_nb_remaining(&pictq) == 0) {
            // Mark that there is no video data to show.
            video_no_data = 1;
            // If the demuxer already hit EOF (eof == 1) ...
            if(eof == 1) {
                // ... check whether playback has fully finished.
                check_play_finish();
            }
        } else {
            video_no_data = 0;  // frames are available
            double last_duration, duration, delay;
            /* dequeue the picture */
            lastvp = frame_queue_peek_last(&pictq);
            // Service a pending screenshot request on the last shown frame.
            screenshot(lastvp->frame);
            vp = frame_queue_peek(&pictq);
            // Frame from a stale play sequence (e.g. pre-seek): discard it.
            if (vp->serial != videoq.serial) {
                frame_queue_next(&pictq);
                goto retry;
            }
            // New play sequence started: restart the frame timer from "now".
            if (lastvp->serial != vp->serial) {
                frame_timer = av_gettime_relative() / 1000000.0;
            }
            if (paused) {
                goto display;
            }
            /* compute nominal last_duration */
            last_duration = vp_duration(lastvp, vp);
            // Adjust the duration by the A/V sync correction.
            delay = compute_target_delay(last_duration);
            double time = av_gettime_relative() / 1000000.0;
            // Not yet time to show vp: shrink the caller's sleep budget and
            // keep displaying the previous frame.
            if (time <  frame_timer + delay) {
                *remaining_time = FFMIN( frame_timer + delay - time, *remaining_time);
                goto display;
            }
            frame_timer += delay;
            // If we fell far behind schedule, resynchronize the timer to now.
            if (delay > 0 && time -  frame_timer > AV_SYNC_THRESHOLD_MAX) {
                frame_timer = time;
            }
            // Update the video clock under the queue lock.
            SDL_LockMutex(pictq.mutex);
            if (!std::isnan(vp->pts)) {
                update_video_pts(vp->pts, vp->pos, vp->serial);
            }
            SDL_UnlockMutex(pictq.mutex);
            // Frame-drop check: if the frame after vp is also already due,
            // drop vp instead of displaying it late.
            if (frame_queue_nb_remaining(&pictq) > 1) {
                Frame *nextvp = frame_queue_peek_next(&pictq);
                duration = vp_duration(vp, nextvp);
                if (!step && (framedrop > 0 || (framedrop && get_master_sync_type() != AV_SYNC_VIDEO_MASTER))
                    && time >  frame_timer + duration) {
                    frame_drops_late++;
                    frame_queue_next(&pictq);
                    goto retry;
                }
            }
            // Advance the queue and request a repaint.
            frame_queue_next(&pictq);
            force_refresh = 1;
            //            if (step && !paused)
            //                stream_toggle_pause(is);
        }
display:
        /* display picture */
        if (force_refresh &&  pictq.rindex_shown) {
            if(vp) {
                // Hand the frame to the registered display callback, if any.
                if(video_refresh_callback_) {
                    video_refresh_callback_(vp);
                }
            }
        }
    }
    force_refresh = 0;
}
// Register the callback invoked with each frame to display.
// callback: invoked from the video refresh thread with the frame to show.
void FFPlayer::AddVideoRefreshCallback(
    std::function<int (const Frame *)> callback)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // The parameter already holds the caller's copy; swap it into the member
    // instead of copy-assigning, avoiding a second copy of the (potentially
    // capturing) std::function. The old callback dies with the parameter.
    video_refresh_callback_.swap(callback);
}
// Resolve the effective master clock type, falling back when the configured
// master's stream is absent (video master without video -> audio master;
// audio master without audio -> video master, or unknown if neither exists).
int FFPlayer::get_master_sync_type()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    switch (av_sync_type) {
        case AV_SYNC_VIDEO_MASTER:
            // No video stream: fall back to the audio clock.
            return video_st ? AV_SYNC_VIDEO_MASTER : AV_SYNC_AUDIO_MASTER;
        case AV_SYNC_AUDIO_MASTER:
            if (audio_st) {
                return AV_SYNC_AUDIO_MASTER;
            }
            // No audio: use video if present, otherwise nothing to sync to.
            return video_st ? AV_SYNC_VIDEO_MASTER : AV_SYNC_UNKNOW_MASTER;
        default:
            return AV_SYNC_AUDIO_MASTER;
    }
}
// Read the current master clock value in seconds.
double FFPlayer::get_master_clock()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    const int master = get_master_sync_type();
    if (master == AV_SYNC_VIDEO_MASTER) {
        return get_clock(&vidclk);
    }
    // Audio master and any other type both read the audio clock
    // (an external-clock master is not supported here).
    return get_clock(&audclk);
}
// Construct the decoder wrapper with its pending-packet storage initialized.
Decoder::Decoder()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Put the embedded AVPacket into a defined default state.
    // NOTE(review): av_init_packet is deprecated in newer FFmpeg releases —
    // revisit if the FFmpeg version is upgraded.
    av_init_packet(&pkt_);
}
// Intentionally empty: thread shutdown and resource release are done
// explicitly through decoder_abort() and decoder_destroy().
Decoder::~Decoder()
{
}

// Initialize the decoder: attach the codec context it decodes with and the
// packet queue it consumes from.
void Decoder::decoder_init(AVCodecContext * avctx, PacketQueue * queue)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    avctx_ = avctx;// codec context
    queue_ = queue;// packet queue
}

// Start the decoder: enable the packet queue and spawn the decode thread.
// codec_type:  AVMEDIA_TYPE_VIDEO or AVMEDIA_TYPE_AUDIO; anything else fails.
// thread_name: currently unused (kept for interface compatibility).
// arg:         opaque pointer handed to the thread entry (the FFPlayer).
// Returns 0 on success, -1 for an unsupported media type.
int Decoder::decoder_start(AVMediaType codec_type, const char *thread_name, void *arg)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Fix: validate the media type before touching the queue. The original
    // started the packet queue and only then returned -1 for unsupported
    // types, leaving the queue started with no thread to consume it.
    if (codec_type != AVMEDIA_TYPE_VIDEO && codec_type != AVMEDIA_TYPE_AUDIO) {
        return -1;
    }
    // Enable the packet queue so packet_queue_get() will block-deliver data.
    packet_queue_start(queue_);
    // Spawn the matching decode thread.
    if (AVMEDIA_TYPE_VIDEO == codec_type) {
        decoder_thread_ = new std::thread(&Decoder::video_thread, this, arg);
    } else {
        decoder_thread_ = new std::thread(&Decoder::audio_thread, this, arg);
    }
    return 0;
}
// Abort decoding: wake everything that may be blocked, join the decode
// thread, then flush the packet queue.
// fq: the frame queue the decode thread may be blocked on.
void Decoder::decoder_abort(FrameQueue * fq)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    packet_queue_abort(queue_);     // ask the packet queue to abort
    frame_queue_signal(fq);     // wake any thread blocked on the frame queue
    if(decoder_thread_ && decoder_thread_->joinable()) {
        decoder_thread_->join(); // wait for the decode thread to exit
        delete decoder_thread_;
        decoder_thread_ = NULL;
    }
    packet_queue_flush(queue_);  // empty the packet queue and free its data
}
// Release decoder resources: drop any pending packet and free the codec context.
void Decoder::decoder_destroy()
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    av_packet_unref(&pkt_);
    avcodec_free_context(&avctx_);
}
/*
Repeatedly try to pull one decoded frame out of the decoder, feeding it
packets from the queue as needed.
Returns -1: abort was requested
         0: decoding has finished, no more data to read
         1: got a decoded frame
*/
int Decoder::decoder_decode_frame(AVFrame * frame)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Start as AVERROR(EAGAIN): the decoder needs more input.
    int ret = AVERROR(EAGAIN);
    // Loop until we return a frame, hit end of stream, or are aborted.
    for (;;) {
        // Scratch packet filled from the queue each iteration.
        AVPacket pkt;
        // 1. While the stream is continuous, try to receive decoded frames.
        if (queue_->serial == pkt_serial_) { // 1.1 only when queue and decoder share the same play sequence
            do {
                if (queue_->abort_request) {
                    return -1;    // abort requested
                }
                // 1.2 Receive a decoded frame.
                switch (avctx_->codec_type) {
                    case AVMEDIA_TYPE_VIDEO:
                        // Receive a video frame from the decoder.
                        ret = avcodec_receive_frame(avctx_, frame);
                        if (ret >= 0) {
                            /*
                            pts: presentation timestamp — when to display the frame
                            pkt_dts: decode timestamp — when the frame was decoded
                            best_effort_timestamp: best pts estimate, usually == pts
                            */
                            if (decoder_reorder_pts == -1) {
                                frame->pts = frame->best_effort_timestamp;
                            } else if (!decoder_reorder_pts) {
                                frame->pts = frame->pkt_dts;
                            }
                        }
                        break;
                    case AVMEDIA_TYPE_AUDIO:
                        // Receive an audio frame from the decoder.
                        ret = avcodec_receive_frame(avctx_, frame);
                        if (ret >= 0) {
                            // Time base {1, sample_rate}: one tick per audio sample.
                            // sample_rate is the frame's sampling rate in Hz.
                            AVRational tb = {1, frame->sample_rate};
                            // pts: presentation timestamp; AV_NOPTS_VALUE means
                            // the timestamp is invalid/unset.
                            if (frame->pts != AV_NOPTS_VALUE) {
                                // Valid pts: rescale from pkt_timebase
                                // (effectively stream->time_base) to {1, sample_rate}.
                                frame->pts = av_rescale_q(frame->pts, avctx_->pkt_timebase, tb);
                            } else if (next_pts != AV_NOPTS_VALUE) {
                                // No pts on the frame: use the next_pts/next_pts_tb
                                // predicted from the previous frame, rescaled to
                                // {1, sample_rate}.
                                frame->pts = av_rescale_q(next_pts, next_pts_tb, tb);
                            }
                            if (frame->pts != AV_NOPTS_VALUE) {
                                // Predict the next frame's pts from this frame's
                                // pts plus its sample count.
                                next_pts = frame->pts + frame->nb_samples;
                                next_pts_tb = tb; // remember the time base
                            }
                        }
                        break;
                    // (other codec types leave ret unchanged)
                }
                // 1.3 End of stream: report that decoding finished.
                if (ret == AVERROR_EOF) {
                    // Remember which play sequence finished decoding.
                    finished_ = pkt_serial_;
                    // Reset the decoder's internal buffers.
                    avcodec_flush_buffers(avctx_);
                    return 0;
                }
                // 1.4 Got a frame: report success.
                if (ret >= 0) {
                    return 1;
                }
            } while (ret != AVERROR(EAGAIN));   // 1.5 EAGAIN: decoder starved, go feed it a packet
        }

        // 2. Fetch a packet; if the play sequence changed (discontinuity),
        //    discard packets from the stale sequence.
        do {
            // 2.1 With an empty queue we would wake the read thread here
            //     (continue_read_thread SDL_cond in upstream ffplay):
            //            if (queue_->nb_packets == 0)  // queue empty
            //                SDL_CondSignal(empty_queue_cond);// ask read_thread for packets
            // 2.2 Reuse a pending packet if one was left over.
            if (packet_pending_) {
                // Move the pending packet into the scratch packet.
                av_packet_move_ref(&pkt, &pkt_);
                packet_pending_ = 0;
            } else {
                // 2.3 Blocking read of the next packet (updates pkt_serial_).
                if (packet_queue_get(queue_, &pkt, 1, &pkt_serial_) < 0) {
                    return -1;
                }
            }
            // Stale play sequence: drop the packet and read again.
            if(queue_->serial != pkt_serial_) {
                av_packet_unref(&pkt); // release the filtered-out packet
            }
        } while (queue_->serial != pkt_serial_);// keep reading until sequences match
        // 3. Feed the packet to the decoder.
        if (pkt.data == flush_pkt.data) {// flush packet queued on seek / stream switch
            // when seeking or when switching to a different stream
            avcodec_flush_buffers(avctx_); // drop the decoder's buffered frames
            finished_ = 0;        // clear the finished marker
            next_pts = start_pts;     // audio: reset the predicted next pts
            next_pts_tb = start_pts_tb;// audio: reset the predicted pts time base
        } else {
            if (avctx_->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                // Subtitle decoding path, currently disabled:
                //                int got_frame = 0;
                //                ret = avcodec_decode_subtitle2(avctx_, sub, &got_frame, &pkt);
                //                if (ret < 0) {
                //                    ret = AVERROR(EAGAIN);
                //                } else {
                //                    if (got_frame && !pkt.data) {
                //                        packet_pending = 1;
                //                        av_packet_move_ref(&pkt, &pkt);
                //                    }
                //                    ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
                //                }
            } else {
                // Submit the packet to the decoder.
                if (avcodec_send_packet(avctx_, &pkt) == AVERROR(EAGAIN)) {
                    //                    av_log(avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
                    // Decoder is full: keep the packet for the next round.
                    packet_pending_ = 1;
                    // Stash the scratch packet as the pending packet.
                    av_packet_move_ref(&pkt_, &pkt);
                }
            }
            av_packet_unref(&pkt);	// always release our reference to the packet data
        }
    }
}

/**
 * Fetch one decoded video frame from the decoder.
 * frame  destination for the decoded picture data
 * return -1 the decode thread must exit
 *         0 no frame was obtained
 *         1 a frame was obtained (after the optional drop analysis)
 */
int Decoder::get_video_frame(AVFrame * frame)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // 1. Pull a decoded frame; a negative result means the decode thread
    //    was asked to quit (see decoder_decode_frame for when that happens).
    const int got_picture = decoder_decode_frame(frame);
    if (got_picture < 0) {
        return -1;
    }
    if (got_picture) {
        // 2. Hook point for dropping stale frames before they are queued
        //    (currently disabled).
        //        frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(ic, video_st, frame);
    }
    return got_picture;
}

/**
 * Push a decoded video frame into the frame queue.
 *
 * Waits for a writable slot in the queue, copies the frame's metadata into
 * it, moves the picture data across, then advances the queue's write index.
 *
 * fq        destination frame queue
 * src_frame decoded source frame (its data is moved out and it is reset)
 * pts       presentation timestamp in seconds
 * duration  frame duration in seconds
 * pos       byte position of the frame in the media file
 * serial    play-sequence serial of the frame
 * return    0 on success, -1 when the queue was asked to abort
 */
int Decoder::queue_picture(FrameQueue * fq, AVFrame * src_frame, double pts, double duration, int64_t pos, int serial)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    // Block until a slot is writable; NULL means the queue is aborting.
    Frame *vp = frame_queue_peek_writable(fq);
    if (!vp) {
        return -1;
    }
    //    vp->sar = src_frame->sample_aspect_ratio;
    //    vp->uploaded = 0;
    // Fill in the slot's metadata from the source frame and the caller.
    vp->width    = src_frame->width;
    vp->height   = src_frame->height;
    vp->format   = src_frame->format;
    vp->pts      = pts;
    vp->duration = duration;
    vp->pos      = pos;
    vp->serial   = serial;
    // Transfer ownership of the picture data into the slot; src_frame is reset.
    av_frame_move_ref(vp->frame, src_frame);
    // Publish the slot by advancing the write index.
    frame_queue_push(fq);
    return 0;
}


int Decoder::audio_thread(void *arg)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    FFPlayer *is = (FFPlayer *)arg;
    AVFrame *frame = av_frame_alloc();  // 堆上为 AVFrame 结构体分配空间，用于存储解码后的音频帧，
    Frame *af; // 指向 Frame 结构体的指针，用于操作帧队列中的帧
    int got_frame = 0;  // 是否读取到帧
    AVRational tb;      // timebase时间基，用于时间戳的转换
    int ret = 0;
    if (!frame) {
        return AVERROR(ENOMEM);
    }
    do {
        //对音频流的数据包队列进行统计，更新音频缓存统计信息
        is->ffp_audio_statistic_l();
        // 1. 读取解码帧
        if ((got_frame = decoder_decode_frame(frame)) < 0) { // 是否获取到一帧数据
            goto the_end;    // < =0 abort
        }
        if (got_frame) {
            /*tb  = (AVRational) {
                1, frame->sample_rate
            };*/   // 设置为sample_rate为timebase
            tb.num = 1;
            tb.den = frame->sample_rate;
            // 2. 获取可写Frame
            if (!(af = frame_queue_peek_writable(&is->sampq))) { // 获取可写帧
                goto the_end;
            }
            // 3. 设置Frame并放入FrameQueue
            af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);  // 转换时间戳
            af->pos = frame->pkt_pos;
            af->serial = is->auddec.pkt_serial_;
            AVRational temp_a;
            temp_a.num = frame->nb_samples;
            temp_a.den = frame->sample_rate;
            af->duration = av_q2d(temp_a);
//            af->duration = av_q2d((AVRational) {
//                frame->nb_samples, frame->sample_rate
//            });
            av_frame_move_ref(af->frame, frame);
            frame_queue_push(&is->sampq);  // 代表队列真正插入一帧数据
        }
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
the_end:
    av_frame_free(&frame);
    return ret;
}

/*
 *视频解码线程函数，负责从解码器获取视频帧并将其插入帧队列
 *该函数会不断循环从解码器获取解码后的视频帧，计算帧的持续时间和显示时间戳，
 *然后将视频帧插入到帧队列中，供后续播放使用。
 */
int Decoder::video_thread(void *arg)
{
#ifdef  Debug_Falg
    qDebug() << "The name is : <" << __func__ << "> The file is :<" << __FILE__ << ">Thread is <" << QThread::currentThreadId();
#endif
    FFPlayer *is = (FFPlayer *)arg;
    AVFrame *frame = av_frame_alloc();  // 分配解码帧
    double pts;                 // pts视频帧的显示时间戳
    double duration;            // 帧持续时间
    int ret;
    //1 获取stream timebase视频流的时间基，用于时间戳的转换
    AVRational tb = is->video_st->time_base; 
    //2 获取帧率，以便计算每帧picture的duration持续时间
    AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
    if (!frame) {
        return AVERROR(ENOMEM);
    }
    for (;;) {  // 循环取出视频解码的帧数据
        is->ffp_video_statistic_l();// 统计视频packet缓存
        // 3 获取解码后的视频帧
        ret = get_video_frame(frame);
        if (ret < 0) {
            goto the_end;    //解码结束, 什么时候会结束
        }
        // 若返回值为 0，说明没有解码得到画面，继续下一次循环
        if (!ret) {         //没有解码得到画面, 什么情况下会得不到解后的帧
            continue;
        }
        //           1/25 = 0.04秒
        // 4 计算帧持续时间和换算pts值为秒
        // 1/帧率 = duration 单位秒, 没有帧率时则设置为0, 有帧率帧计算出帧间隔
        // 计算帧率的倒数，得到每帧的持续时间，单位为秒
        // 若帧率信息有效，则计算持续时间；否则持续时间设为 0
        AVRational temp_a;
        temp_a.num = frame_rate.den;
        temp_a.den = frame_rate.num;
        duration = (frame_rate.num && frame_rate.den ? av_q2d(temp_a) : 0);

        // 根据AVStream timebase计算出pts值, 单位为秒
        // 若视频帧的 pts 无效，则设为 NAN
        pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);  // 单位为秒
        // 5 将解码后的视频帧插入队列
        ret = queue_picture(&is->pictq, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial_);
        // 6 释放frame对应的数据
        av_frame_unref(frame);
        if (ret < 0) { // 返回值小于0则退出线程
            goto the_end;
        }
    }
the_end:
    av_frame_free(&frame);
    return 0;
}
