﻿/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * simple media player based on the FFmpeg libraries
 */

#include "config.h"
#include <inttypes.h>
#include <math.h>
#include <limits.h>
#include <signal.h>
#include <stdint.h>

#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/dict.h"
#include "libavutil/fifo.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "libavutil/time.h"
#include "libavutil/bprint.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "libswresample/swresample.h"

#if CONFIG_AVFILTER
# include "libavfilter/avfilter.h"
# include "libavfilter/buffersink.h"
# include "libavfilter/buffersrc.h"
#endif

#include <SDL.h>
#include <SDL_thread.h>

#include "cmdutils.h"

#include <assert.h>

const char program_name[] = "ffplay";
const int program_birth_year = 2003;

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_FRAMES 25
#define EXTERNAL_CLOCK_MIN_FRAMES 2
#define EXTERNAL_CLOCK_MAX_FRAMES 10

/* Minimum SDL audio buffer size, in samples. */
#define SDL_AUDIO_MIN_BUFFER_SIZE 512
/* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30

/* Step size for volume control in dB */
#define SDL_VOLUME_STEP (0.75)

/* no AV sync correction is done if below the minimum AV sync threshold */
#define AV_SYNC_THRESHOLD_MIN 0.04
/* AV sync correction is done if above the maximum AV sync threshold */
#define AV_SYNC_THRESHOLD_MAX 0.1
/* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
#define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* external clock speed adjustment constants for realtime sources based on buffer fullness */
#define EXTERNAL_CLOCK_SPEED_MIN  0.900
#define EXTERNAL_CLOCK_SPEED_MAX  1.010
#define EXTERNAL_CLOCK_SPEED_STEP 0.001

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* polls for possible required screen refresh at least this often, should be less than 1/fps */
#define REFRESH_RATE 0.01

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
/* TODO: We assume that a decoded and resampled frame fits into this buffer */
#define SAMPLE_ARRAY_SIZE (8 * 65536)

#define CURSOR_HIDE_DELAY 1000000

#define USE_ONEPASS_SUBTITLE_RENDER 1

static unsigned sws_flags = SWS_BICUBIC;

typedef struct MyAVPacketList {
    AVPacket *pkt;  // the wrapped packet
    int serial;     // serial of the owning PacketQueue at insertion time; a seek
                    // bumps the queue's serial, so stale buffered packets can be
                    // detected and discarded
} MyAVPacketList;

typedef struct PacketQueue {
    AVFifoBuffer *pkt_list; // FIFO ring buffer holding MyAVPacketList entries
    int nb_packets;         // number of queued packets
    int size;               // total bytes: packet payloads plus per-entry overhead
    int64_t duration;       // sum of the queued packets' durations
    int abort_request;      // set to make blocked readers/writers bail out
    int serial;             // playback-sequence id; incremented on every seek/flush
                            // (mirrored into each MyAVPacketList entry)
    SDL_mutex *mutex;       // protects all fields
    SDL_cond *cond;         // signaled when a packet is queued or abort is requested
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 3
#define SUBPICTURE_QUEUE_SIZE 16
#define SAMPLE_QUEUE_SIZE 9

//#define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
#define FRAME_QUEUE_SIZE 16

typedef struct AudioParams {
    int freq;                // sample rate in Hz
    int channels;            // channel count
    int64_t channel_layout;  // channel layout bitmask
    enum AVSampleFormat fmt; // sample format
    int frame_size;          // bytes of one sample across all channels (assumed; confirm at init site)
    int bytes_per_sec;       // byte rate (presumably freq * frame_size; confirm at init site)
} AudioParams;

/* A clock tracks a stream's playback position; the current time is presumably
 * reconstructed from pts_drift plus the system time (confirm in get_clock). */
typedef struct Clock {
    double pts;           /* clock base */
    double pts_drift;     /* clock base minus time at which we updated the clock */
    double last_updated;  /* time of the last update */
    double speed;         /* playback speed factor */
    int serial;           /* clock is based on a packet with this serial */
    int paused;           /* non-zero while paused */
    int *queue_serial;    /* pointer to the current packet queue serial, used for obsolete clock detection */
} Clock;

/* Common struct for handling all types of decoded data and allocated render buffers. */
typedef struct Frame {
    // decoded (uncompressed) audio/video frame
    AVFrame *frame;
    AVSubtitle sub;
    // playback sequence this frame belongs to
    int serial;
    // display time
    double pts;           /* presentation timestamp for the frame */
    // duration
    double duration;      /* estimated duration of the frame */
    // offset of the originating packet
    int64_t pos;          /* byte position of the frame in the input file */
    // picture width (video only)
    int width;
    // picture height (video only)
    int height;
    // AVSampleFormat for audio
    // AVPixelFormat for video
    int format;
    // sample aspect ratio (video only)
    AVRational sar;
    int uploaded;   // presumably set once uploaded to an SDL texture — confirm in display code
    int flip_v;     // non-zero if the picture must be flipped vertically
} Frame;

typedef struct FrameQueue {
    // fixed frame storage (FRAME_QUEUE_SIZE = 16 slots)
    Frame queue[FRAME_QUEUE_SIZE];
    // read index
    int rindex;
    // write index
    int windex;
    // number of frames currently stored (a count, not a byte size)
    int size;
    // capacity actually used (<= FRAME_QUEUE_SIZE)
    int max_size;
    // keep the last shown frame around so it can be re-rendered on demand
    // (e.g. when the window changes); also used for audio queues
    int keep_last;
    // reads effectively use rindex + rindex_shown, because the last shown
    // frame stays cached in the queue when keep_last is set
    int rindex_shown;

    // queue lock
    SDL_mutex *mutex;
    // signaled when frames are added or removed
    SDL_cond *cond;
    // associated demuxer-side packet queue
    PacketQueue *pktq;  // the PacketQueue has its own mutex and cond
} FrameQueue;

/* which clock drives audio/video synchronization */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct Decoder {
    AVPacket *pkt;      // compressed packet currently fed to the codec
    PacketQueue *queue; // source queue of compressed packets
    AVCodecContext *avctx;  // codec context
    int pkt_serial; // serial of the last packet taken from the queue
    int finished;   // set to the serial once the stream is fully decoded
    int packet_pending; // non-zero when pkt must be resent (send_packet returned EAGAIN)
    SDL_cond *empty_queue_cond; // wakes read_thread; this is is->continue_read_thread
    int64_t start_pts;  // initialized from the stream's start time
    AVRational start_pts_tb; // time base of start_pts (the stream's time base)
    int64_t next_pts;   // pts of the most recently decoded frame; used to
                        // extrapolate a pts for frames that carry none
    AVRational next_pts_tb; // time base of next_pts
    SDL_Thread *decoder_tid; // decoding thread id
} Decoder;

typedef struct VideoState {
    // demuxer (read) thread id
    SDL_Thread *read_tid;

    // forced input format, e.g. "-f flv"
    AVInputFormat *iformat;

    // set to shut the player down
    int abort_request;
    int force_refresh;

    // paused state
    int paused;

    // previous paused state; used when pausing/resuming network streams
    int last_paused;

    // request to enqueue attached pictures (e.g. mp3 cover art)
    int queue_attachments_req;
    int seek_req;       // a seek has been requested
    int seek_flags;     // flags for the pending seek
    int64_t seek_pos;   // seek target position
    int64_t seek_rel;   // seek offset relative to the current position

    int read_pause_return;

    // demuxer context
    AVFormatContext *ic;

    // non-zero for realtime sources: rtsp, rtmp, ...
    int realtime;

    // audio clock: current playback position of the audio stream
    Clock audclk;

    // video clock: current playback position of the video stream
    Clock vidclk;

    // external clock: seeded from the first audio/video pts, then advancing
    // with wall-clock time
    Clock extclk;

    // decoded video frame queue
    FrameQueue pictq;

    // decoded subtitle queue
    FrameQueue subpq;

    // decoded audio frame queue
    FrameQueue sampq;

    // audio decoder (ffplay's own Decoder wrapper)
    Decoder auddec;

    // video decoder
    Decoder viddec;

    // subtitle decoder
    Decoder subdec;

    // master clock selection: audio, video or external; audio by default
    int av_sync_type;

    // accumulated pts of the audio being played
    double audio_clock;

    int audio_clock_serial; // serial of the frame audio_clock was taken from
    double audio_diff_cum; /* used for AV difference average computation */

    // weight coefficient for the weighted running average
    double audio_diff_avg_coef;

    // A/V sync threshold
    double audio_diff_threshold;

    // number of measurements contributing to the running average so far
    int audio_diff_avg_count;

    // subtitle stream index
    int subtitle_stream;

    // subtitle stream
    AVStream *subtitle_st;

    // queue of compressed subtitle packets
    PacketQueue subtitleq;

    // audio stream
    AVStream *audio_st;

    // audio stream index
    int audio_stream;

    // queue of compressed audio packets
    PacketQueue audioq;

    // SDL internal buffer size, typically samples * channels * bytes/sample
    int audio_hw_buf_size;

    // audio data handed to SDL: points at the AVFrame data when no
    // resampling is needed, otherwise at audio_buf1
    uint8_t *audio_buf;

    // resampled audio data
    uint8_t *audio_buf1;

    // sizes of the audio buffers
    unsigned int audio_buf_size; /* in bytes */
    unsigned int audio_buf1_size;

    // read offset into audio_buf for the SDL callback
    int audio_buf_index; /* in bytes */

    // bytes in audio_buf not yet consumed by SDL
    int audio_write_buf_size;

    // playback volume
    int audio_volume;

    // non-zero while muted
    int muted;


#if CONFIG_AVFILTER
    struct AudioParams audio_filter_src;
#endif
    // audio parameters as decoded (same as audio_tgt after SDL open)
    struct AudioParams audio_src;

    // audio parameters actually opened via SDL
    struct AudioParams audio_tgt;
    // audio resampler context
    struct SwrContext *swr_ctx;

    // dropped-frame counters; with audio as master clock, video frames are
    // dropped to catch up
    int frame_drops_early;

    int frame_drops_late;

    // display mode: 0 = video, 1 = waveform, 2 = RDFT spectrum
    enum ShowMode {
        SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
    } show_mode;

    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;
    double last_vis_time;
    SDL_Texture *vis_texture;
    SDL_Texture *sub_texture;
    SDL_Texture *vid_texture;

    // time at which the current frame should be shown
    double frame_timer;
    // time the last decoded frame was returned, in microseconds
    double frame_last_returned_time;
    // time the last frame spent in the video filter chain
    double frame_last_filter_delay;

    // video stream index
    int video_stream;

    // video stream
    AVStream *video_st;

    // queue of compressed video packets
    PacketQueue videoq;
    double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity

    struct SwsContext *img_convert_ctx;
    struct SwsContext *sub_convert_ctx;

    // demuxing reached end of file
    int eof;

    // input file name
    char *filename;

    // window geometry
    int width, height, xleft, ytop;

    // used for frame-by-frame stepping
    int step;

// usually compiled in: the filter module is central to ffmpeg
#if CONFIG_AVFILTER
    // index of the currently selected video filter graph
    int vfilter_idx;
    // input context of the video filter chain
    AVFilterContext *in_video_filter;   // the first filter in the video chain
    // output context of the video filter chain
    AVFilterContext *out_video_filter;  // the last filter in the video chain
    // input context of the audio filter chain
    AVFilterContext *in_audio_filter;   // the first filter in the audio chain
    // output context of the audio filter chain
    AVFilterContext *out_audio_filter;  // the last filter in the audio chain
    // filter container
    AVFilterGraph *agraph;              // audio filter graph
#endif

    // previously selected video stream (streams can be switched at runtime)
    int last_video_stream;

    // previously selected audio stream
    int last_audio_stream;

    // previously selected subtitle stream
    int last_subtitle_stream;

    // Condition used to wake the demuxer: read_thread sleeps ~10ms while the
    // packet queues are full; video_thread/audio_thread signal it when they
    // run out of packets so the queues get refilled promptly.
    SDL_cond *continue_read_thread;

} VideoState;

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int default_width  = 640;
static int default_height = 480;
static int screen_width  = 0;
static int screen_height = 0;
static int screen_left = SDL_WINDOWPOS_CENTERED;
static int screen_top = SDL_WINDOWPOS_CENTERED;
static int audio_disable;
static int video_disable;
static int subtitle_disable;
static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
static int seek_by_bytes = -1;      // -1: presumably decided per container — confirm
static float seek_interval = 10;
static int display_disable;
static int borderless;
static int alwaysontop;
static int startup_volume = 100;
static int show_status = -1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER; // default master clock is audio
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int decoder_reorder_pts = -1; // -1=auto (best_effort_timestamp), 0=pkt_dts; see decoder_decode_frame
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop = 1;
static int framedrop = -1;
static int infinite_buffer = -1;
static enum ShowMode show_mode = SHOW_MODE_NONE;
static const char *audio_codec_name;
static const char *subtitle_codec_name;
static const char *video_codec_name;
double rdftspeed = 0.02;
static int64_t cursor_last_shown;
static int cursor_hidden = 0;
#if CONFIG_AVFILTER
static const char **vfilters_list = NULL;
static int nb_vfilters = 0;
static char *afilters = NULL;
#endif
static int autorotate = 1;
static int find_stream_info = 1;
static int filter_nbthreads = 0;

/* current context */
static int is_full_screen;
static int64_t audio_callback_time;

#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

/* global SDL rendering state */
static SDL_Window *window;
static SDL_Renderer *renderer;
static SDL_RendererInfo renderer_info = {0};
static SDL_AudioDeviceID audio_dev;

/* Mapping from FFmpeg pixel formats to the matching SDL texture formats;
 * the trailing AV_PIX_FMT_NONE entry is a sentinel excluded from lookups. */
static const struct TextureFormatEntry {
    enum AVPixelFormat format;
    int texture_fmt;
} sdl_texture_format_map[] = {
    { AV_PIX_FMT_RGB8,           SDL_PIXELFORMAT_RGB332 },
    { AV_PIX_FMT_RGB444,         SDL_PIXELFORMAT_RGB444 },
    { AV_PIX_FMT_RGB555,         SDL_PIXELFORMAT_RGB555 },
    { AV_PIX_FMT_BGR555,         SDL_PIXELFORMAT_BGR555 },
    { AV_PIX_FMT_RGB565,         SDL_PIXELFORMAT_RGB565 },
    { AV_PIX_FMT_BGR565,         SDL_PIXELFORMAT_BGR565 },
    { AV_PIX_FMT_RGB24,          SDL_PIXELFORMAT_RGB24 },
    { AV_PIX_FMT_BGR24,          SDL_PIXELFORMAT_BGR24 },
    { AV_PIX_FMT_0RGB32,         SDL_PIXELFORMAT_RGB888 },
    { AV_PIX_FMT_0BGR32,         SDL_PIXELFORMAT_BGR888 },
    { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
    { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
    { AV_PIX_FMT_RGB32,          SDL_PIXELFORMAT_ARGB8888 },
    { AV_PIX_FMT_RGB32_1,        SDL_PIXELFORMAT_RGBA8888 },
    { AV_PIX_FMT_BGR32,          SDL_PIXELFORMAT_ABGR8888 },
    { AV_PIX_FMT_BGR32_1,        SDL_PIXELFORMAT_BGRA8888 },
    { AV_PIX_FMT_YUV420P,        SDL_PIXELFORMAT_IYUV },
    { AV_PIX_FMT_YUYV422,        SDL_PIXELFORMAT_YUY2 },
    { AV_PIX_FMT_UYVY422,        SDL_PIXELFORMAT_UYVY },
    { AV_PIX_FMT_NONE,           SDL_PIXELFORMAT_UNKNOWN },
};

#if CONFIG_AVFILTER
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
{
    // Append one "-vf" argument to the list of video filter graphs.
    GROW_ARRAY(vfilters_list, nb_vfilters);
    vfilters_list[nb_vfilters - 1] = arg;
    return 0;
}
#endif

static inline
int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
                   enum AVSampleFormat fmt2, int64_t channel_count2)
{
    /* Returns non-zero when the two audio configurations differ.
     * Mono is special-cased: with a single channel, planar and packed
     * layouts are interchangeable, so compare the packed equivalents. */
    if (channel_count1 != 1 || channel_count2 != 1)
        return channel_count1 != channel_count2 || fmt1 != fmt2;
    return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
}

static inline
int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
{
    /* A layout is only trusted when it is non-empty and its implied channel
     * count matches the actual one; otherwise return 0 (unknown layout). */
    if (!channel_layout)
        return 0;
    return av_get_channel_layout_nb_channels(channel_layout) == channels
               ? channel_layout
               : 0;
}

/* Append one packet to the queue. Caller must hold q->mutex.
 * Returns 0 on success, -1 on abort or allocation failure. */
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
{
    MyAVPacketList pkt1;

    // refuse new packets once an abort was requested
    if (q->abort_request)
       return -1;

    // grow the fifo when there is no room for one more entry
    if (av_fifo_space(q->pkt_list) < sizeof(pkt1)) {
        if (av_fifo_grow(q->pkt_list, sizeof(pkt1)) < 0)
            return -1;
    }

    pkt1.pkt = pkt;
    // Tag the entry with the queue's current serial; entries from an older
    // playback sequence (before a seek) can then be recognized and dropped.
    pkt1.serial = q->serial;

    // write the entry into the fifo
    av_fifo_generic_write(q->pkt_list, &pkt1, sizeof(pkt1), NULL);
    // account for the new packet
    q->nb_packets++;
    // queued size includes the payload plus the per-entry overhead
    q->size += pkt1.pkt->size + sizeof(pkt1);
    // accumulate the total queued duration
    q->duration += pkt1.pkt->duration;
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);
    return 0;
}

/* Thread-safe enqueue: takes ownership of pkt's payload (pkt is reset).
 * Returns 0 on success, -1 on failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacket *pkt1;
    int ret;

    pkt1 = av_packet_alloc();
    if (!pkt1) {
        av_packet_unref(pkt);
        return -1;
    }
    // Move every field from pkt into pkt1 and reset pkt; no data is copied.
    av_packet_move_ref(pkt1, pkt);

    // serialize writers
    SDL_LockMutex(q->mutex);
    ret = packet_queue_put_private(q, pkt1);
    SDL_UnlockMutex(q->mutex);

    // on failure the packet was not queued, so free it here
    if (ret < 0)
        av_packet_free(&pkt1);

    return ret;
}

static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
{
    /* Queue an empty (flush) packet tagged with the given stream index so
     * the decoder can drain its internal buffers at end of stream. */
    pkt->stream_index = stream_index;

    return packet_queue_put(q, pkt);
}

/* packet queue handling */
/* Initialize a PacketQueue: allocates the fifo and the SDL primitives.
 * The queue starts in the aborted state; packet_queue_start() enables it.
 * Returns 0 on success, AVERROR(ENOMEM) on failure. */
static int packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue)); // zero all fields
    // allocate the fifo with room for one MyAVPacketList entry
    q->pkt_list = av_fifo_alloc(sizeof(MyAVPacketList));
    if (!q->pkt_list)
        return AVERROR(ENOMEM);

    // Debug trace through the project's logger rather than a bare printf
    // to stdout, consistent with the rest of the file's av_log usage.
    av_log(NULL, AV_LOG_DEBUG, "%s: read space: %d, write space: %d\n", __FUNCTION__,
           av_fifo_size(q->pkt_list), av_fifo_space(q->pkt_list));

    q->mutex = SDL_CreateMutex();
    if (!q->mutex) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    q->cond = SDL_CreateCond();
    if (!q->cond) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    // While abort_request is 1 the decoder threads will not consume from the
    // queue; it is cleared to 0 right before the decoder threads are created.
    q->abort_request = 1;
    return 0;
}

/* Drop every queued packet and start a new playback sequence. */
static void packet_queue_flush(PacketQueue *q)
{
    MyAVPacketList pkt1;

    SDL_LockMutex(q->mutex);
    // drain and free every queued packet
    while (av_fifo_size(q->pkt_list) >= sizeof(pkt1)) {
        av_fifo_generic_read(q->pkt_list, &pkt1, sizeof(pkt1), NULL);
        av_packet_free(&pkt1.pkt);
    }
    q->nb_packets = 0;
    q->size = 0;
    q->duration = 0;
    q->serial++;   // bump the serial: entries queued before the flush are stale
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_destroy(PacketQueue *q)
{
    /* Drop any queued packets first, then release the fifo and the SDL
     * synchronization primitives owned by the queue. */
    packet_queue_flush(q);
    av_fifo_freep(&q->pkt_list);
    SDL_DestroyCond(q->cond);
    SDL_DestroyMutex(q->mutex);
}

static void packet_queue_abort(PacketQueue *q)
{
    /* Raise the abort flag under the lock and wake any thread blocked in
     * packet_queue_get() so it can observe the request and bail out. */
    SDL_LockMutex(q->mutex);
    q->abort_request = 1;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
}

/* Re-enable the queue for decoding (pairs with the abort state set in init). */
static void packet_queue_start(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 0; // allow the decoder threads to consume again

    // bump the sequence number; on the very first start serial becomes 1
    q->serial++;
    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
{
    MyAVPacketList pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for (;;) {
        // abort requested: stop waiting and report it
        if (q->abort_request) {
            ret = -1;
            break;
        }

        if (av_fifo_size(q->pkt_list) >= sizeof(pkt1)) {
            av_fifo_generic_read(q->pkt_list, &pkt1, sizeof(pkt1), NULL);
            q->nb_packets--;
            // q->size counts the payload plus the per-entry overhead
            q->size -= pkt1.pkt->size + sizeof(pkt1);
            // keep the queued duration in sync
            q->duration -= pkt1.pkt->duration;
            av_packet_move_ref(pkt, pkt1.pkt);
            // Hand the entry's serial back to the caller so it can detect
            // packets belonging to an older playback sequence.
            if (serial)
                *serial = pkt1.serial;
            av_packet_free(&pkt1.pkt);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            // blocking mode: sleep until a packet arrives or abort is requested
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

/* Initialize a Decoder. Returns 0 on success, AVERROR(ENOMEM) on failure. */
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
    // zero-initialize the decoder state
    memset(d, 0, sizeof(Decoder));

    // A dedicated AVPacket is kept because avcodec_send_packet() may return
    // EAGAIN, in which case the packet must be kept and retried later.
    d->pkt = av_packet_alloc();
    if (!d->pkt)
        return AVERROR(ENOMEM);

    // codec context
    d->avctx = avctx;

    // queue of compressed packets feeding this decoder
    d->queue = queue;

    // empty_queue_cond is VideoState's is->continue_read_thread under a
    // different name; signaled to wake the demuxer when the queue runs dry
    d->empty_queue_cond = empty_queue_cond;

    // no valid pts before decoding starts
    d->start_pts = AV_NOPTS_VALUE;

    // -1 marks "no packet decoded yet"
    d->pkt_serial = -1;

    return 0;
}

/**
 * Decode the next frame; shared by the audio, video and subtitle paths.
 * @brief decoder_decode_frame
 * @param d     decoder to pull from
 * @param frame receives a decoded audio/video frame
 * @param sub   receives a decoded subtitle
 * @return 1 if a frame was produced, 0 on end of stream, -1 on abort
 */
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int ret = AVERROR(EAGAIN);

    for (;;) {
        // Only drain the codec while we are in the current playback sequence:
        // 1. At startup the serials differ (d->queue->serial == 1, d->pkt_serial == -1).
        // 2. After a seek they differ too: seeking flushes the PacketQueue and bumps its serial.
        // 3. They match again once read_thread queued data and a packet was sent to the codec.
        if (d->queue->serial == d->pkt_serial) {
            do {
                // playback was stopped by the user
                if (d->queue->abort_request)
                    return -1;

                switch (d->avctx->codec_type) {
                    case AVMEDIA_TYPE_VIDEO:
                        // fetch one decoded video frame
                        ret = avcodec_receive_frame(d->avctx, frame);

                        if (ret >= 0) {
                            // pts reordering: 0=off 1=on -1=auto (default is -1)
                            if (decoder_reorder_pts == -1) {
                                // best_effort_timestamp is the decoder's estimate
                                // of each frame's display time
                                frame->pts = frame->best_effort_timestamp;
                            } else if (!decoder_reorder_pts) {
                                frame->pts = frame->pkt_dts;
                            }

//                            printf("%s(%d) video frame pts: %lld:%lld\n", __FUNCTION__, __LINE__, frame->pts, frame->best_effort_timestamp);
                        }
                        break;
                    case AVMEDIA_TYPE_AUDIO:
                        // fetch one decoded audio frame
                        ret = avcodec_receive_frame(d->avctx, frame);
                        if (ret >= 0) {
                            // time base is the reciprocal of the sample rate
                            AVRational tb = (AVRational){1, frame->sample_rate};
                            // 1. If frame->pts is valid, rescale it from the codec's
                            //    packet time base to the sample-rate time base
                            //    (both are derived from the audio sample rate;
                            //    frame->pts accumulates sample counts).
                            if (frame->pts != AV_NOPTS_VALUE)
                                frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);

                            // 2. Otherwise extrapolate the pts from d->next_pts.
                            else if (d->next_pts != AV_NOPTS_VALUE)
                                frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);

                            // 3. With a valid pts, predict the next frame's pts by
                            //    adding the sample count; used when later frames
                            //    arrive without a pts of their own.
                            if (frame->pts != AV_NOPTS_VALUE) {
                                d->next_pts = frame->pts + frame->nb_samples;
                                d->next_pts_tb = tb;
                            }

//                            printf("%s(%d) audio frame pts: %lld\n", __FUNCTION__, __LINE__, frame->pts);
                        }
                        break;
                }

                // the codec has been fully drained
                if (ret == AVERROR_EOF) {
                    // mark this stream finished for the current serial
                    d->finished = d->pkt_serial;
                    // reset the codec's internal buffers
                    avcodec_flush_buffers(d->avctx);
                    return 0;
                }
                if (ret >= 0)
                    return 1;
            } while (ret != AVERROR(EAGAIN)); // codec wants more input: feed it below
        }

        do {
            // The packet queue ran dry: wake read_thread so it refills it.
            if (d->queue->nb_packets == 0) {
                SDL_CondSignal(d->empty_queue_cond);
            }

            // A previous send returned EAGAIN: retry the pending packet instead
            // of fetching a new one from the queue.
            if (d->packet_pending) {
                d->packet_pending = 0;
            } else {
                // remember the serial of the previous packet
                int old_serial = d->pkt_serial;
                // Blocking read: waits until a packet becomes available.
                if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
                    return -1;

                // A serial change means a seek happened; flush the codec so no
                // frames from the old sequence leak through.
                if (old_serial != d->pkt_serial) {
                    // ffmpeg API: reset the codec's internal buffers
                    avcodec_flush_buffers(d->avctx);
                    // reset progress for the new sequence
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            }
            // Key check: keep only packets from the current sequence; release
            // stale ones with av_packet_unref and fetch the next.
            if (d->queue->serial == d->pkt_serial)
                break;
            av_packet_unref(d->pkt);
        } while (1);

        // Subtitles still use the legacy decode API.
        if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
            int got_frame = 0;
            ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
            if (ret < 0) {
                ret = AVERROR(EAGAIN);
            } else {
                if (got_frame && !d->pkt->data) {
                    // a flush packet produced a frame: keep flushing next time
                    d->packet_pending = 1;
                }
                ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
            }
            av_packet_unref(d->pkt);
        } else {
            // Send the compressed packet to the codec. EAGAIN here while
            // receive_frame also returned EAGAIN is an API violation; keep the
            // packet (packet_pending=1) and retry it on the next iteration.
            if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
                av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
                d->packet_pending = 1;
            } else {
                av_packet_unref(d->pkt);
            }
        }
    }
}

static void decoder_destroy(Decoder *d) {
    /* Release the resources owned by the decoder: its codec context and
     * the scratch packet used for pending sends. */
    avcodec_free_context(&d->avctx);
    av_packet_free(&d->pkt);
}

static void frame_queue_unref_item(Frame *vp)
{
    /* Drop the references held by a queue slot without freeing the slot
     * itself; the AVFrame and AVSubtitle storage stays reusable. */
    avsubtitle_free(&vp->sub);
    av_frame_unref(vp->frame);
}

/**
 * Initialize a queue of decoded frames.
 * @brief frame_queue_init
 * @param f         the queue to initialize
 * @param pktq      the PacketQueue this frame queue is associated with
 * @param max_size  requested capacity, clamped to FRAME_QUEUE_SIZE
 * @param keep_last non-zero to keep the last shown frame for re-display
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
{
    int i;
    memset(f, 0, sizeof(FrameQueue));  // zero all fields
    // create the queue's lock
    if (!(f->mutex = SDL_CreateMutex())) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    // create the queue's condition variable
    if (!(f->cond = SDL_CreateCond())) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }

    // associate the demuxer-side packet queue
    f->pktq = pktq;

    // typical capacities: video 3, audio 9, subtitles 16
    f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);

    // normalize any non-zero value to 1
    f->keep_last = !!keep_last;

    // preallocate max_size AVFrames for the queue slots
    for (i = 0; i < f->max_size; i++)
        if (!(f->queue[i].frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
    return 0;
}

static void frame_queue_destory(FrameQueue *f)
{
    /* Unreference and free every preallocated frame slot, then release the
     * queue's SDL primitives. (Name typo kept for existing callers.) */
    int i;
    for (i = 0; i < f->max_size; i++) {
        Frame *slot = &f->queue[i];
        frame_queue_unref_item(slot);
        av_frame_free(&slot->frame);
    }
    SDL_DestroyCond(f->cond);
    SDL_DestroyMutex(f->mutex);
}

static void frame_queue_signal(FrameQueue *fq)
{
    /* Wake one thread blocked on the frame queue (used e.g. on abort). */
    SDL_LockMutex(fq->mutex);
    SDL_CondSignal(fq->cond);
    SDL_UnlockMutex(fq->mutex);
}

static Frame *frame_queue_peek(FrameQueue *f)
{
    /* Current frame to display; skips the kept last-shown frame, if any. */
    int idx = (f->rindex + f->rindex_shown) % f->max_size;
    return &f->queue[idx];
}

static Frame *frame_queue_peek_next(FrameQueue *f)
{
    /* Frame that will be displayed after the current one. */
    int idx = (f->rindex + f->rindex_shown + 1) % f->max_size;
    return &f->queue[idx];
}

static Frame *frame_queue_peek_last(FrameQueue *f)
{
    /* Most recently shown frame (the slot kept when keep_last is set). */
    Frame *last = &f->queue[f->rindex];
    return last;
}

/* Block until a slot is free for writing; returns NULL on abort. */
static Frame *frame_queue_peek_writable(FrameQueue *f)
{
    /* wait until we have space to put a new frame */
    SDL_LockMutex(f->mutex);
    while (f->size >= f->max_size &&
           !f->pktq->abort_request) {
        SDL_CondWait(f->cond, f->mutex);
    }
    SDL_UnlockMutex(f->mutex);

    // abort requested while waiting: give up
    if (f->pktq->abort_request)
        return NULL;

    return &f->queue[f->windex];
}

/* Block until an unshown frame is available; returns NULL on abort. */
static Frame *frame_queue_peek_readable(FrameQueue *f)
{
    /* wait until we have a readable a new frame */
    SDL_LockMutex(f->mutex);
    while (f->size - f->rindex_shown <= 0 &&
           !f->pktq->abort_request) {
        SDL_CondWait(f->cond, f->mutex);
    }
    SDL_UnlockMutex(f->mutex);

    // abort requested while waiting: give up
    if (f->pktq->abort_request)
        return NULL;

    // skip the kept last-shown frame, if any
    return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
}

/* Publish the frame previously written into queue[windex]. */
static void frame_queue_push(FrameQueue *f)
{
    // advance the write index, wrapping at capacity
    if (++f->windex == f->max_size)
        f->windex = 0;
    // update size under the lock so readers see a consistent count
    SDL_LockMutex(f->mutex);
    f->size++;
    // wake a consumer possibly waiting in frame_queue_peek_readable()
    SDL_CondSignal(f->cond);
    SDL_UnlockMutex(f->mutex);
}

/* Advance past the current frame, releasing its references. */
static void frame_queue_next(FrameQueue *f)
{
    // With keep_last, the very first call only marks the current frame as
    // shown instead of discarding it, so it stays available for re-display.
    if (f->keep_last && !f->rindex_shown) {
        f->rindex_shown = 1;
        return;
    }
    frame_queue_unref_item(&f->queue[f->rindex]);
    if (++f->rindex == f->max_size)
        f->rindex = 0;
    SDL_LockMutex(f->mutex);
    f->size--;
    // wake a producer possibly waiting in frame_queue_peek_writable()
    SDL_CondSignal(f->cond);
    SDL_UnlockMutex(f->mutex);
}

/* return the number of undisplayed frames in the queue */
static int frame_queue_nb_remaining(FrameQueue *fq)
{
    /* The kept last-shown frame, if present, does not count as pending. */
    int pending = fq->size - fq->rindex_shown;
    return pending;
}

/* return last shown position */
static int64_t frame_queue_last_pos(FrameQueue *f)
{
    /* File position of the last shown frame, or -1 when no frame has been
     * shown yet or it belongs to an outdated packet-queue serial. */
    Frame *last = &f->queue[f->rindex];
    if (!f->rindex_shown || last->serial != f->pktq->serial)
        return -1;
    return last->pos;
}

/* Stop a decoder thread and drop its buffered packets. */
static void decoder_abort(Decoder *d, FrameQueue *fq)
{
    // set abort_request=1 on the packet queue and wake threads blocked on it
    packet_queue_abort(d->queue);
    // wake the decoder thread if it is blocked on the frame queue
    frame_queue_signal(fq);
    // wait for the decoder thread to exit
    SDL_WaitThread(d->decoder_tid, NULL);
    d->decoder_tid = NULL;
    // drop remaining packets (also bumps the queue's serial)
    packet_queue_flush(d->queue);
}

static inline void fill_rectangle(int x, int y, int w, int h)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    if (w && h)
        SDL_RenderFillRect(renderer, &rect);
}

/* (Re)create *texture only when it is missing or its size/format no longer
 * matches the request. Returns 0 on success, -1 on failure. */
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
{
    Uint32 format;
    int access, w, h;
    if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
        void *pixels;
        int pitch;
        // release the old texture before creating the replacement
        if (*texture)
            SDL_DestroyTexture(*texture);
        if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
            return -1;
        if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
            return -1;
        if (init_texture) {
            // zero-fill the new texture so it starts out blank
            if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
                return -1;
            memset(pixels, 0, pitch * new_height);
            SDL_UnlockTexture(*texture);
        }
        av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
    }
    return 0;
}

static void calculate_display_rect(SDL_Rect *rect,
                                   int scr_xleft, int scr_ytop, int scr_width, int scr_height,
                                   int pic_width, int pic_height, AVRational pic_sar)
{
    /* Compute the largest even-sized rectangle with the picture's display
     * aspect ratio that fits inside the screen area, centered within it. */
    AVRational dar = pic_sar;
    int64_t w, h, x, y;

    /* invalid or unknown SAR: assume square pixels */
    if (av_cmp_q(dar, av_make_q(0, 1)) <= 0)
        dar = av_make_q(1, 1);

    /* display aspect ratio = SAR * (pic_width / pic_height) */
    dar = av_mul_q(dar, av_make_q(pic_width, pic_height));

    /* XXX: we suppose the screen has a 1.0 pixel ratio */
    h = scr_height;
    w = av_rescale(h, dar.num, dar.den) & ~1;
    if (w > scr_width) {
        w = scr_width;
        h = av_rescale(w, dar.den, dar.num) & ~1;
    }
    x = (scr_width  - w) / 2;
    y = (scr_height - h) / 2;
    rect->x = scr_xleft + x;
    rect->y = scr_ytop  + y;
    rect->w = FFMAX((int)w, 1);
    rect->h = FFMAX((int)h, 1);
}

static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
{
    /* Map an FFmpeg pixel format to the matching SDL texture format and pick
     * alpha blending for the RGBA-style formats. Formats with no direct SDL
     * equivalent leave *sdl_pix_fmt as SDL_PIXELFORMAT_UNKNOWN. */
    int i;

    *sdl_blendmode = (format == AV_PIX_FMT_RGB32   ||
                      format == AV_PIX_FMT_RGB32_1 ||
                      format == AV_PIX_FMT_BGR32   ||
                      format == AV_PIX_FMT_BGR32_1) ? SDL_BLENDMODE_BLEND
                                                    : SDL_BLENDMODE_NONE;
    *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
    for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
        if (sdl_texture_format_map[i].format == format) {
            *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
            return;
        }
    }
}

static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
    /* Upload one decoded video frame into an SDL texture, (re)allocating the
     * texture as needed. Formats with no SDL equivalent are converted to
     * BGRA via swscale; bottom-up frames (negative linesize) are handled by
     * flipping the plane pointers. Returns 0 on success, -1 on failure. */
    int ret = 0;
    Uint32 sdl_pix_fmt;
    SDL_BlendMode sdl_blendmode;
    get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
    if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
        return -1;
    switch (sdl_pix_fmt) {
        case SDL_PIXELFORMAT_UNKNOWN:
            /* This should only happen if we are not using avfilter... */
            *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
                frame->width, frame->height, frame->format, frame->width, frame->height,
                AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
            if (*img_convert_ctx != NULL) {
                uint8_t *pixels[4];
                int pitch[4];
                if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
                    sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
                              0, frame->height, pixels, pitch);
                    SDL_UnlockTexture(*tex);
                }
            } else {
                av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
                ret = -1;
            }
            break;
        case SDL_PIXELFORMAT_IYUV:
            /* Planar YUV 4:2:0: upload the three planes directly. */
            if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
                ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
                                                       frame->data[1], frame->linesize[1],
                                                       frame->data[2], frame->linesize[2]);
            } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
                /* Bottom-up image: point at the last row of each plane and
                 * negate the stride so SDL reads it top-down. */
                ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height                    - 1), -frame->linesize[0],
                                                       frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
                                                       frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
            } else {
                av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
                return -1;
            }
            break;
        default:
            /* Packed format matching the texture format: single-plane upload. */
            if (frame->linesize[0] < 0) {
                ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
            } else {
                ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
            }
            break;
    }
    return ret;
}

static void set_sdl_yuv_conversion_mode(AVFrame *frame)
{
#if SDL_VERSION_ATLEAST(2,0,8)
    /* Select the SDL YUV->RGB conversion matrix matching the frame's
     * colorspace/range; a NULL or non-YUV frame restores automatic mode. */
    SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
    int is_supported_yuv = frame && (frame->format == AV_PIX_FMT_YUV420P ||
                                     frame->format == AV_PIX_FMT_YUYV422 ||
                                     frame->format == AV_PIX_FMT_UYVY422);

    if (is_supported_yuv) {
        if (frame->color_range == AVCOL_RANGE_JPEG)
            mode = SDL_YUV_CONVERSION_JPEG;
        else if (frame->colorspace == AVCOL_SPC_BT709)
            mode = SDL_YUV_CONVERSION_BT709;
        else if (frame->colorspace == AVCOL_SPC_BT470BG ||
                 frame->colorspace == AVCOL_SPC_SMPTE170M ||
                 frame->colorspace == AVCOL_SPC_SMPTE240M)
            mode = SDL_YUV_CONVERSION_BT601;
    }
    SDL_SetYUVConversionMode(mode);
#endif
}

static void video_image_display(VideoState *is)
{
    /* Render the current video frame plus any active subtitle into the
     * window, uploading frame data to SDL textures on first display. */
    Frame *vp;
    Frame *sp = NULL;
    SDL_Rect rect;

    // Get the frame to display; the queue read index was already advanced
    // in video_refresh, so "peek_last" is the frame currently on screen
    vp = frame_queue_peek_last(&is->pictq);
    if (is->subtitle_st) {
        if (frame_queue_nb_remaining(&is->subpq) > 0) {
            sp = frame_queue_peek(&is->subpq);

            // Only show the subtitle once its start_display_time has arrived
            if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
                if (!sp->uploaded) {
                    uint8_t* pixels[4];
                    int pitch[4];
                    int i;
                    if (!sp->width || !sp->height) {
                        sp->width = vp->width;
                        sp->height = vp->height;
                    }
                    if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
                        return;

                    for (i = 0; i < sp->sub.num_rects; i++) {
                        AVSubtitleRect *sub_rect = sp->sub.rects[i];

                        // Clamp each subtitle rectangle to the subtitle area
                        sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
                        sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
                        sub_rect->w = av_clip(sub_rect->w, 0, sp->width  - sub_rect->x);
                        sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);

                        // Convert the paletted subtitle bitmap to BGRA for SDL
                        is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
                            sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
                            sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
                            0, NULL, NULL, NULL);
                        if (!is->sub_convert_ctx) {
                            av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
                            return;
                        }
                        if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
                            sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
                                      0, sub_rect->h, pixels, pitch);
                            SDL_UnlockTexture(is->sub_texture);
                        }
                    }
                    sp->uploaded = 1;
                }
            } else
                sp = NULL;
        }
    }

    calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);

    if (!vp->uploaded) {
        if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
            return;
        vp->uploaded = 1;
        // Negative linesize means a bottom-up frame: render vertically flipped
        vp->flip_v = vp->frame->linesize[0] < 0;
    }

    set_sdl_yuv_conversion_mode(vp->frame);
    SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
    set_sdl_yuv_conversion_mode(NULL);
    if (sp) {
#if USE_ONEPASS_SUBTITLE_RENDER
        SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
#else
        // Scale each subtitle rectangle from subtitle to display coordinates
        int i;
        double xratio = (double)rect.w / (double)sp->width;
        double yratio = (double)rect.h / (double)sp->height;
        for (i = 0; i < sp->sub.num_rects; i++) {
            SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
            SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
                               .y = rect.y + sub_rect->y * yratio,
                               .w = sub_rect->w * xratio,
                               .h = sub_rect->h * yratio};
            SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
        }
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    /* Mathematical modulo: always returns a value in [0, b) for b > 0,
     * unlike C's % which is negative for negative a.
     *
     * Fix: the old form (a < 0 ? a%b + b : a%b) returned b — one past the
     * valid range — whenever a was a negative multiple of b; callers use
     * the result as a sample_array index, so that could read one element
     * past the array. */
    int r = a % b;
    return r < 0 ? r + b : r;
}

static void video_audio_display(VideoState *s)
{
    /* Draw the audio visualization: either a per-channel waveform
     * (SHOW_MODE_WAVES) or a scrolling RDFT spectrogram. */
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2;
    int64_t time_diff;
    int rdft_bits, nb_freq;

    // Smallest power of two >= 2 * window height, for the RDFT size
    for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
        ;
    nb_freq = 1 << (rdft_bits - 1);

    /* compute display index : center on currently output samples */
    channels = s->audio_tgt.channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
        n = 2 * channels;
        // audio_write_buf_size is in bytes; divide by bytes-per-sample-frame
        delay = s->audio_write_buf_size;
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime_relative() - audio_callback_time;
            delay -= (time_diff * s->audio_tgt.freq) / 1000000;
        }

        delay += 2 * data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if (s->show_mode == SHOW_MODE_WAVES) {
            // Search backwards for a good trigger point (a zero crossing
            // with a large amplitude swing) so the waveform looks stable
            h = INT_MIN;
            for (i = 0; i < 1000; i += channels) {
                int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a = s->sample_array[idx];
                int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
                int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
                int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
                int score = a - d;
                if (h < score && (b ^ c) < 0) {
                    h = score;
                    i_start = idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        // While paused keep drawing from the same start index
        i_start = s->last_i_start;
    }

    if (s->show_mode == SHOW_MODE_WAVES) {
        SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for (ch = 0; ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for (x = 0; x < s->width; x++) {
                // Scale the 16-bit sample to the channel's graph height
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(s->xleft + x, ys, 1, y);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);

        // Separator lines between channels
        for (ch = 1; ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(s->xleft, y, s->width, 1);
        }
    } else {
        // Spectrogram mode: render one new column per refresh
        if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
            return;

        if (s->xpos >= s->width)
            s->xpos = 0;
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if (rdft_bits != s->rdft_bits) {
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits = rdft_bits;
            s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
        }
        if (!s->rdft || !s->rdft_data){
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
            s->show_mode = SHOW_MODE_WAVES;
        } else {
            FFTSample *data[2];
            SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
            uint32_t *pixels;
            int pitch;
            for (ch = 0; ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2 * nb_freq * ch;
                i = i_start + ch;
                // Fill the RDFT input, applying a parabolic window
                for (x = 0; x < 2 * nb_freq; x++) {
                    double w = (x-nb_freq) * (1.0 / nb_freq);
                    data[ch][x] = s->sample_array[i] * (1.0 - w * w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            /* Least efficient way to do this, we should of course
             * directly access it but it is more than fast enough. */
            if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
                pitch >>= 2;
                pixels += pitch * s->height;
                for (y = 0; y < s->height; y++) {
                    double w = 1 / sqrt(nb_freq);
                    int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
                    int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
                                                        : a;
                    a = FFMIN(a, 255);
                    b = FFMIN(b, 255);
                    pixels -= pitch;
                    // Channel 0 magnitude in red, channel 1 in green, mix in blue
                    *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
                }
                SDL_UnlockTexture(s->vis_texture);
            }
            SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
        }
        if (!s->paused)
            s->xpos++;
    }
}

static void stream_component_close(VideoState *is, int stream_index)
{
    /* Shut down the decoder and free the resources of one stream
     * (audio, video or subtitle), then detach it from the VideoState. */
    AVFormatContext *ic = is->ic;
    AVCodecParameters *codecpar;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    codecpar = ic->streams[stream_index]->codecpar;

    switch (codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        // Abort the decoder and wait for its thread to exit
        decoder_abort(&is->auddec, &is->sampq);
        // Close the SDL audio device
        SDL_CloseAudioDevice(audio_dev);
        // Destroy the audio decoder and free its resources
        decoder_destroy(&is->auddec);
        // Free the resampler
        swr_free(&is->swr_ctx);
        // Free the is->audio_buf1 buffer
        av_freep(&is->audio_buf1);
        is->audio_buf1_size = 0;
        is->audio_buf = NULL;

        // Free RDFT (audio spectrum display) state, if any
        if (is->rdft) {
            av_rdft_end(is->rdft);
            av_freep(&is->rdft_data);
            is->rdft = NULL;
            is->rdft_bits = 0;
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
         // Abort the decoder and wait for its thread to exit
        decoder_abort(&is->viddec, &is->pictq);
        // Destroy the video decoder and free its resources
        decoder_destroy(&is->viddec);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        // Abort the decoder and wait for its thread to exit
        decoder_abort(&is->subdec, &is->subpq);
        // Destroy the subtitle decoder and free its resources
        decoder_destroy(&is->subdec);
        break;
    default:
        break;
    }

    // Tell the demuxer to drop all packets of this stream from now on
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    switch (codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

static void stream_close(VideoState *is)
{
    /* Stop the read thread, close all stream components, and free every
     * resource owned by the VideoState, including the state itself. */
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->read_tid, NULL);

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);

    avformat_close_input(&is->ic);

    packet_queue_destroy(&is->videoq);
    packet_queue_destroy(&is->audioq);
    packet_queue_destroy(&is->subtitleq);

    /* free all pictures */
    frame_queue_destory(&is->pictq);
    frame_queue_destory(&is->sampq);
    frame_queue_destory(&is->subpq);
    SDL_DestroyCond(is->continue_read_thread);
    sws_freeContext(is->img_convert_ctx);
    sws_freeContext(is->sub_convert_ctx);
    av_free(is->filename);
    if (is->vis_texture)
        SDL_DestroyTexture(is->vis_texture);
    if (is->vid_texture)
        SDL_DestroyTexture(is->vid_texture);
    if (is->sub_texture)
        SDL_DestroyTexture(is->sub_texture);
    av_free(is);
}

static void do_exit(VideoState *is)
{
    /* Tear down all player state and terminate the process. */
    if (is) {
        stream_close(is);  // close all streams and free the VideoState
    }
    if (renderer)
        SDL_DestroyRenderer(renderer); // destroy the SDL renderer
    if (window)
        SDL_DestroyWindow(window); // destroy the SDL window
    uninit_opts();
#if CONFIG_AVFILTER
    av_freep(&vfilters_list);
#endif
    avformat_network_deinit();  // deinitialize networking
    if (show_status)
        printf("\n");
    SDL_Quit();  // shut down SDL
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    exit(0);
}

static void sigterm_handler(int sig)
{
    /* Terminate immediately with a distinctive exit code. */
    (void)sig;
    exit(123);
}

static void set_default_window_size(int width, int height, AVRational sar)
{
    /* Derive default_width/default_height from the video dimensions and SAR,
     * honoring user-forced screen_width/screen_height limits when set. */
    int bound_w = screen_width  ? screen_width  : INT_MAX;
    int bound_h = screen_height ? screen_height : INT_MAX;
    SDL_Rect rect;

    if (bound_w == INT_MAX && bound_h == INT_MAX)
        bound_h = height;
    calculate_display_rect(&rect, 0, 0, bound_w, bound_h, width, height, sar);
    default_width  = rect.w;
    default_height = rect.h;
}

static int video_open(VideoState *is)
{
    /* Size, title, position and show the (already created) SDL window, and
     * record the resulting dimensions in the VideoState. Always returns 0. */
    int w,h;

    // Forced --x/--y dimensions win over the computed defaults
    w = screen_width ? screen_width : default_width;
    h = screen_height ? screen_height : default_height;

    if (!window_title)
        window_title = input_filename;
    SDL_SetWindowTitle(window, window_title);

    SDL_SetWindowSize(window, w, h);
    SDL_SetWindowPosition(window, screen_left, screen_top);
    if (is_full_screen)
        SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
    SDL_ShowWindow(window);

    is->width  = w;
    is->height = h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    /* Lazily open the window on first use, clear to black, then draw either
     * the audio visualization or the current video frame, and present. */
    if (!is->width)
        video_open(is);

    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderClear(renderer);
    if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO) {
        video_audio_display(is);
    } else if (is->video_st) {
        video_image_display(is);
    }
    SDL_RenderPresent(renderer);
}

static double get_clock(Clock *c)
{
    /* Current clock value in seconds, or NAN when the clock belongs to an
     * obsolete playback serial (e.g. right after a seek). */
    if (*c->queue_serial != c->serial)
        return NAN;
    if (c->paused) {
        return c->pts;
    } else {
        double time = av_gettime_relative() / 1000000.0;
        // Note 1: with the default c->speed == 1.0 this reduces to c->pts_drift + time
        // Note 2: pts_drift + time == pts - (system time at last update) + (system time now),
        //         i.e. the last pts plus the wall-clock time elapsed since it was set
        return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
    }
}

static void set_clock_at(Clock *c, double pts, int serial, double time)
{
    /* Anchor the clock: record pts at the given system time (seconds) so
     * get_clock() can extrapolate the current position later.
     * (Removed: commented-out debug printf block — dead code.) */
    c->pts = pts;                 // clock position being set now
    c->last_updated = time;       // system time of this update
    c->pts_drift = c->pts - time; // pts minus system time; get_clock() adds time back
    c->serial = serial;           // playback serial this timestamp belongs to
}

/**
 * Set the clock to the given pts/serial, using the current system time
 * (converted to seconds to keep floating point error small) as reference.
 * At init time this is called with pts = NAN and serial = -1.
 *
 * @param c      clock to update
 * @param pts    new clock position in seconds (may be NAN)
 * @param serial playback serial the timestamp belongs to
 */
static void set_clock(Clock *c, double pts, int serial)
{
    set_clock_at(c, pts, serial, av_gettime_relative() / 1000000.0);
}

static void set_clock_speed(Clock *c, double speed)
{
    /* Re-anchor the clock at its current value first, so the speed change
     * does not retroactively alter the already-elapsed time. */
    set_clock(c, get_clock(c), c->serial);
    c->speed = speed;
}

static void init_clock(Clock *c, int *queue_serial)
{
    /* Fresh clock: normal speed, running, tied to the given queue serial,
     * with no valid timestamp yet (NAN pts, serial -1). */
    c->queue_serial = queue_serial;
    c->paused = 0;
    c->speed = 1.0;
    set_clock(c, NAN, -1);
}

static void sync_clock_to_slave(Clock *c, Clock *slave)
{
    /* Make clock c follow the slave clock: adopt the slave's value whenever
     * c is invalid or has drifted past the no-sync threshold. */
    double master_val = get_clock(c);
    double slave_val  = get_clock(slave);

    if (isnan(slave_val))
        return;
    if (isnan(master_val) || fabs(master_val - slave_val) > AV_NOSYNC_THRESHOLD)
        set_clock(c, slave_val, slave->serial);
}

static int get_master_sync_type(VideoState *is) {
    /* Resolve the configured sync source, falling back when the wanted
     * stream is absent. Audio master is the common case. */
    switch (is->av_sync_type) {
    case AV_SYNC_VIDEO_MASTER:
        return is->video_st ? AV_SYNC_VIDEO_MASTER : AV_SYNC_AUDIO_MASTER;
    case AV_SYNC_AUDIO_MASTER:
        return is->audio_st ? AV_SYNC_AUDIO_MASTER : AV_SYNC_EXTERNAL_CLOCK;
    default:
        return AV_SYNC_EXTERNAL_CLOCK;
    }
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    switch (get_master_sync_type(is)) {
    case AV_SYNC_VIDEO_MASTER:
        return get_clock(&is->vidclk);
    case AV_SYNC_AUDIO_MASTER:
        return get_clock(&is->audclk);
    default:
        return get_clock(&is->extclk);
    }
}

static void check_external_clock_speed(VideoState *is) {
   /* Adapt the external clock speed so the packet queues neither drain nor
    * grow without bound (used for realtime streams synced to the external
    * clock): slow down when a queue is nearly empty, speed up when both are
    * comfortably full, otherwise ease back toward normal speed. */
   if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
       is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
       set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
   } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
              (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
       set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
   } else {
       double speed = is->extclk.speed;
       if (speed != 1.0)
           // Step toward 1.0 from whichever side of it we are on
           set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
   }
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
    /* Request an asynchronous seek; the actual avformat seek is performed by
     * read_thread. A new request is ignored while one is still pending. */
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1; // read_thread performs the seek the next time it runs
        SDL_CondSignal(is->continue_read_thread); // wake read_thread so it notices the request
    }
}

/* pause or resume the video */
static void stream_toggle_pause(VideoState *is)
{
    if (is->paused) {
        // When resuming, advance frame_timer by the time spent paused so the
        // video does not try to "catch up" by dropping frames
        is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
        if (is->read_pause_return != AVERROR(ENOSYS)) {
            is->vidclk.paused = 0;
        }
        set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
    }
    // Re-anchor the external clock as well
    set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
    // Flip the paused state on the player and all three clocks
    is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
}

/**
 * Toggle between paused and playing, leaving single-frame stepping mode.
 *
 * @param is the player state
 */
static void toggle_pause(VideoState *is)
{
    stream_toggle_pause(is);
    is->step = 0;
}

static void toggle_mute(VideoState *is)
{
    /* Flip the mute flag; the stored volume itself is left untouched. */
    is->muted = !is->muted;
}

static void update_volume(VideoState *is, int sign, double step)
{
    /* Adjust the volume by `step` dB in direction `sign`, working on a
     * logarithmic scale so steps feel perceptually uniform, then clamp to
     * SDL's [0, SDL_MIX_MAXVOLUME] range. */
    double level_db = is->audio_volume
                      ? 20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)
                      : -1000.0;
    int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (level_db + sign * step) / 20.0));

    /* If rounding produced no change, nudge by at least one unit. */
    if (new_volume == is->audio_volume)
        new_volume = is->audio_volume + sign;
    is->audio_volume = av_clip(new_volume, 0, SDL_MIX_MAXVOLUME);
}

static void step_to_next_frame(VideoState *is)
{
    /* if the stream is paused unpause it, then step */
    // Resume playback if currently paused; video_refresh will re-pause
    // after exactly one frame because is->step is set
    if (is->paused)
        stream_toggle_pause(is);
    is->step = 1;
}

static double compute_target_delay(double delay, VideoState *is)
{
    /* Given the nominal duration of the current frame, return how long to
     * actually keep it on screen so video stays in sync with the master
     * (normally audio) clock. */
    double sync_threshold, diff = 0;

    /* update delay to follow master synchronisation source */
    if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */

        // get_clock(&is->vidclk) is the current video position;
        // diff = video clock - master clock (positive: video is ahead)
        diff = get_clock(&is->vidclk) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        // Examples:
        // 1. at 12 fps, delay = 0.082 so sync_threshold = 0.082, one frame duration
        // 2. at 24 fps, delay = 0.041 so sync_threshold = 0.041, one frame duration
        // 3. at 48 fps, delay = 0.0205 so sync_threshold = 0.04, about two frame durations
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
        if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
            // Video behind the master: shrink the delay (possibly to 0) to catch up
            if (diff <= -sync_threshold)
                delay = FFMAX(0, delay + diff);

            // Video ahead and the frame is already long: wait delay + diff
            else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
                delay = delay + diff;

             // Video ahead: wait 2 * delay
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
            delay, -diff);

    return delay;
}

/**
 * Nominal display duration of current_vp: the pts gap to the next frame
 * when both frames belong to the same playback serial. Falls back to the
 * frame's own stored duration when the gap is NAN, non-positive or
 * implausibly large, and to 0 across a serial change (seek).
 *
 * @param is         the player state
 * @param current_vp frame whose duration is wanted
 * @param nextvp     the frame queued after it
 * @return display duration in seconds
 */
static double vp_duration(VideoState *is, Frame *current_vp, Frame *nextvp) {
    double gap;

    if (current_vp->serial != nextvp->serial)
        return 0.0;
    gap = nextvp->pts - current_vp->pts;
    if (isnan(gap) || gap <= 0 || gap > is->max_frame_duration)
        return current_vp->duration;
    return gap;
}

static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
    /* update current video pts */
    // NOTE(review): the pos parameter is currently unused here.
    set_clock(&is->vidclk, pts, serial);
    // Keep the external clock loosely following the video clock
    sync_clock_to_slave(&is->extclk, &is->vidclk);
}

/* called to display each frame */
static void video_refresh(void *opaque, double *remaining_time)
{
    VideoState *is = opaque;
    double time;

    Frame *sp, *sp2;

    // 外部时钟: 一般是实时播放流才会用到
    if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
        check_external_clock_speed(is);

    // 暂不考虑: 显示音频声音频率图
    if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
        // 根据系统时间计算当前流逝了多少us
        time = av_gettime_relative() / 1000000.0;
        if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
            video_display(is);
            is->last_vis_time = time;
        }
        *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
    }

    // 显示视频
    if (is->video_st) {
retry:
        // 检查是否还有更多视频帧要显示
        if (frame_queue_nb_remaining(&is->pictq) == 0) {
            // nothing to do, no picture to display in the queue
        } else {
            double last_duration, duration, delay;
            Frame *vp, *lastvp;

            /* dequeue the picture */
            // 当前显示在窗口的一帧,也可以表示为上一帧，通过索引 rindex 读取
            lastvp = frame_queue_peek_last(&is->pictq);
            // 获取下一帧数据，通过索引 rindex + rindex_shown 获取
            vp = frame_queue_peek(&is->pictq);

            // 快进快退时会改变播放序列，这里是不断查找队里中 vp 和播放队列中第一个相等的帧
            if (vp->serial != is->videoq.serial) {
                frame_queue_next(&is->pictq);
                goto retry;
            }

            // 快进快退时更新 frame_timer 的值
            if (lastvp->serial != vp->serial) {
                // 帧是何时开始播放
                is->frame_timer = av_gettime_relative() / 1000000.0;
            }

            // 如果是暂停时，直接显示当前帧
            if (is->paused) {
                printf("当前暂停显示当前帧....\n");
                goto display;
            }

            /* compute nominal last_duration */
            // 计算当前帧需要播放多久时间，lastvp: 当前显示的帧，vp: 下一帧
            last_duration = vp_duration(is, lastvp, vp);
            // 计算当前画面需要播放多久
            delay = compute_target_delay(last_duration, is);
//            printf("%s(%d)last_duration: %lf, delay: %lf\n", __FUNCTION__, __LINE__, last_duration, delay);

            time= av_gettime_relative()/1000000.0;
            // 计算下一帧要显示时的延迟时间  remaining_time
            // is->frame_timer + delay 当前帧在屏幕上要显示的时长
            if (time < is->frame_timer + delay) {
                *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
                goto display;
            }

            // 记录当前播放时间播了多久
            is->frame_timer += delay;
            // 视频播放卡了，如果当前帧播放时间太久，这里是大于 0.1 秒，那么就需要重置当前播放时间
            if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
                is->frame_timer = time;

            SDL_LockMutex(is->pictq.mutex);
            if (!isnan(vp->pts)) {
//                printf("%s(%d) vp->pts: %lf, pos: %lld, serial: %d\n",__FUNCTION__, __LINE__, vp->pts, vp->pos, vp->serial);
                // 更新视频时钟，这里是以 pts 作为基准
               update_video_pts(is, vp->pts, vp->pos, vp->serial);
            }
            SDL_UnlockMutex(is->pictq.mutex);

//            printf("%s(%d) is->frame_timer: %lf, time: %lf, time - frame_timer: %lf\n", __FUNCTION__, __LINE__, is->frame_timer, time, (time - is->frame_timer));

            // 因为上一帧播放时间太久，导致后面的帧不能在给定的播放时间窗口内播放，需要丢弃
            if (frame_queue_nb_remaining(&is->pictq) > 1) {
                Frame *nextvp = frame_queue_peek_next(&is->pictq);
                duration = vp_duration(is, vp, nextvp);
                if(!is->step
                    && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER))
                    && time > is->frame_timer + duration){
                    // 由于播放卡顿，假设没帧播放0.04s，由于系统卡顿，第三帧显示了0.1s，那么time > is->frame_timer + duration
                    is->frame_drops_late++;
                    frame_queue_next(&is->pictq);
                    goto retry;
                }
            }

            // 不是重点： 处理字母显示
            if (is->subtitle_st) {
                while (frame_queue_nb_remaining(&is->subpq) > 0) {
                    sp = frame_queue_peek(&is->subpq);

                    if (frame_queue_nb_remaining(&is->subpq) > 1)
                        sp2 = frame_queue_peek_next(&is->subpq);
                    else
                        sp2 = NULL;

                    if (sp->serial != is->subtitleq.serial
                            || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                            || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                    {
                        if (sp->uploaded) {
                            int i;
                            for (i = 0; i < sp->sub.num_rects; i++) {
                                AVSubtitleRect *sub_rect = sp->sub.rects[i];
                                uint8_t *pixels;
                                int pitch, j;

                                if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
                                    for (j = 0; j < sub_rect->h; j++, pixels += pitch)
                                        memset(pixels, 0, sub_rect->w << 2);
                                    SDL_UnlockTexture(is->sub_texture);
                                }
                            }
                        }
                        frame_queue_next(&is->subpq);
                    } else {
                        break;
                    }
                }
            }

            // 条件成立后，进行强制刷新
            // 将 rindex++，就是更换要显示的上一帧
            frame_queue_next(&is->pictq);
            // 这里置为1，表示刷新开始
            is->force_refresh = 1;

            // 逐帧播放，播放完一帧，如果当前是逐帧播放，立即进入暂停状态
            if (is->step && !is->paused)
                stream_toggle_pause(is);
        }
display:
        /* display picture */
        //is->pictq.rindex_shown 当第一帧插入到 FrameQueue 队里是被置为 1
        if (!display_disable
            && is->force_refresh
            && is->show_mode == SHOW_MODE_VIDEO
            && is->pictq.rindex_shown)
            video_display(is);
    }
    is->force_refresh = 0;

    // 打印音频信息到控制台
    if (show_status) {
        AVBPrint buf;
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime_relative();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
            else if (is->video_st)
                av_diff = get_master_clock(is) - get_clock(&is->vidclk);
            else if (is->audio_st)
                av_diff = get_master_clock(is) - get_clock(&is->audclk);

            av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
            av_bprintf(&buf,
                      "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                      get_master_clock(is),
                      (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : "   ")),
                      av_diff,
                      is->frame_drops_early + is->frame_drops_late,
                      aqsize / 1024,
                      vqsize / 1024,
                      sqsize,
                      is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
                      is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);

            if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
                fprintf(stderr, "%s", buf.str);
            else
                av_log(NULL, AV_LOG_INFO, "%s", buf.str);

            fflush(stderr);
            av_bprint_finalize(&buf, NULL);

            last_time = cur_time;
        }
    }
}

static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
{
    Frame *vp;

#if defined(DEBUG_SYNC)
    printf("frame_type=%c pts=%0.3f\n",
           av_get_picture_type_char(src_frame->pict_type), pts);
#endif

    /* Grab a writable slot from the picture queue; returns NULL (and we
     * bail out) when playback is being aborted. */
    if (!(vp = frame_queue_peek_writable(&is->pictq)))
        return -1;

    /* Describe the picture: geometry, pixel format, display aspect ratio. */
    vp->sar      = src_frame->sample_aspect_ratio;
    vp->uploaded = 0;
    vp->width    = src_frame->width;
    vp->height   = src_frame->height;
    vp->format   = src_frame->format;

    /* Timing and bookkeeping: presentation timestamp, display duration,
     * byte position in the input file and the playback serial. */
    vp->pts      = pts;
    vp->duration = duration;
    vp->pos      = pos;
    vp->serial   = serial;

    /* Keep the default window size in sync with the video geometry. */
    set_default_window_size(vp->width, vp->height, vp->sar);

    /* Move the frame references into the queued slot (src_frame is reset). */
    av_frame_move_ref(vp->frame, src_frame);

    /* Publish the slot to the consumer side of is->pictq. */
    frame_queue_push(&is->pictq);

    return 0;
}

static int get_video_frame(VideoState *is, AVFrame *frame)
{
    /* Pull one frame out of the video decoder.
     * Return values: 1 = got a frame, 0 = no frame, -1 = stop decoding. */
    int got_picture = decoder_decode_frame(&is->viddec, frame, NULL);

    if (got_picture < 0)
        return -1;

    if (got_picture) {
        double dpts = NAN;

        if (frame->pts != AV_NOPTS_VALUE)
            dpts = av_q2d(is->video_st->time_base) * frame->pts;

        /* The display aspect ratio may differ from the coded one. */
        frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);

        /* Early frame dropping: with the default framedrop == -1 and a
         * non-video master clock, drop frames whose pts already lags the
         * master clock (but only while the lag is below the no-sync
         * threshold and more packets are queued). */
        if ((framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER))
            && frame->pts != AV_NOPTS_VALUE) {
            double diff = dpts - get_master_clock(is);
            if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
                diff - is->frame_last_filter_delay < 0 && /* video behind master */
                is->viddec.pkt_serial == is->vidclk.serial && /* same playback serial */
                is->videoq.nb_packets) { /* more packets pending */
                is->frame_drops_early++; /* count the dropped frame */
                av_frame_unref(frame);
                got_picture = 0;
            }
        }
    }

    return got_picture;
}

#if CONFIG_AVFILTER
/**
 * Finalize a filter graph: either parse a textual filter description
 * between source and sink, or link them directly, then configure the graph.
 *
 * @param graph       graph that already contains source_ctx and sink_ctx
 * @param filtergraph textual description (may be NULL for a direct link)
 * @param source_ctx  input (buffer/abuffer) filter context
 * @param sink_ctx    output (buffersink/abuffersink) filter context
 * @return >= 0 on success, a negative AVERROR code on failure
 */
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
                                 AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
{
    int ret, i;
    int nb_filters = graph->nb_filters;
    AVFilterInOut *outputs = NULL, *inputs = NULL;

    // A user-supplied filter description was given
    if (filtergraph) {
        outputs = avfilter_inout_alloc();
        inputs  = avfilter_inout_alloc();
        if (!outputs || !inputs) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        outputs->name       = av_strdup("in");
        outputs->filter_ctx = source_ctx;
        outputs->pad_idx    = 0;
        outputs->next       = NULL;

        inputs->name        = av_strdup("out");
        inputs->filter_ctx  = sink_ctx;
        inputs->pad_idx     = 0;
        inputs->next        = NULL;

        // Fix: av_strdup() can fail; the original passed NULL names on into
        // avfilter_graph_parse_ptr() instead of reporting ENOMEM.
        if (!outputs->name || !inputs->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
            goto fail;
    } else {
        // No description: connect source directly to sink
        if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
            goto fail;
    }

    /* Reorder the filters to ensure that inputs of the custom filters are merged first */
    for (i = 0; i < graph->nb_filters - nb_filters; i++)
        FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);

    // Actually open/configure the whole graph
    ret = avfilter_graph_config(graph, NULL);
fail:
    avfilter_inout_free(&outputs);
    avfilter_inout_free(&inputs);
    return ret;
}

/**
 * Build the video filter chain: buffer source -> [autorotate filters] ->
 * [user filters from vfilters] -> buffersink, constrained to the pixel
 * formats the SDL renderer can display.
 *
 * @param graph    freshly allocated filter graph to populate
 * @param is       player state (provides stream, renderer info, outputs)
 * @param vfilters user-supplied filter description, or NULL
 * @param frame    a decoded frame supplying width/height/format for the source
 * @return >= 0 on success, negative AVERROR on failure
 */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
{
    enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
    char sws_flags_str[512] = "";
    char buffersrc_args[256];
    int ret;
    AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
    AVCodecParameters *codecpar = is->video_st->codecpar;
    // Guess the frame rate from the format context and the video stream
    AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
    AVDictionaryEntry *e = NULL;
    int nb_pix_fmts = 0;
    int i, j;

    // Collect the pixel formats the SDL renderer can display
    for (i = 0; i < renderer_info.num_texture_formats; i++) {
        for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
            if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
                pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
                break;
            }
        }
    }
    // Terminate the list, e.g. [AV_PIX_FMT_BGRA, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE]
    pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;

    // Flatten sws_dict into a "key=val:key=val" option string for swscale
    while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
        if (!strcmp(e->key, "sws_flags")) {
            av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
        } else
            av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
    }
    // Strip the trailing ':' separator
    if (strlen(sws_flags_str))
        sws_flags_str[strlen(sws_flags_str)-1] = '\0';

    graph->scale_sws_opts = av_strdup(sws_flags_str);

    // Arguments for the "buffer" source filter, e.g.
    // "video_size=720x576:pix_fmt=0:time_base=1/25025:pixel_aspect=1/1"
    snprintf(buffersrc_args, sizeof(buffersrc_args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             frame->width, frame->height, frame->format,
             is->video_st->time_base.num, is->video_st->time_base.den,
             codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));

    // Append the frame rate when known
    if (fr.num && fr.den)
        av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);

    // Create the "buffer" entry (source) filter
    if ((ret = avfilter_graph_create_filter(&filt_src,
                                            avfilter_get_by_name("buffer"),
                                            "ffplay_buffer", buffersrc_args, NULL,
                                            graph)) < 0)
        goto fail;

    // Create the "buffersink" exit (sink) filter
    ret = avfilter_graph_create_filter(&filt_out,
                                       avfilter_get_by_name("buffersink"),
                                       "ffplay_buffersink", NULL, NULL, graph);
    if (ret < 0)
        goto fail;

    // Restrict the sink to the SDL-displayable pixel formats collected above
    if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts,  AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto fail;

    last_filter = filt_out;

/* Note: this macro adds a filter before the lastly added filter, so the
 * processing order of the filters is in reverse */
#define INSERT_FILT(name, arg) do {                                          \
    AVFilterContext *filt_ctx;                                               \
                                                                             \
    ret = avfilter_graph_create_filter(&filt_ctx,                            \
                                       avfilter_get_by_name(name),           \
                                       "ffplay_" name, arg, NULL, graph);    \
    if (ret < 0)                                                             \
        goto fail;                                                           \
                                                                             \
    ret = avfilter_link(filt_ctx, 0, last_filter, 0);                        \
    if (ret < 0)                                                             \
        goto fail;                                                           \
                                                                             \
    last_filter = filt_ctx;                                                  \
} while (0)

    // Honour stream rotation metadata by inserting transpose/flip/rotate filters
    if (autorotate) {
        double theta  = get_rotation(is->video_st);

        if (fabs(theta - 90) < 1.0) {
            INSERT_FILT("transpose", "clock");
        } else if (fabs(theta - 180) < 1.0) {
            INSERT_FILT("hflip", NULL);
            INSERT_FILT("vflip", NULL);
        } else if (fabs(theta - 270) < 1.0) {
            INSERT_FILT("transpose", "cclock");
        } else if (fabs(theta) > 1.0) {
            char rotate_buf[64];
            snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
            INSERT_FILT("rotate", rotate_buf);
        }
    }

    // Parse/link the user filter chain and configure the graph
    if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
        goto fail;

    // Entry (source) filter
    is->in_video_filter  = filt_src;
    // Exit (sink) filter
    is->out_video_filter = filt_out;

fail:
    return ret;
}

/**
 * Build the audio filter chain: abuffer -> [user filters] -> abuffersink.
 *
 * Called twice: from stream_component_open (force_output_format = 0) and
 * from audio_thread on a format change (force_output_format = 1, which pins
 * the sink to the opened audio device's layout/channels/rate).
 *
 * @param videoState          player state; agraph and in/out filters are set here
 * @param afilters            user-supplied audio filter description, or NULL
 * @param force_output_format non-zero to constrain the sink to audio_tgt
 * @return >= 0 on success, negative AVERROR on failure (agraph is freed)
 */
static int configure_audio_filters(VideoState *videoState, const char *afilters, int force_output_format)
{
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    int sample_rates[2] = { 0, -1 };
    int64_t channel_layouts[2] = { 0, -1 };
    int channels[2] = { 0, -1 };
    AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
    char aresample_swr_opts[512] = "";
    AVDictionaryEntry *e = NULL;
    char asrc_args[256];
    int ret;

    // Drop any previously built graph: this function runs more than once
    // (stream_component_open first, then audio_thread on reconfiguration).
    avfilter_graph_free(&videoState->agraph);
    // Allocate a fresh audio filter graph
    if (!(videoState->agraph = avfilter_graph_alloc()))
        return AVERROR(ENOMEM);
    // Number of threads the graph may use
    videoState->agraph->nb_threads = filter_nbthreads;

    // Flatten swr_opts into "key=val:key=val" for the aresample filter
    while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
    if (strlen(aresample_swr_opts))
        aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
    av_opt_set(videoState->agraph, "aresample_swr_opts", aresample_swr_opts, 0);

    // abuffer source parameters: sample rate, sample format name
    // (e.g. AV_SAMPLE_FMT_U8 -> "u8", AV_SAMPLE_FMT_FLTP -> "fltp"),
    // channel count and time base (1/sample_rate)
    ret = snprintf(asrc_args, sizeof(asrc_args),
                   "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
                   videoState->audio_filter_src.freq,
                   av_get_sample_fmt_name(videoState->audio_filter_src.fmt),
                   videoState->audio_filter_src.channels,
                   1,
                   videoState->audio_filter_src.freq);
    // Fix: only append when the first snprintf neither failed nor truncated;
    // otherwise asrc_args + ret points past the buffer and
    // sizeof(asrc_args) - ret underflows to a huge size_t.
    if (videoState->audio_filter_src.channel_layout &&
        ret >= 0 && ret < (int)sizeof(asrc_args))
        snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
                 ":channel_layout=0x%"PRIx64,  videoState->audio_filter_src.channel_layout);


    // Create the "abuffer" source filter, e.g.
    // "abuffer=sample_rate=44100:sample_fmt=fltp:channels=2:time_base=1/44100:channel_layout=3"
    ret = avfilter_graph_create_filter(&filt_asrc,
                                       avfilter_get_by_name("abuffer"), "ffplay_abuffer",
                                       asrc_args, NULL, videoState->agraph);
    if (ret < 0)
        goto end;

    // Create the "abuffersink" sink filter
    ret = avfilter_graph_create_filter(&filt_asink,
                                       avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
                                       NULL, NULL, videoState->agraph);
    if (ret < 0)
        goto end;

    // Sink accepts S16 only; allow any channel count for now
    if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts,  AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;
    if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;

    // 0 when called from stream_component_open, 1 from audio_thread:
    // pin the sink to the device format negotiated in audio_tgt
    if (force_output_format) {
        channel_layouts[0] = videoState->audio_tgt.channel_layout;
        channels       [0] = videoState->audio_tgt.channel_layout ? -1 : videoState->audio_tgt.channels;
        sample_rates   [0] = videoState->audio_tgt.freq;
        if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
        if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
        if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels       ,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
        if ((ret = av_opt_set_int_list(filt_asink, "sample_rates"   , sample_rates   ,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
    }


    // Parse/link the user chain and configure the graph
    if ((ret = configure_filtergraph(videoState->agraph, afilters, filt_asrc, filt_asink)) < 0)
        goto end;

    // Publish the entry (abuffer) filter
    videoState->in_audio_filter  = filt_asrc;
    // Publish the exit (abuffersink) filter
    videoState->out_audio_filter = filt_asink;

end:  // free the graph on any failure
    if (ret < 0)
        avfilter_graph_free(&videoState->agraph);
    return ret;
}
#endif  /* CONFIG_AVFILTER */

/**
 * Audio decoding thread: decodes frames, pushes them through the audio
 * filter graph (when CONFIG_AVFILTER) and queues the filtered frames into
 * is->sampq for the audio callback to consume.
 *
 * @param arg VideoState pointer
 * @return last status code (negative AVERROR on abort/failure)
 */
static int audio_thread(void *arg)
{
    // Central player state
    VideoState *is = arg;

    // Decoded (pre-filter) frame
    AVFrame *frame = av_frame_alloc();

    // Separate output frame (added locally) so the pre-/post-filter data
    // can be inspected independently
    AVFrame *out_frame = av_frame_alloc();

    // Slot in the sample queue receiving the filtered frame
    Frame *af;

#if CONFIG_AVFILTER
    int last_serial = -1;
    int64_t dec_channel_layout;
    int reconfigure;
#endif
    int got_frame = 0;
    AVRational tb;
    int ret = 0;

    // Fix: the original only checked `frame`, which both leaked the other
    // frame on failure and dereferenced a NULL out_frame later.
    if (!frame || !out_frame) {
        av_frame_free(&frame);
        av_frame_free(&out_frame);
        return AVERROR(ENOMEM);
    }

    do {
        // Decode one audio frame:
        //  1: got a frame
        //  0: nothing right now (EOF reached, but the thread stays alive
        //     because the user may seek backwards)
        // -1: decoding aborted (Decoder->queue->abort_request)
        if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
            goto the_end;

        if (got_frame) {
                // Time base is the reciprocal of the sample rate
                tb = (AVRational){1, frame->sample_rate};

#if CONFIG_AVFILTER
                // Derive a valid channel layout from the decoded frame
                dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);

                // Rebuild the filter graph when the frame no longer matches the
                // configured source:
                // 1. sample format / channel count changed (cmp_audio_fmts)
                // 2. channel layout changed
                // 3. sample rate changed
                // 4. packet serial changed (always true for the very first
                //    frame, so the graph gets built here initially)
                reconfigure =
                    cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
                                   frame->format, frame->channels)    ||
                    is->audio_filter_src.channel_layout != dec_channel_layout ||
                    is->audio_filter_src.freq           != frame->sample_rate ||
                    is->auddec.pkt_serial               != last_serial;

                if (reconfigure) {
                    char buf1[1024], buf2[1024];
                    av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
                    av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
                    av_log(NULL, AV_LOG_DEBUG,
                           "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
                           is->audio_filter_src.freq,
                           is->audio_filter_src.channels,
                           av_get_sample_fmt_name(is->audio_filter_src.fmt),
                           buf1,
                           last_serial,
                           frame->sample_rate,
                           frame->channels,
                           av_get_sample_fmt_name(frame->format),
                           buf2,
                           is->auddec.pkt_serial);

                    is->audio_filter_src.fmt            = frame->format;
                    is->audio_filter_src.channels       = frame->channels;
                    is->audio_filter_src.channel_layout = dec_channel_layout;
                    is->audio_filter_src.freq           = frame->sample_rate;
                    // Remember the serial so the next comparison is equal
                    last_serial                         = is->auddec.pkt_serial;

                    // Rebuild the audio filter graph
                    // afilters: user-supplied audio filter description
                    if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
                        goto the_end;
                }

            // Feed the decoded frame into the abuffer source
            if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
                goto the_end;

            // Drain every frame buffered at the abuffersink output
            while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, out_frame, 0)) >= 0) {
                // Time base after filtering/resampling
                tb = av_buffersink_get_time_base(is->out_audio_filter);
#endif
                // NOTE(review): when CONFIG_AVFILTER is 0 this path reads
                // out_frame, which that build never fills in — upstream
                // ffplay uses `frame` here; confirm before using the
                // non-filter configuration.
                // Blocks while the queue is full, until a slot is consumed
                if (!(af = frame_queue_peek_writable(&is->sampq)))
                    goto the_end;

                // pts of the (resampled) frame in seconds
                af->pts = (out_frame->pts == AV_NOPTS_VALUE) ? NAN : out_frame->pts * av_q2d(tb);

                // reordered pos from the last AVPacket that has been input into the decoder
                af->pos = out_frame->pkt_pos;

                // Playback serial
                af->serial = is->auddec.pkt_serial;

                // Duration = nb_samples / sample_rate; e.g. at 44100 Hz with
                // 1024 samples: 1024/44100 ≈ 0.0232 s
                af->duration = av_q2d((AVRational){out_frame->nb_samples, out_frame->sample_rate});

                // Move the filtered data into the queue slot, drop our input ref
                av_frame_move_ref(af->frame, out_frame);
                av_frame_unref(frame);

                // Publish the frame to is->sampq
                frame_queue_push(&is->sampq);

#if CONFIG_AVFILTER
                // A seek changed the serial: stop draining stale frames
                if (is->audioq.serial != is->auddec.pkt_serial)
                    break;
            }
            if (ret == AVERROR_EOF)
                is->auddec.finished = is->auddec.pkt_serial;
#endif
        }
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&is->agraph);
#endif
    av_frame_free(&frame);
    av_frame_free(&out_frame);
    return ret;
}

static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
{
    /* Re-arm the packet queue: clears abort_request and bumps the serial. */
    packet_queue_start(d->queue);

    /* Spawn the decoder thread running fn(arg). */
    d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
    if (d->decoder_tid)
        return 0;

    av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
    return AVERROR(ENOMEM);
}

/**
 * Video decoding thread: pulls decoded frames from the video decoder, runs
 * them through the (optional) video filter graph and queues them into
 * is->pictq for the display loop.
 * @brief video_thread
 * @param arg VideoState pointer
 * @return 0 on normal exit, AVERROR(ENOMEM) if the frame cannot be allocated
 */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    // Working frame, reused for every decoded/filtered picture
    AVFrame *frame = av_frame_alloc();
    double pts;
    double duration;
    int ret;
    // Time base of the video stream
    AVRational tb = is->video_st->time_base;
    // Typically 25/1, i.e. 25 frames per second
    AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);

#if CONFIG_AVFILTER
    AVFilterGraph *graph = NULL;
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    int last_w = 0;
    int last_h = 0;
    enum AVPixelFormat last_format = -2;
    int last_serial = -1;
    int last_vfilter_idx = 0;
#endif

    if (!frame)
        return AVERROR(ENOMEM);

    for (;;) {
        // 1. Fetch one decoded frame.
        // Return values:
        //  0  frame was dropped/none available, try again
        //  1  got a frame
        // -1  stop decoding
        ret = get_video_frame(is, frame);

        if (ret < 0)
            goto the_end;
        if (!ret)
            continue;

// Video filtering
#if CONFIG_AVFILTER
        // Rebuild the filter graph whenever the incoming frame differs from
        // the configured source:
        // 1. last_w != frame->width          width changed
        // 2. last_h != frame->height         height changed
        // 3. last_format != frame->format    pixel format changed
        // 4. last_serial != pkt_serial       playback serial changed (seek)
        // 5. last_vfilter_idx != vfilter_idx user switched between the
        //    configured filter chains
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != is->viddec.pkt_serial
            || last_vfilter_idx != is->vfilter_idx) {
            av_log(NULL, AV_LOG_DEBUG,
                   "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
                   last_w, last_h,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
                   frame->width, frame->height,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
            avfilter_graph_free(&graph);
            // Allocate a fresh filter graph container
            graph = avfilter_graph_alloc();
            if (!graph) {
                ret = AVERROR(ENOMEM);
                goto the_end;
            }
            // Number of threads the graph may use
            graph->nb_threads = filter_nbthreads;
            // Build the video filter chain
            if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
                SDL_Event event;
                event.type = FF_QUIT_EVENT;
                event.user.data1 = is;
                SDL_PushEvent(&event);
                goto the_end;
            }

            // Entry (source) filter
            filt_in  = is->in_video_filter;
            // Exit (sink) filter
            filt_out = is->out_video_filter;
            // Remember the current frame width
            last_w = frame->width;
            // Remember the current frame height
            last_h = frame->height;
            // Remember the current pixel format
            last_format = frame->format;
            // Remember the current playback serial
            last_serial = is->viddec.pkt_serial;
            // Remember the active filter chain index
            last_vfilter_idx = is->vfilter_idx;
            // Frame rate as reported by the sink
            frame_rate = av_buffersink_get_frame_rate(filt_out);
        }

        // The post-filter pixel format could be inspected here:
//        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
//        printf("%s(%d) frame frame fmt is %d,%s \n", __FUNCTION__, __LINE__, frame->format, desc->name);

        // Feed the frame into the buffer source
        ret = av_buffersrc_add_frame(filt_in, frame);
        if (ret < 0)
            goto the_end;

        while (ret >= 0) {
            // Timestamp (seconds) before pulling from the sink, used to
            // measure the filtering delay below
            is->frame_last_returned_time = av_gettime_relative() / 1000000.0;

            // Pull one filtered frame from the buffersink
            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                if (ret == AVERROR_EOF)
                    is->viddec.finished = is->viddec.pkt_serial;
                ret = 0;
                break;
            }

            // Time spent inside the filter graph for this frame
            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0) {
                // Implausibly large delay: ignore it for A/V sync purposes
               is->frame_last_filter_delay = 0;
            }

            // Time base of the filtered video
            tb = av_buffersink_get_time_base(filt_out);
#endif
            // Nominal frame duration = 1/frame_rate, typically 1/25 = 0.04 s
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            // Presentation time in seconds: frame->pts * time base
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            // 2. Queue the frame into is->pictq
            ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
            // queue_picture moved the references via av_frame_move_ref, so the
            // local frame can be safely unreferenced here
            av_frame_unref(frame);
#if CONFIG_AVFILTER
            // A seek changed the serial: stop draining stale frames
            if (is->videoq.serial != is->viddec.pkt_serial)
                break;
        }
#endif

        if (ret < 0)
            goto the_end;
    }
 the_end:  // free the filter graph and the working frame
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_frame_free(&frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    Frame *sp;
    int got_subtitle;
    double pts;

    /* Subtitle decoding loop: grab a writable queue slot, decode straight
     * into it, fill in the timing fields and publish it. */
    while (1) {
        sp = frame_queue_peek_writable(&is->subpq);
        if (!sp)
            return 0; /* queue aborted: quit the thread */

        got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub);
        if (got_subtitle < 0)
            break;

        if (got_subtitle && sp->sub.format == 0) {
            /* Graphical subtitle: convert pts to seconds and publish. */
            pts = (sp->sub.pts != AV_NOPTS_VALUE) ? sp->sub.pts / (double)AV_TIME_BASE : 0;
            sp->pts      = pts;
            sp->serial   = is->subdec.pkt_serial;
            sp->width    = is->subdec.avctx->width;
            sp->height   = is->subdec.avctx->height;
            sp->uploaded = 0;

            /* now we can update the picture count */
            frame_queue_push(&is->subpq);
        } else if (got_subtitle) {
            /* Non-bitmap subtitle: discard it. */
            avsubtitle_free(&sp->sub);
        }
    }
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int remaining = samples_size / sizeof(short);

    /* Write the samples into the circular display buffer, wrapping the
     * index whenever the end of the array is reached. */
    while (remaining > 0) {
        int chunk = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (chunk > remaining)
            chunk = remaining;
        memcpy(is->sample_array + is->sample_array_index, samples, chunk * sizeof(short));
        samples                += chunk;
        is->sample_array_index += chunk;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        remaining -= chunk;
    }
}

/* return the wanted number of samples to get better sync if sync_type is video
 * or external master clock */
static int synchronize_audio(VideoState *is, int nb_samples)
{
    int wanted_nb_samples = nb_samples;
    double diff, avg_diff;
    int lo_bound, hi_bound;

    /* When audio itself is the master clock, nothing needs adjusting. */
    if (get_master_sync_type(is) == AV_SYNC_AUDIO_MASTER)
        return wanted_nb_samples;

    /* Audio clock error relative to the master clock. */
    diff = get_clock(&is->audclk) - get_master_clock(is);

    if (isnan(diff) || fabs(diff) >= AV_NOSYNC_THRESHOLD) {
        /* too big difference : may be initial PTS errors, so
           reset A-V filter */
        is->audio_diff_avg_count = 0;
        is->audio_diff_cum       = 0;
        return wanted_nb_samples;
    }

    /* Exponentially weighted running sum of the clock error. */
    is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;

    if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
        /* not enough measures to have a correct estimate */
        is->audio_diff_avg_count++;
    } else {
        /* estimate the A-V difference from the accumulated measures */
        avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

        /* Only correct once the averaged error exceeds the threshold. */
        if (fabs(avg_diff) >= is->audio_diff_threshold) {
            wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
            /* Clamp the correction to ±SAMPLE_CORRECTION_PERCENT_MAX% —
             * larger jumps in sample count are audible. */
            lo_bound = nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
            hi_bound = nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
            wanted_nb_samples = av_clip(wanted_nb_samples, lo_bound, hi_bound);
        }
        av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
                diff, avg_diff, wanted_nb_samples - nb_samples,
                is->audio_clock, is->audio_diff_threshold);
    }

    return wanted_nb_samples;
}


/**
 * Decode one audio frame and return its uncompressed size.
 *
 * The processed audio frame is decoded, converted if required, and
 * stored in is->audio_buf, with size in bytes given by the return
 * value.
 */
static int audio_decode_frame(VideoState *is)
{
    int data_size, resampled_data_size;
    int64_t dec_channel_layout;
    av_unused double audio_clock0;
    int wanted_nb_samples;
    Frame *af;

    if (is->paused)
        return -1;

    // fetch the next decoded frame, dropping any frame from a stale serial
    do {
#if defined(_WIN32)
        while (frame_queue_nb_remaining(&is->sampq) == 0) {
            // give up if starved for longer than half the HW buffer duration
            if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
                return -1;
            av_usleep (1000);
        }
#endif
        // blocks once playback has drained the sample frame queue
        if (!(af = frame_queue_peek_readable(&is->sampq)))
            return -1;
        frame_queue_next(&is->sampq);
    } while (af->serial != is->audioq.serial); // key point: a seek bumps the packet-queue serial; frames carrying the old serial are discarded immediately

    // byte size of the decoded frame's raw samples
    data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
                                           af->frame->nb_samples,
                                           af->frame->format, 1);

    // trust the frame's channel layout only if it matches its channel count
    dec_channel_layout =
        (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
            af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);

    // key point: A/V sync logic; wanted_nb_samples is the sample count adjusted for clock drift
    wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);

    // is->audio_src holds the format currently fed to the output path; if the
    // decoded AVFrame differs (or compensation is needed), (re)build the resampler
    if (af->frame->format        != is->audio_src.fmt            ||
        dec_channel_layout       != is->audio_src.channel_layout ||
        af->frame->sample_rate   != is->audio_src.freq           ||
        (wanted_nb_samples       != af->frame->nb_samples && !is->swr_ctx)) {
        swr_free(&is->swr_ctx);
        is->swr_ctx = swr_alloc_set_opts(NULL,
                                         is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
                                         dec_channel_layout,           af->frame->format, af->frame->sample_rate,
                                         0, NULL);
        if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                   af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
                   is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
            swr_free(&is->swr_ctx);
            return -1;
        }
        is->audio_src.channel_layout = dec_channel_layout;
        is->audio_src.channels       = af->frame->channels;
        is->audio_src.freq = af->frame->sample_rate;
        is->audio_src.fmt = af->frame->format;
    }

    // resample through the SwrContext when one is active
    if (is->swr_ctx) {
        // input: the decoded frame's sample planes
        const uint8_t **in = (const uint8_t **)af->frame->extended_data;
        // output buffer (grown on demand below)
        uint8_t **out = &is->audio_buf1;
        // upper bound on the output sample count (+256 headroom for resampler delay)
        int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
        // byte size of that many output samples
        int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
        int len2;
        if (out_size < 0) {
            av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
            return -1;
        }

        // sample count differs from the frame: ask the resampler to compensate
        if (wanted_nb_samples != af->frame->nb_samples) {
            if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
                                     wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
                av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
                return -1;
            }
        }
        av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
        if (!is->audio_buf1)
            return AVERROR(ENOMEM);
        // perform the resampling
        len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
        if (len2 < 0) {
            av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
            return -1;
        }
        if (len2 == out_count) {
            av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
            if (swr_init(is->swr_ctx) < 0)
                swr_free(&is->swr_ctx);
        }
        is->audio_buf = is->audio_buf1;
        resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
    } else {
        // formats already match: play the frame's own buffer directly
        is->audio_buf = af->frame->data[0];
        resampled_data_size = data_size;
    }

    audio_clock0 = is->audio_clock;
    /* update the audio clock with the pts */
    if (!isnan(af->pts))
        is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
    else
        is->audio_clock = NAN;
    is->audio_clock_serial = af->serial;
#ifdef DEBUG
    {
        static double last_clock;
        printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
               is->audio_clock - last_clock,
               is->audio_clock, audio_clock0);
        last_clock = is->audio_clock;
    }
#endif
    return resampled_data_size;
}

/* prepare a new audio buffer */
/**
 * SDL audio callback: fill SDL's playback buffer from is->audio_buf.
 * @brief sdl_audio_callback
 * @param opaque   user data pointer; here the global VideoState manager
 * @param stream   SDL-owned buffer that must be filled with playable audio
 * @param len      number of bytes SDL requests per callback
 */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    // opaque is the userdata registered with SDL: the global VideoState
    VideoState *is = opaque;

    int audio_size, len1;

    // timestamp of this callback; used below when publishing the audio clock
    audio_callback_time = av_gettime_relative();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            // despite the name, this fetches one already-decoded audio frame
            // (resampled if needed) into is->audio_buf
           audio_size = audio_decode_frame(is);
//            printf("%s(%d) audio_size: %d\n", __FUNCTION__, __LINE__, audio_size);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = NULL;
               is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
           } else {
               if (is->show_mode != SHOW_MODE_VIDEO)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               // remember how many bytes the freshly fetched buffer holds
               is->audio_buf_size = audio_size;
           }
           // restart consuming from the beginning of the buffer
           is->audio_buf_index = 0;
        }
        // if audio_buf holds fewer bytes than SDL asked for, loop to fetch more
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;

        // at full volume a plain memcpy of len1 bytes suffices
        if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
            memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        else {
             // otherwise mix at the requested volume (silence stays when muted)
            memset(stream, 0, len1);
            if (!is->muted && is->audio_buf)
                SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
        }

        // bytes still owed to SDL after this copy
        len -= len1;
        // advance SDL's buffer past what was just written
        stream += len1;
        // advance our read index for the next iteration/callback
        is->audio_buf_index += len1;
    }
    is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
//    printf("%s(%d) is->audio_write_buf_size: %d\n", __FUNCTION__, __LINE__, is->audio_write_buf_size);
    /* Let's assume the audio driver that is used by SDL has two periods. */
//    printf("%s(%d) is->audio_clock: %lf\n", __FUNCTION__, __LINE__, is->audio_clock);
    if (!isnan(is->audio_clock)) {
        // Let's assume the audio driver that is used by SDL has two periods.
        // The not-yet-heard data splits into three parts: what SDL still buffers
        // internally (~audio_hw_buf_size), the chunk written by this callback
        // (len, which equals audio_hw_buf_size), and what remains unread in
        // audio_buf (audio_write_buf_size); subtract all of them from the clock.
        double pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size)
                                           / is->audio_tgt.bytes_per_sec;
//         printf("%s(%d) is->audio_clock: %lf, pts: %lf\n", __FUNCTION__, __LINE__, is->audio_clock, pts);
        // publish the audio clock and drag the external clock along
        set_clock_at(&is->audclk, pts, is->audio_clock_serial, audio_callback_time / 1000000.0);
        sync_clock_to_slave(&is->extclk, &is->audclk);
    }
}

/**
 * Open the SDL audio device, negotiating parameters close to the wanted ones.
 * @brief audio_open
 * @param opaque   VideoState pointer, passed through to the SDL callback
 * @param wanted_channel_layout desired channel layout
 * @param wanted_nb_channels  desired channel count
 * @param wanted_sample_rate desired sample rate
 * @param audio_hw_params receives the parameters actually obtained
 *        (in practice this is is->audio_tgt)
 * @return SDL's internal audio buffer size in bytes on success, -1 on failure
 */
static int audio_open(void *opaque,
                      int64_t wanted_channel_layout,
                      int wanted_nb_channels,
                      int wanted_sample_rate,
                      struct AudioParams *audio_hw_params)
{
    // what we ask SDL for, and what SDL actually gives back
    SDL_AudioSpec wanted_spec, spec;
    // optional environment override
    const char *env;
    // fallback channel counts to try when SDL rejects the requested one
    static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
    // fallback sample rates to try, searched from high to low
    static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
    // start at the end of next_sample_rates (largest rate first)
    int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;

    // environment variable may force the channel count
    env = SDL_getenv("SDL_AUDIO_CHANNELS");
    if (env) {
        // channel count forced via environment
        wanted_nb_channels = atoi(env);
        // derive a default layout for that channel count
        wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
    }

    // if the given layout and channel count disagree, rebuild the layout
    if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
        wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
        wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
    }

    // derive the channel count from the (possibly corrected) layout
    wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
    // requested channel count for SDL
    wanted_spec.channels = wanted_nb_channels;
    // requested sample rate for SDL
    wanted_spec.freq = wanted_sample_rate;
    // a non-positive rate or channel count cannot be opened
    if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
        av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
        return -1;
    }

    // position the fallback index at the highest rate strictly below the wanted one
    while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
        next_sample_rate_idx--; // scan downward from the largest rate

    printf("%s(%d) next_sample_rate_idx: %d\n",__FUNCTION__, __LINE__, next_sample_rate_idx);

    // 16-bit signed samples in native byte order
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.silence = 0;    // silence value for this sample format

    // samples per callback, derived from SDL_AUDIO_MAX_CALLBACKS_PER_SEC:
    // freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC samples per callback, rounded to a
    // power of two via av_log2; see https://wiki.libsdl.org/SDL2/SDL_OpenAudioDevice
    printf("2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC): %d\n", (2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC)));
    wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));

    // SDL pulls audio data by invoking this callback during playback
    wanted_spec.callback = sdl_audio_callback;

    // user data handed back to the callback; here the VideoState
    wanted_spec.userdata = opaque;

    // try combinations until SDL accepts one; since devices vary widely,
    // the accepted format may require resampling on our side
    while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
        av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
               wanted_spec.channels, wanted_spec.freq, SDL_GetError());

        // step down to the next candidate channel count, e.g.
        // wanted_spec.channels == 7 maps to next_nb_channels[7] == 6
        wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];

        // channel fallbacks exhausted: try a lower sample rate instead
        if (!wanted_spec.channels) {
            // next (lower) candidate sample rate
            wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
            // restart from the originally wanted channel count
            wanted_spec.channels = wanted_nb_channels;
            if (!wanted_spec.freq) {
                av_log(NULL, AV_LOG_ERROR,
                       "No more combinations to try, audio open failed\n");
                return -1;
            }
        }
        // keep the layout consistent with the channel count being tried
        wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
    }

    // only AUDIO_S16SYS output is supported by the rest of the pipeline
    if (spec.format != AUDIO_S16SYS) {
        av_log(NULL, AV_LOG_ERROR,
               "SDL advised audio format %d is not supported!\n", spec.format);
        return -1;
    }

    // if SDL changed the channel count, rebuild the layout to match
    if (spec.channels != wanted_spec.channels) {
        wanted_channel_layout = av_get_default_channel_layout(spec.channels);
        if (!wanted_channel_layout) {
            av_log(NULL, AV_LOG_ERROR,
                   "SDL advised channel count %d is not supported!\n", spec.channels);
            return -1;
        }
    }

    // record the parameters the device was actually opened with
    // (sample rate, format, layout, channel count) into audio_hw_params
    audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
    // sample rate
    audio_hw_params->freq = spec.freq;
    // channel layout
    audio_hw_params->channel_layout = wanted_channel_layout;
    // channel count
    audio_hw_params->channels =  spec.channels;
    // bytes per sample frame (one sample across all channels)
    audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
    // bytes per second of audio
    audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
    if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
        av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
        return -1;
    }

    printf("%s(%d) samples: %d\n", __FUNCTION__, __LINE__, spec.samples);
    // SDL's internal audio buffer size in bytes; when SDL's buffer drains to
    // this level it invokes the callback again
    // e.g. samples * channels * bytes-per-sample = 2048 * 2 * 2 = 8192
    return spec.size;
}

/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    const AVCodec *codec;
    const char *forced_codec_name = NULL;
    AVDictionary *opts = NULL;
    AVDictionaryEntry *t = NULL;
    int sample_rate, nb_channels;
    int64_t channel_layout;
    int ret = 0;
    int stream_lowres = lowres;

    // validate the stream index
    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;

    // 1. allocate a decoder context
    avctx = avcodec_alloc_context3(NULL);
    if (!avctx)
        return AVERROR(ENOMEM);

    // 2. copy the stream's codec parameters into the decoder context
    ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
    if (ret < 0)
        goto fail;

    // packet time base for the decoder:
    // for audio typically the inverse of the sample rate, e.g. 1/44100;
    // for video commonly 1/90000
    avctx->pkt_timebase = ic->streams[stream_index]->time_base;

    // 3. look up the decoder by codec id
    codec = avcodec_find_decoder(avctx->codec_id);

    // a decoder name may be forced from the command line,
    // e.g. -acodec aac for audio or -vcodec h264 for video
    switch(avctx->codec_type){
        case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; forced_codec_name =    audio_codec_name; break;
        case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
        case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; forced_codec_name =    video_codec_name; break;
    }

    // when a decoder name was forced, look it up by name instead
    if (forced_codec_name)
        codec = avcodec_find_decoder_by_name(forced_codec_name);

    // no decoder found: report and bail out
    if (!codec) {
        if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
                                      "No codec could be found with name '%s'\n", forced_codec_name);
        else                   av_log(NULL, AV_LOG_WARNING,
                                      "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
        ret = AVERROR(EINVAL);
        goto fail;
    }

    avctx->codec_id = codec->id;
    if (stream_lowres > codec->max_lowres) {
        av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
                codec->max_lowres);
        stream_lowres = codec->max_lowres;
    }
    avctx->lowres = stream_lowres;

    if (fast)
        avctx->flags2 |= AV_CODEC_FLAG2_FAST;

    // pick up codec options from the command line, e.g. video bitrate -b:v 200k
    opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
    if (!av_dict_get(opts, "threads", NULL, 0))
        av_dict_set(&opts, "threads", "auto", 0);
    if (stream_lowres)
        av_dict_set_int(&opts, "lowres", stream_lowres, 0);

    // 4. open the decoder
    if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
        goto fail;
    }
    if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret =  AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }

    // clear the end-of-file flag: playback of this stream is (re)starting
    is->eof = 0;
    // stop discarding this stream so av_read_frame delivers its packets
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;

    // per-media-type setup
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
// filter support; builds usually include libavfilter, so this branch is taken
#if CONFIG_AVFILTER
        {
            AVFilterContext *sink;

            // record the decoder's raw audio parameters first:
            // sample rate, channel count, channel layout, sample format
            is->audio_filter_src.freq           = avctx->sample_rate;  // sample rate
            is->audio_filter_src.channels       = avctx->channels;     // channel count
            is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels); // channel layout
            is->audio_filter_src.fmt            = avctx->sample_fmt; // sample format

            // configure the audio filter graph; -af may supply filters that
            // change sample rate, format or channel count
            if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
                goto fail;

            // the graph's output sink (filt_asink)
            sink = is->out_audio_filter;

            // sample rate at the sink output
            sample_rate    = av_buffersink_get_sample_rate(sink);
            // channel count at the sink output
            nb_channels    = av_buffersink_get_channels(sink);
            // channel layout at the sink output
            channel_layout = av_buffersink_get_channel_layout(sink);
        }
#else
        sample_rate    = avctx->sample_rate;
        nb_channels    = avctx->channels;
        channel_layout = avctx->channel_layout;
#endif

        // prepare audio output: after audio_open, is->audio_tgt holds the
        // parameters the device was actually opened with;
        // returns -1 when no usable configuration was found, otherwise
        // SDL's internal audio buffer size in bytes
        if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
            goto fail;

        // SDL's internal audio buffer size in bytes
        is->audio_hw_buf_size = ret;  // e.g. 8192
        // is->audio_tgt holds the parameters SDL settled on
        is->audio_src = is->audio_tgt;
        // internal audio buffer: resampled PCM lands here and the SDL
        // callback consumes it from here
        is->audio_buf_size  = 0;
        // read index: how far the SDL callback has consumed audio_buf
        is->audio_buf_index = 0;

        /* the following applies when audio is synced to video (sync video) */
        /* init averaging filter */
        // weight such that 20 successive multiplications decay to 0.01,
        // i.e. 0.01^(1/20) which is approximately 0.794
        is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        // number of drift measurements accumulated so far
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio FIFO fullness,
           we correct audio sync only if larger than this threshold */
        // duration of one hardware buffer, i.e. one audio callback interval
        is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;

        // remember the audio stream index
        is->audio_stream = stream_index;
        // remember the audio stream
        is->audio_st = ic->streams[stream_index];

        // initialize the audio decoder wrapper
        if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
            goto fail;
        if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
            is->auddec.start_pts = is->audio_st->start_time;
            is->auddec.start_pts_tb = is->audio_st->time_base;
        }
        // start the audio decoding thread; the PacketQueue serial is 1 at this point
        if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
            goto out;

        // unpause the device so SDL starts invoking the audio callback
        SDL_PauseAudioDevice(audio_dev, 0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        // remember the video stream index
        is->video_stream = stream_index;
        // remember the video stream
        is->video_st = ic->streams[stream_index];

        // initialize the video decoder wrapper
        if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
            goto fail;

        // start the video decoding thread
        if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
            goto out;

        // request queuing of attached pictures (e.g. cover art)
        is->queue_attachments_req = 1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];

        // initialize the subtitle decoder wrapper
        if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
            goto fail;

        // start the subtitle decoding thread
        if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
            goto out;
        break;
    default:
        break;
    }
    goto out;

fail:
    avcodec_free_context(&avctx);
out:
    av_dict_free(&opts);

    return ret;
}

/**
 * Abort callback installed on the AVFormatContext.
 *
 * FFmpeg invokes it from blocking demuxer operations:
 *  1. avformat_open_input
 *  2. avformat_find_stream_info
 *  3. av_read_frame
 *
 * @brief decode_interrupt_cb
 * @param ctx the VideoState registered as interrupt_callback.opaque
 * @return non-zero to make the blocked call abort, 0 to continue
 */
static int decode_interrupt_cb(void *ctx)
{
    VideoState *state = ctx;
    return state->abort_request;
}

/**
 * Decide whether a stream's packet queue is already full enough that the
 * read thread can pause pulling packets for it.
 *
 * The queue counts as "full" when any of the following holds:
 *  1. stream_id < 0                 - the stream is not in use
 *  2. queue->abort_request          - the user requested termination
 *  3. AV_DISPOSITION_ATTACHED_PIC   - the stream is an attached picture (cover art)
 *  4. more than MIN_FRAMES packets are queued AND either the queued duration
 *     is unknown or it already exceeds one second
 *
 * @param st        the stream being examined
 * @param stream_id index of the stream, or negative if unused
 * @param queue     the stream's packet queue
 * @return non-zero if the read thread should wait (~10 ms), 0 to keep reading
 */
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
    /* Parenthesize the &&-term explicitly: mixing && into an || chain without
     * grouping is a classic -Wparentheses hazard (precedence is unchanged). */
    return stream_id < 0 ||
           queue->abort_request ||
           (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
           (queue->nb_packets > MIN_FRAMES &&
            (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0));
}

/* Detect whether the input is a realtime stream (rtp/rtsp/sdp demuxer,
 * or an rtp:/udp: URL on an open I/O context). */
static int is_realtime(AVFormatContext *s)
{
    static const char *const realtime_formats[] = { "rtp", "rtsp", "sdp" };
    size_t i;

    /* demuxers dedicated to realtime protocols */
    for (i = 0; i < FF_ARRAY_ELEMS(realtime_formats); i++)
        if (!strcmp(s->iformat->name, realtime_formats[i]))
            return 1;

    /* otherwise inspect the URL scheme when an I/O context is open */
    if (s->pb && (!strncmp(s->url, "rtp:", 4) || !strncmp(s->url, "udp:", 4)))
        return 1;

    return 0;
}

/* this thread gets the stream from the disk or the network */
static int read_thread(void *arg)
{
    // 播放器管家
    VideoState *is = arg;
    //多媒体上下文
    AVFormatContext *ic = NULL;
    int err, i, ret;

    // 用来记录符合播放的音视频流索引
    int st_index[AVMEDIA_TYPE_NB];

    AVPacket *pkt = NULL;
    // 音视频流的开始时间
    int64_t stream_start_time;

    // 可播放的范围，主要是时间
    int pkt_in_play_range = 0;

    AVDictionaryEntry *t;

    // 创建条件变量
    SDL_mutex *wait_mutex = SDL_CreateMutex();

    // 对应 MPEG TS 封装容器，可以支持多频道切换
    int scan_all_pmts_set = 0;

    int64_t pkt_ts;

    if (!wait_mutex) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    // st_index数组初始化，该数组用来保存音视频流播放的索引。将其值复制为 -1
    memset(st_index, -1, sizeof(st_index));
    // 标记未结束，因为现在刚开始
    is->eof = 0;

    // 分配存放解压前的数据包
    pkt = av_packet_alloc();
    if (!pkt) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    // 1. 分配解码上下文
    ic = avformat_alloc_context();
    if (!ic) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    // 2. 设置中断回调，解码过程中会不断回调
    // 尤其在网络流中，如果网络延迟可能会导致在 avformat_open_input 中卡主，因此必须保证用户能在此过程中暂停或终止
    // 调用栈,有三个地方会调用
    // 1. avformat_open_input
    // 2. avformat_find_stream_info
    // 3. av_read_frame
    ic->interrupt_callback.callback = decode_interrupt_cb;
    // 传递给回调的参数，这里是 VideoState
    ic->interrupt_callback.opaque = is;

    // 应用于 MEPG TS 封装格式
    // 可通过 ffmpeg -h demuxer=mpegts 查看 mpegts 支持的 option
    // ffmepg -h demuxer=mp4 ....
    if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
        av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
        scan_all_pmts_set = 1;
    }
    // 3. 打开媒体输入
    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    // 扫描 MPEG TS 封装格式
    if (scan_all_pmts_set)
        av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);

    // 检查设置的 format_opts 有哪些参数是有问题的
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    // AVFormatContext 管家
    is->ic = ic;

    if (genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    av_format_inject_global_side_data(ic);

    // 4. 探测媒体流信息
    if (find_stream_info) {
        AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
        int orig_nb_streams = ic->nb_streams;

        // 对于一些头部没有封装格式信息的，比如 FLV 或 ts，就需要提前先去分析流信息，然后把这些读取的 AVPacket 缓存
        // av_read_frame 调用时使用
        err = avformat_find_stream_info(ic, opts);

        for (i = 0; i < orig_nb_streams; i++)
            av_dict_free(&opts[i]);
        av_freep(&opts);

        if (err < 0) {
            av_log(NULL, AV_LOG_WARNING,
                   "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
    }

    if (ic->pb)
        ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end

    // 是否传递了要跳过的字节
    if (seek_by_bytes < 0)
        seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);

    // 重要: 在音视频同步时用来判断是否摆烂的标志，如果视频比音频慢了10秒以上，那就直接摆烂了。这应该是个经验值!!!
    is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;

    // 显示标题
    if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
        window_title = av_asprintf("%s - %s", t->value, input_filename);

    /* if seeking requested, we execute it */
    // 设置起始播放时间，比如使用 -ss 00:10，表示从10秒开始播放,将其会转为微妙
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        // 转为微妙
        timestamp = start_time;
        /* add the stream start time */
        // 找到第一帧起始时间，然后加上设置的 start_time 时间
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        // seek的指定的位置开始播放
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    // 是否是实时流，比如 RTSP、RTP、sdp等
    is->realtime = is_realtime(ic);

    // 打印媒体信息
    if (show_status)
        av_dump_format(ic, 0, is->filename, 0);

//    for (int i = 0; i < )

    // 5. 遍历媒体流，查找解码器
    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
        enum AVMediaType type = st->codecpar->codec_type;
        st->discard = AVDISCARD_ALL;  // 丢弃所有包
        // wanted_stream_spec[type] 手动指定播放流的索引，比如 -ast 0 表示播放第一个音频， -ast 1 播放第二个音频
        if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
            if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
                st_index[type] = i;
    }
    // wanted_stream_spec 指定的参数有误，打印出错误日志
    for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
        if (wanted_stream_spec[i] && st_index[i] == -1) {
            av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
            st_index[i] = INT_MAX; // 这是原生的写法，如果发生指定错误，那么就不在播放
            //st_index[i] = -1;   // fixed me,这里修复指定错误后也可以播放
        }
    }

    // 调试: huangxiaojing 分析 st_index[] 保存的哪些音视频流索引
    for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
        printf("st_index[%d]: %d\nwanted_stream_spec[%d]: %s\n",
               i, st_index[i], i, wanted_stream_spec[i]);
    }

    // video_disable 通过外部传参指定，通过 av_find_best_stream 查找，无论有没有指定都会查找一次
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0); // 查找符合的視頻

    // audio_disable 通过外部传参指定
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                st_index[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);  // 查找符合的音頻

    // subtitle_disable 通过外部传参指定
    if (!video_disable && !subtitle_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                st_index[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);  // 查找符合的字幕

    // 显示模式，通过外部传参指定, 0 = video (视频，默认), 1 = waves (音频波形图), 2 = RDFT (离散傅里叶变换)
    is->show_mode = show_mode;

    // 如果有视频流，根据编码器选择合适的视频显示宽高比
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
        AVCodecParameters *codecpar = st->codecpar;
        AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
        if (codecpar->width)
            set_default_window_size(codecpar->width, codecpar->height, sar);  // 根據 sar 寬高比拉伸
    }

    // 6. 具体打开音视频解码器以及一些前置配置信息
    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        // 重點: 打开音频解码线程
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret = -1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
         // 重點: 打开视频解码线程
        ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }

    // 显示模式，如果有视频就是显示视频，没有视频就显示音频声谱图
    if (is->show_mode == SHOW_MODE_NONE) {
         // 显示视频或音频离散型号
          is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
        // 显示音频波形
//        is->show_mode = SHOW_MODE_WAVES;

        // 显示音频离散信号
//        is->show_mode = SHOW_MODE_RDFT;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        // 重點: 打开字幕解码线程
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    // 音视频都没有，跳转到 fail 执行
    if (is->video_stream < 0 && is->audio_stream < 0) {
        av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
               is->filename);
        ret = -1;
        goto fail;
    }

    // infinite_buffer 用于实时流播放，这里是 -1,强制指定实时流为 1
    if (infinite_buffer < 0 && is->realtime)
        infinite_buffer = 1;

    // 重点：从媒体文件中读取音视频 AVPacket
    for (;;) {
        // 用户退出时，退出解码
        // 调用： event_loop 事件循环中，用户按下 q 退出，调用 do_exit() -> stream_close()
        if (is->abort_request)
            break;

        // 重点1: 检测流暂停，网络流可以用到，比如 rtsp、rtmp等等
        // 恢复时直接从网络流获取最新的进行播放
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return = av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
        if (is->paused &&
                (!strcmp(ic->iformat->name, "rtsp") ||
                 (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        // seek 请求
        if (is->seek_req) {
            int64_t seek_target = is->seek_pos;
            int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
// FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            // 真正 seek 的地方
            // 参数详解:
            // 1. AVFormatContext *s, 音视频复用器上下文
            // 2. stream_index: 流索引，这里-1标识自动选择默认流
            // 3. min_ts: 允许的时间戳范围，用于放宽搜索条件，加速定位
            // 4. target_ts: 目标时间戳
            // 5. max_ts: 最大时间戳
            // 6. 标志位: 常见值 AVSEEK_FLAG_BACKWARD定位到目标时间戳的关键字
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR,
                       "%s: error while seeking\n", is->ic->url);
            } else {
                if (is->audio_stream >= 0)
                    packet_queue_flush(&is->audioq);  // 清空音频队列
                if (is->subtitle_stream >= 0)
                    packet_queue_flush(&is->subtitleq); // 清空字幕队列
                if (is->video_stream >= 0)
                    packet_queue_flush(&is->videoq);    // 清空视频队列
                if (is->seek_flags & AVSEEK_FLAG_BYTE) { // 同步外部时钟
                   set_clock(&is->extclk, NAN, 0);
                } else {
                   set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
                }
            }
            is->seek_req = 0;  // 重置 seek 请求
            is->queue_attachments_req = 1;
            is->eof = 0;
            if (is->paused)  // 如果本身是 pause 状态，显示一帧画面继续暂停
                step_to_next_frame(is);
        }
        // 对于有些 mp3 有专辑封面
        if (is->queue_attachments_req) {
            // 如果是视频流并且是专辑封面
            if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
                if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
                    goto fail;
                packet_queue_put(&is->videoq, pkt);
                packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
            }
            is->queue_attachments_req = 0;
        }

        /* if the queue are full, no need to read more */
        // 1. 判断所有的队里之和有没有超过 MAX_QUEUE_SIZE = 15Mb, 疑问: 这里的15Mb是否满足不同分辨率的视频？
        // 2. 或者所有音视频帧的时间加起来超过1秒
        if (infinite_buffer<1 &&
              (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
                stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
                stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
            /* wait 10 ms */
            SDL_LockMutex(wait_mutex);
            // 唤醒的地方:
            // 1. seek 操作
            // 2. 音视频 PacketQueue 队里为空时
            SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
            SDL_UnlockMutex(wait_mutex);
            continue;
        }
        // 退出机制（主要是音视频解码结束）
        // 1. !is->paused 非暂停状态
        // 2. 音频解码完成 (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0))
        // 3. 视频解码完成 (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0)))
        // 4. 播放循环次数完成 loop != 1 && (!loop || --loop)
        // 5. 是否自动退出 autoexit
        if (!is->paused &&
            (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
            (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
            // 如果 loop 是0，表示无限循环播放了
            if (loop != 1 && (!loop || --loop)) {
//                printf("%s(%d) 开始新的播放....\n", __FUNCTION__, __LINE__);
                // 设置的起始时间，单位是微妙，比如 00:10 从第10秒起开始播放
                stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
            } else if (autoexit) {
                ret = AVERROR_EOF;
                goto fail;
            } else {
                // 如果没有设置循环播放，或者自动退出。那么就会一直等待用户快退或重复播放操作
                printf("%s(%d) play finish....\n", __FUNCTION__, __LINE__);
            }
        }
        // 从媒体流中读压缩包
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            // 读取压缩数据报完成，此时 is->eof=0，重刷解码器缓冲区，然后重置为1，就会一直等待10ms
            if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
                if (is->video_stream >= 0) {
                    // 发送空的 AVPacket 提示解码器需要将缓存区数据全部刷出来
                    packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
                }

                if (is->audio_stream >= 0) {
                    // 发送空的 AVPacket 提示解码器需要将缓存区数据全部刷出来
                    packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
                }

                if (is->subtitle_stream >= 0) {
                    // 发送空的 AVPacket 提示解码器需要将缓存区数据全部刷出来
                    packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
                }

                // 文件已经播放完毕
                is->eof = 1;
            }
            if (ic->pb && ic->pb->error) {
                if (autoexit)
                    goto fail;
                else
                    break;
            }
            // 等待 10毫秒
            SDL_LockMutex(wait_mutex);
            SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
            SDL_UnlockMutex(wait_mutex);
            continue;
        } else {
            is->eof = 0;
        }

        /* check if packet is in play range specified by user, then queue, otherwise discard */
        stream_start_time = ic->streams[pkt->stream_index]->start_time;
        pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
        // duration: 检查当前播放时间是否在设定的合法时间范围内，例如: ffplay -t 2 -i juren-5s.mp4 只播放2秒内的数据，超过的就丢弃。
        // pkt_in_play_range: 用于指定当前解码的帧是否还在可播放的时间窗口，比如指定视频在 00:00-02:00 两分钟内播放，如果此时读取到的压缩包大于 02:00，这个变量就为0
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
                <= ((double)duration / 1000000);


        // 2024-03-29 huangxiaojing 添加
        if (!pkt_in_play_range) {
            // 不在合理的播放区间
            printf("pkt_in_play_range: %d\n", pkt_in_play_range);
        }

        // 将数据插入到 PacketQueue
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            // 压入音频包
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
                   && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
            // 压入视频包
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            // 压入字幕包
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_packet_unref(pkt);
        }
    }

    ret = 0;
 fail:
    if (ic && !is->ic)
        avformat_close_input(&ic);

    av_packet_free(&pkt);
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    SDL_DestroyMutex(wait_mutex);
    return 0;
}

static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    // Global player state: clocks, queues, decoder contexts and all playback
    // status live in VideoState.
    VideoState *is;

    // Allocate and zero-initialize the whole state block.
    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    is->last_video_stream = is->video_stream = -1;
    is->last_audio_stream = is->audio_stream = -1;
    is->last_subtitle_stream = is->subtitle_stream = -1;
    is->filename = av_strdup(filename);
    if (!is->filename)
        goto fail;
    is->iformat = iformat;
    is->ytop    = 0;
    is->xleft   = 0;

    /* start video display */
    // frame_queue_init does two things:
    // 1. pre-allocates max_size AVFrames for the queue
    // 2. creates the FrameQueue's mutex/condition for reader/writer sync
    if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
        goto fail;
    if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
        goto fail;
    if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
        goto fail;

    // packet_queue_init:
    // 1. initializes the PacketQueue and its pkt_list
    // 2. creates the queue's condition variable
    // 3. starts with q->abort_request = 1 (queue disabled until started)
    if (packet_queue_init(&is->videoq) < 0 ||
        packet_queue_init(&is->audioq) < 0 ||
        packet_queue_init(&is->subtitleq) < 0)
        goto fail;

    // continue_read_thread lets audio_thread/video_thread/subtitle_thread
    // wake up the read_thread (e.g. when a packet queue drains).
    if (!(is->continue_read_thread = SDL_CreateCond())) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
        goto fail;
    }

    // Initialize the three clocks; each is slaved to its queue's serial
    // (the external clock is slaved to its own serial).
    init_clock(&is->vidclk, &is->videoq.serial);
    init_clock(&is->audclk, &is->audioq.serial);
    init_clock(&is->extclk, &is->extclk.serial);
    is->audio_clock_serial = -1;
    // Clamp the -volume option into [0, 100], then scale to SDL's range.
    if (startup_volume < 0)
        av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
    if (startup_volume > 100)
        av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
    startup_volume = av_clip(startup_volume, 0, 100);
    startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
    // Initial playback volume
    is->audio_volume = startup_volume;
    // Not muted at startup
    is->muted = 0;
    // A/V sync mode (defaults to syncing against the audio clock)
    is->av_sync_type = av_sync_type;

    // Key step: spawn read_thread, which demuxes the input and feeds the
    // per-stream PacketQueues. On failure, fall through into fail: which
    // tears down everything allocated above via stream_close().
    is->read_tid     = SDL_CreateThread(read_thread, "read_thread", is);
    if (!is->read_tid) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
fail:
        stream_close(is);
        return NULL;
    }
    return is;
}

/* Switch to the next available stream of the given media type
 * (bound to the a/v/t keys in event_loop). */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    int old_index;
    AVStream *st;
    AVProgram *p = NULL;
    int nb_streams = is->ic->nb_streams;

    // Start scanning from the last stream of this type that was active.
    if (codec_type == AVMEDIA_TYPE_VIDEO) {
        start_index = is->last_video_stream;
        old_index = is->video_stream;
    } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
        start_index = is->last_audio_stream;
        old_index = is->audio_stream;
    } else {
        start_index = is->last_subtitle_stream;
        old_index = is->subtitle_stream;
    }
    stream_index = start_index;

    // When a video stream is active, restrict the search to the program that
    // contains it; from here on indices refer into p->stream_index[].
    if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
        p = av_find_program_from_stream(ic, NULL, is->video_stream);
        if (p) {
            nb_streams = p->nb_stream_indexes;
            for (start_index = 0; start_index < nb_streams; start_index++)
                if (p->stream_index[start_index] == stream_index)
                    break;
            if (start_index == nb_streams)
                start_index = -1;
            stream_index = start_index;
        }
    }

    // Walk the candidate streams circularly until one of the requested type
    // (with usable parameters) is found, or we wrap back to where we started.
    for (;;) {
        if (++stream_index >= nb_streams)
        {
            // Wrapping past the end for subtitles means "no subtitles".
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                stream_index = -1;
                is->last_subtitle_stream = -1;
                goto the_end;
            }
            if (start_index == -1)
                return;
            stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
        if (st->codecpar->codec_type == codec_type) {
            /* check that parameters are OK */
            switch (codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                // Audio needs a valid sample rate and channel count.
                if (st->codecpar->sample_rate != 0 &&
                    st->codecpar->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    // Map a program-relative index back to an absolute stream index.
    if (p && stream_index != -1)
        stream_index = p->stream_index[stream_index];
    av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
           av_get_media_type_string(codec_type),
           old_index,
           stream_index);

    // Close the old stream component, then open the newly selected one.
    stream_component_close(is, old_index);
    stream_component_open(is, stream_index);
}


/* Flip the global fullscreen flag and apply the new state to the window. */
static void toggle_full_screen(VideoState *is)
{
    int fullscreen = !is_full_screen;

    is_full_screen = fullscreen;
    SDL_SetWindowFullscreen(window, fullscreen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
}

/* Advance to the next usable display mode (video / waves / RDFT),
 * skipping any mode whose backing stream is not present. */
static void toggle_audio_display(VideoState *is)
{
    int mode = is->show_mode;

    do {
        mode = (mode + 1) % SHOW_MODE_NB;
    } while (mode != is->show_mode &&
             ((mode == SHOW_MODE_VIDEO && !is->video_st) ||
              (mode != SHOW_MODE_VIDEO && !is->audio_st)));

    if (is->show_mode != mode) {
        is->force_refresh = 1;
        is->show_mode = mode;
    }
}

/* Refresh the display in a loop until an SDL event becomes available,
 * then return it through *event. */
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
    double remaining_time = 0.0;
    SDL_PumpEvents();
    // Poll the SDL event queue without blocking; as long as it is empty,
    // keep driving the video refresh.
    while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
        if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
            // Hide the mouse cursor after CURSOR_HIDE_DELAY of inactivity.
            SDL_ShowCursor(0);
            cursor_hidden = 1;
        }

        // Sleep for the interval computed by video_refresh on the previous
        // pass, so the loop neither busy-spins nor misses a frame deadline.
        if (remaining_time > 0.0)
            av_usleep((int64_t)(remaining_time * 1000000.0));

        // Upper bound for the next sleep; video_refresh may shorten it.
        // It should stay below the frame interval.
        remaining_time = REFRESH_RATE;

        // Render the video frame (or the audio visualization), except when
        // paused with no forced refresh pending.
        if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
            video_refresh(is, &remaining_time);

        SDL_PumpEvents();
    }
}

/* Seek forward/backward by incr chapters relative to the current position. */
static void seek_chapter(VideoState *is, int incr)
{
    // Master clock (seconds) converted to AV_TIME_BASE units.
    int64_t pos = get_master_clock(is) * AV_TIME_BASE;
    int i;

    if (!is->ic->nb_chapters)
        return;

    /* find the current chapter */
    // After the loop, i is the index of the chapter containing pos
    // (it can be -1 when pos precedes the first chapter's start).
    for (i = 0; i < is->ic->nb_chapters; i++) {
        AVChapter *ch = is->ic->chapters[i];
        if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
            i--;
            break;
        }
    }

    // Step by incr chapters, clamped at the first one; stepping past the
    // last chapter is a no-op.
    i += incr;
    i = FFMAX(i, 0);
    if (i >= is->ic->nb_chapters)
        return;

    av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
    stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
                                 AV_TIME_BASE_Q), 0, 0);
}

/* handle an event sent by the GUI */
static void event_loop(VideoState *cur_stream)
{
    SDL_Event event;
    double incr, pos, frac;

    for (;;) {
        double x;
        // 不断播放视频，知道键盘事件触发
        refresh_loop_wait_event(cur_stream, &event);
        switch (event.type) {
        case SDL_KEYDOWN:
            // 按下退出键时，退出程序
            if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
                do_exit(cur_stream);
                break;
            }
            // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
            // 窗口未准备好
            if (!cur_stream->width)
                continue;
            switch (event.key.keysym.sym) {
            case SDLK_f:  // 按下f键，全屏播放
                toggle_full_screen(cur_stream);
                cur_stream->force_refresh = 1;
                break;
            case SDLK_p:  // 按下p 或space键，暂停播放
            case SDLK_SPACE:
                toggle_pause(cur_stream);
                break;
            case SDLK_m:  // 按下mute，静音播放
                toggle_mute(cur_stream);
                break;
            case SDLK_KP_MULTIPLY:  // 按下加或0按键调低音量
            case SDLK_0:
                update_volume(cur_stream, 1, SDL_VOLUME_STEP);
                break;
            case SDLK_KP_DIVIDE: // 按下减或9按键调低音量
            case SDLK_9:
                update_volume(cur_stream, -1, SDL_VOLUME_STEP);
                break;
            case SDLK_s: // S: Step to next frame  // 逐帧开始播放
                step_to_next_frame(cur_stream);
                break;
            case SDLK_a:  // a键切换音频流
                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v: // v键切换视频流
                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_c: //c键切换音视频流
                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_t: // t键切换字幕
                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w: // w键切换滤镜效果
#if CONFIG_AVFILTER
                if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
                    if (++cur_stream->vfilter_idx >= nb_vfilters)
                        cur_stream->vfilter_idx = 0;
                } else {
                    cur_stream->vfilter_idx = 0;
                    toggle_audio_display(cur_stream);
                }
#else
                toggle_audio_display(cur_stream);
#endif
                break;
            case SDLK_PAGEUP: // 上键快进
                if (cur_stream->ic->nb_chapters <= 1) {
                    incr = 600.0;
                    goto do_seek;
                }
                seek_chapter(cur_stream, 1);
                break;
            case SDLK_PAGEDOWN: // 下键快退
                if (cur_stream->ic->nb_chapters <= 1) {
                    incr = -600.0;
                    goto do_seek;
                }
                seek_chapter(cur_stream, -1);
                break;
            case SDLK_LEFT: // 慢退10s
                incr = seek_interval ? -seek_interval : -10.0;
                goto do_seek;
            case SDLK_RIGHT: // 慢进10s
                incr = seek_interval ? seek_interval : 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                    if (seek_by_bytes) {
                        pos = -1;
                        if (pos < 0 && cur_stream->video_stream >= 0)
                            pos = frame_queue_last_pos(&cur_stream->pictq);
                        if (pos < 0 && cur_stream->audio_stream >= 0)
                            pos = frame_queue_last_pos(&cur_stream->sampq);
                        if (pos < 0)
                            pos = avio_tell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        if (isnan(pos))
                            pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
                        pos += incr;
                        if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
                            pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit(cur_stream);
                break;
            }
            if (event.button.button == SDL_BUTTON_LEFT) {
                static int64_t last_mouse_left_click = 0;
                if (av_gettime_relative() - last_mouse_left_click <= 500000) {
                    toggle_full_screen(cur_stream);
                    cur_stream->force_refresh = 1;
                    last_mouse_left_click = 0;
                } else {
                    last_mouse_left_click = av_gettime_relative();
                }
            }
        case SDL_MOUSEMOTION:
            if (cursor_hidden) {
                SDL_ShowCursor(1);
                cursor_hidden = 0;
            }
            cursor_last_shown = av_gettime_relative();
            if (event.type == SDL_MOUSEBUTTONDOWN) {
                if (event.button.button != SDL_BUTTON_RIGHT)
                    break;
                x = event.button.x;
            } else {
                if (!(event.motion.state & SDL_BUTTON_RMASK))
                    break;
                x = event.motion.x;
            }
                if (seek_by_bytes || cur_stream->ic->duration <= 0) {
                    uint64_t size =  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                } else {
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns  = cur_stream->ic->duration / 1000000LL;
                    thh  = tns / 3600;
                    tmm  = (tns % 3600) / 60;
                    tss  = (tns % 60);
                    frac = x / cur_stream->width;
                    ns   = frac * tns;
                    hh   = ns / 3600;
                    mm   = (ns % 3600) / 60;
                    ss   = (ns % 60);
                    av_log(NULL, AV_LOG_INFO,
                           "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac * cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            break;
        case SDL_WINDOWEVENT:
            switch (event.window.event) {
                case SDL_WINDOWEVENT_SIZE_CHANGED:
                    screen_width  = cur_stream->width  = event.window.data1;
                    screen_height = cur_stream->height = event.window.data2;
                    if (cur_stream->vis_texture) {
                        SDL_DestroyTexture(cur_stream->vis_texture);
                        cur_stream->vis_texture = NULL;
                    }
                case SDL_WINDOWEVENT_EXPOSED:
                    cur_stream->force_refresh = 1;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit(cur_stream);
            break;
        default:
            break;
        }
    }
}

/* Deprecated -s alias: forward the value to the video_size option. */
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
{
    int ret;

    av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
    ret = opt_default(NULL, "video_size", arg);
    return ret;
}

/* -x: force the displayed window width, in pixels (1..INT_MAX). */
static int opt_width(void *optctx, const char *opt, const char *arg)
{
    double width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);

    screen_width = width;
    return 0;
}

/* -y: force the displayed window height, in pixels (1..INT_MAX). */
static int opt_height(void *optctx, const char *opt, const char *arg)
{
    double height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);

    screen_height = height;
    return 0;
}

/* -f: force the input demuxer named by arg. */
static int opt_format(void *optctx, const char *opt, const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (file_iformat)
        return 0;

    av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
    return AVERROR(EINVAL);
}

/* Deprecated -pix_fmt alias: forward the value to the pixel_format option. */
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
{
    int ret;

    av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
    ret = opt_default(NULL, "pixel_format", arg);
    return ret;
}

/* -sync: choose the master clock ("audio", "video" or "ext").
 * Exits the process on an unrecognized value. */
static int opt_sync(void *optctx, const char *opt, const char *arg)
{
    static const struct {
        const char *name;
        int type;
    } modes[] = {
        { "audio", AV_SYNC_AUDIO_MASTER },
        { "video", AV_SYNC_VIDEO_MASTER },
        { "ext",   AV_SYNC_EXTERNAL_CLOCK },
    };
    int i;

    for (i = 0; i < (int)(sizeof(modes) / sizeof(modes[0])); i++) {
        if (!strcmp(arg, modes[i].name)) {
            av_sync_type = modes[i].type;
            return 0;
        }
    }

    av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
    exit(1);
}

/**
 * Handler for the -ss option: set the playback start position.
 * @brief opt_seek
 * @param optctx unused option context
 * @param opt option name ("ss")
 * @param arg position string, e.g. "00:10"
 * @return 0 on success (parse_time_or_die exits on invalid input)
 */
static int opt_seek(void *optctx, const char *opt, const char *arg)
{
    // Parsed into microseconds, e.g. "00:10" = 10000000.
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

/* -t: play only "duration" worth of the input (stored in microseconds). */
static int opt_duration(void *optctx, const char *opt, const char *arg)
{
    int64_t parsed = parse_time_or_die(opt, arg, 1);

    duration = parsed;
    return 0;
}

/* -showmode: select the display mode by name ("video", "waves", "rdft")
 * or by its numeric index. */
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
{
    if (!strcmp(arg, "video"))
        show_mode = SHOW_MODE_VIDEO;
    else if (!strcmp(arg, "waves"))
        show_mode = SHOW_MODE_WAVES;
    else if (!strcmp(arg, "rdft"))
        show_mode = SHOW_MODE_RDFT;
    else
        show_mode = parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
    return 0;
}

/* Positional-argument handler: record the single input filename.
 * "-" is translated to the pipe protocol (standard input). */
static void opt_input_file(void *optctx, const char *filename)
{
    if (input_filename) {
        av_log(NULL, AV_LOG_FATAL,
               "Argument '%s' provided as input filename, but '%s' was already specified.\n",
                filename, input_filename);
        exit(1);
    }
    input_filename = strcmp(filename, "-") ? filename : "pipe:";
}

/* -codec:<spec> handler: force a decoder for the media type given by the
 * specifier after the colon ('a' = audio, 's' = subtitle, 'v' = video). */
static int opt_codec(void *optctx, const char *opt, const char *arg)
{
    const char *spec = strchr(opt, ':');

    if (!spec) {
        av_log(NULL, AV_LOG_ERROR,
               "No media specifier was specified in '%s' in option '%s'\n",
                arg, opt);
        return AVERROR(EINVAL);
    }
    spec++;

    if (*spec == 'a') {
        audio_codec_name = arg;
    } else if (*spec == 's') {
        subtitle_codec_name = arg;
    } else if (*spec == 'v') {
        video_codec_name = arg;
    } else {
        av_log(NULL, AV_LOG_ERROR,
               "Invalid media specifier '%s' in option '%s'\n", spec, opt);
        return AVERROR(EINVAL);
    }
    return 0;
}

// Dummy sink for the -i flag below, which exists only for ffmpeg-style
// command-line compatibility.
static int dummy;

/* Command-line option table. CMDUTILS_COMMON_OPTIONS expands to the generic
 * cmdutils entries (-h, -version, -loglevel, ...); the rest are ffplay's. */
static const OptionDef options[] = {
    CMDUTILS_COMMON_OPTIONS
    { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
    { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
    { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
    { "vn", OPT_BOOL, { &video_disable }, "disable video" },
    { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
    { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
    { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
    { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
    { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, { .func_arg = opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
    { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
    { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
    { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
    { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
    { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
    { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
    { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
    { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
    { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
    { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
#if CONFIG_AVFILTER
    { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
    { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
    { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
    { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
    { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
    { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &audio_codec_name }, "force audio decoder",    "decoder_name" },
    { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
    { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &video_codec_name }, "force video decoder",    "decoder_name" },
    { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
    { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
        "read and decode the streams to fill missing information with heuristics" },
    { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
    { NULL, },
};

/* Print a one-line program description and the command-line synopsis. */
static void show_usage(void)
{
    av_log(NULL, AV_LOG_INFO, "%s\n", "Simple media player");
    av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
    av_log(NULL, AV_LOG_INFO, "\n");
}

/*
 * cmdutils help hook: print the usage line, the option tables (basic then
 * expert-only), the per-library AVOption help, and finally the interactive
 * key bindings. The opt/arg parameters are part of the cmdutils callback
 * signature and are unused here.
 */
void show_help_default(const char *opt, const char *arg)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    /* First the non-expert options, then the OPT_EXPERT-flagged ones. */
    show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
    show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
    putchar('\n');
    show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
    show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
#if !CONFIG_AVFILTER
    show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
#else
    show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
#endif
    /* Interactive key bindings shown while a file is playing. */
    fputs("\nWhile playing:\n"
          "q, ESC              quit\n"
          "f                   toggle full screen\n"
          "p, SPC              pause\n"
          "m                   toggle mute\n"
          "9, 0                decrease and increase volume respectively\n"
          "/, *                decrease and increase volume respectively\n"
          "a                   cycle audio channel in the current program\n"
          "v                   cycle video channel\n"
          "t                   cycle subtitle channel in the current program\n"
          "c                   cycle program\n"
          "w                   cycle video filters or show modes\n"
          "s                   activate frame-step mode\n"
          "left/right          seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
          "down/up             seek backward/forward 1 minute\n"
          "page down/page up   seek backward/forward 10 minutes\n"
          "right mouse click   seek to percentage in file corresponding to fraction of width\n"
          "left double-click   toggle full screen\n",
          stdout);
}

/* Called from the main */
int main(int argc, char *argv[])
{
    int flags;
    /* Global player state: holds the demuxer/decoder threads, packet and
     * frame queues, and the playback clocks (allocated by stream_open()). */
    VideoState *is;

    /* Hardening: on Windows the default DLL search path includes the
     * current directory, which is exploitable; this removes it from the
     * search path (internally calls SetDllDirectory("")). Elsewhere it is
     * a no-op. */
    init_dynload();

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    /* Parse -loglevel/-v early so all subsequent messages honor it. */
    parse_loglevel(argc, argv, options);

    /* register all codecs, demux and protocols */
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avformat_network_init();

    init_opts();

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */

    /* Print the program banner (copyright, version, build configuration). */
    show_banner(argc, argv, options);

    /* Parse the remaining command-line options; non-option arguments are
     * treated as the input file via opt_input_file(). */
    parse_options(NULL, argc, argv, options, opt_input_file);

    /* An input file name (or URL) is mandatory. */
    if (!input_filename) {
        show_usage();
        av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
        av_log(NULL, AV_LOG_FATAL,
               "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit(1);
    }

    /* SDL initialization for display (video) and audio output. */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
    if (audio_disable)
        flags &= ~SDL_INIT_AUDIO;
    else {
        /* Try to work around an occasional ALSA buffer underflow issue when the
         * period size is NPOT due to ALSA resampling by forcing the buffer size. */
        if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
            SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
    }
    if (display_disable)
        flags &= ~SDL_INIT_VIDEO;
    if (SDL_Init (flags)) {
        av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
        av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
        exit(1);
    }

    /* Drop event types ffplay never consumes, to keep the queue small. */
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    if (!display_disable) {
        int flags = SDL_WINDOW_HIDDEN;
        if (alwaysontop)
#if SDL_VERSION_ATLEAST(2,0,5)
            flags |= SDL_WINDOW_ALWAYS_ON_TOP;
#else
            av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
#endif
        if (borderless)
            flags |= SDL_WINDOW_BORDERLESS;
        else
            flags |= SDL_WINDOW_RESIZABLE;
        /* Create the (initially hidden) SDL window used for video display. */
        window = SDL_CreateWindow(program_name,
                                  SDL_WINDOWPOS_UNDEFINED,
                                  SDL_WINDOWPOS_UNDEFINED,
                                  default_width,
                                  default_height,
                                  flags);
        SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
        if (window) {
            /* Prefer a hardware-accelerated, vsynced renderer; fall back to
             * whatever SDL can provide if that fails. */
            renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
            if (!renderer) {
                av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
                renderer = SDL_CreateRenderer(window, -1, 0);
            }
            /* Query renderer capabilities (texture formats etc.); these are
             * consulted later when uploading video frames. */
            if (renderer) {
                if (!SDL_GetRendererInfo(renderer, &renderer_info))
                    av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
            }
        }
        if (!window || !renderer || !renderer_info.num_texture_formats) {
            av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
            do_exit(NULL);
        }
    }

    /* stream_open() starts the playback pipeline threads:
     *   read_thread():     reads AVPackets from disk/network and queues them
     *                      into the per-stream PacketQueues;
     *   audio_thread():    takes AVPackets from the audio PacketQueue,
     *                      decodes them, and pushes AVFrames to a FrameQueue;
     *   video_thread():    same as above for the video PacketQueue;
     *   subtitle_thread(): likewise for subtitles. */
    is = stream_open(input_filename, file_iformat);
    if (!is) {
        av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
        do_exit(NULL);
    }

    /* Main event loop: handles user interaction (pause, seek, quit, ...)
     * and drives video refresh. */
    event_loop(is);

    /* never returns */

    return 0;
}
