
#include <assert.h> // assertions

#include <unistd.h> // usleep

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdlib>
#include <future>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <vector>

#include "glframework/core.h"
#include "glframework/checkError.h"
#include "glframework/shader.h"
#include "glframework/Application.h"
#include "third_party/threadpool/include/threadpool.h"

extern "C"
{
#include <alsa/asoundlib.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h> // required for avdevice_register_all / v4l2
}
#include <atomic>
#include <mutex>
// ---- State shared between the decoder thread and the render thread ----
GLuint vao;               // fullscreen-quad VAO, built in prepareVAO()
GLuint textures[3];       // Y / U / V plane textures, created in prepareVAO()
Shader *shader = nullptr; // YUV->RGB shader, created in prepareShader()
AVFrame *frame = nullptr; // latest decoded frame; allocated by decode_video()
int width = 0, height = 0; // video dimensions, set by decode_video()
// NOTE(review): these three vectors are never referenced in this file —
// presumably leftovers from an earlier plane-copy approach; confirm and remove.
std::vector<uint8_t> g_DataY;
std::vector<uint8_t> g_DataU;
std::vector<uint8_t> g_DataV;

// Taken by refreshFrame() while it uploads `frame`; intended to guard the
// shared frame against the decoder thread overwriting it mid-upload.
std::mutex dataMutex;

// Set by the decoder when a fresh frame is available; cleared by the renderer.
std::atomic<bool> g_FrameReady(false);
// Window-resize callback: keeps the GL viewport in sync with the new
// framebuffer size (in pixels). Shadows the global width/height on purpose.
void OnResize(int width, int height)
{
  GL_CALL(glViewport(0, 0, width, height));
  std::cout << "OnResize" << std::endl;
}

// Keyboard callback: logs the raw key code. `action` and `mods` are
// accepted to satisfy the callback signature but are currently unused.
void OnKey(int key, int action, int mods)
{
  std::cout << key << std::endl;
}

void prepareVAO()
{
  // 顶点位置（屏幕坐标系）
  float positions[] = {
      -1.f, -1.f, // 0:左下
      1.f, -1.f,  // 1:右下
      -1.f, 1.f,  // 2:左上
      1.f, 1.f    // 3:右上
  };

  // 修正后的UV映射
  float uvs[] = {0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f};

  // 索引数组（两个三角形）
  unsigned int indices[] = {0, 1, 2, 1, 3, 2};

  // 2 VBO创建
  GLuint posVbo;
  glGenBuffers(1, &posVbo);              // 创建
  glBindBuffer(GL_ARRAY_BUFFER, posVbo); // 绑定
  glBufferData(GL_ARRAY_BUFFER, sizeof(positions), positions,
               GL_STATIC_DRAW); // 灌数据

  GLuint uvVbo;
  glGenBuffers(1, &uvVbo);
  glBindBuffer(GL_ARRAY_BUFFER, uvVbo);
  glBufferData(GL_ARRAY_BUFFER, sizeof(uvs), uvs, GL_STATIC_DRAW);

  // 3 EBO创建
  GLuint ebo;
  glGenBuffers(1, &ebo);                      // 创建
  glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo); // 绑定
  glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices,
               GL_STATIC_DRAW); // 灌数据

  // 4 VAO创建
  glGenVertexArrays(1, &vao);
  glBindVertexArray(vao);

  // 5 绑定vbo ebo 加入属性描述信息
  // 5.1 加入位置属性描述信息
  glBindBuffer(GL_ARRAY_BUFFER, posVbo);
  glEnableVertexAttribArray(0);
  glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), (void *)0);

  // 5.2 加入uv属性描述数据
  glBindBuffer(GL_ARRAY_BUFFER, uvVbo);
  glEnableVertexAttribArray(1);
  glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), (void *)0);
  glEnableVertexAttribArray(1);

  glGenTextures(3, textures);

  // Y分量纹理 (全分辨率)
  glBindTexture(GL_TEXTURE_2D, textures[0]);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED,
               GL_UNSIGNED_BYTE, NULL);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

  // U分量纹理 (1/4分辨率)
  glBindTexture(GL_TEXTURE_2D, textures[1]);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width / 2, height / 2, 0, GL_RED,
               GL_UNSIGNED_BYTE, NULL);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

  // V分量纹理 (1/4分辨率)
  glBindTexture(GL_TEXTURE_2D, textures[2]);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width / 2, height / 2, 0, GL_RED,
               GL_UNSIGNED_BYTE, NULL);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

  // 5.3 加入ebo到当前的vao
  glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);

  glBindVertexArray(0);
}

// Compile and link the YUV->RGB shader program from the asset files.
// The Shader object is heap-allocated into the global `shader` and lives
// for the remainder of the process (never deleted).
void prepareShader()
{
  shader =
      new Shader("assets/shaders/vertex.glsl", "assets/shaders/fragment.glsl");
}

void render()
{
  std::cout << "rendor" << std::endl;
  // 执行opengl画布清理操作
  GL_CALL(glClear(GL_COLOR_BUFFER_BIT));

  // 绑定当前的program
  shader->begin();

  glActiveTexture(GL_TEXTURE0);
  glBindTexture(GL_TEXTURE_2D, textures[0]);
  glUniform1i(glGetUniformLocation(shader->mProgram, "yTexture"), 0);

  glActiveTexture(GL_TEXTURE1);
  glBindTexture(GL_TEXTURE_2D, textures[1]);
  glUniform1i(glGetUniformLocation(shader->mProgram, "uTexture"), 1);

  glActiveTexture(GL_TEXTURE2);
  glBindTexture(GL_TEXTURE_2D, textures[2]);
  glUniform1i(glGetUniformLocation(shader->mProgram, "vTexture"), 2);
  // 绑定当前的vao
  GL_CALL(glBindVertexArray(vao));

  // 发出绘制指令
  GL_CALL(glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0));
  GL_CALL(glBindVertexArray(0));

  shader->end();
}

bool decode_video(const char *input_file)
{
  AVFormatContext *format_ctx = nullptr;
  unsigned int video_stream_index = 0;

  int frame_number = 0;

  // 初始化 FFmpeg网络组件
  avformat_network_init();
  // const AVInputFormat *fmt = av_find_input_format("flv"); //
  // RTMP通常封装为FLV 打开输入文件
  if (avformat_open_input(&format_ctx, input_file, nullptr, nullptr) != 0)
  {
    return false;
  }

  /*
  * 流信息的含义
      在多媒体文件中，流（Stream）指的是视频流、音频流和字幕流等内容。每个流包含了特定类型的数据，例如：
      视频流：包含视频帧的数据。
      音频流：包含音频样本的数据。
      字幕流：包含字幕文本的数据。
  avformat_find_stream_info
      这个函数会对多媒体文件进行分析，并填充 AVFormatContext
  结构中的流信息，具体包括：
          流的数量：例如，一个视频文件可能包含一个视频流和一个音频流。
          流的类型：如视频流、音频流、字幕流等。
          编解码器参数：包括视频分辨率、音频采样率等信息。
          时基（Time Base）：定义了时间戳的时间单位。
  **/
  if (avformat_find_stream_info(format_ctx, nullptr) < 0)
  {
    return false;
  }
  else
  {
    av_dump_format(format_ctx, 0, input_file, 0);
  }

  // 查找视频流
  for (unsigned i = 0; i < format_ctx->nb_streams;
       ++i)
  { // nb_streams 是流的数量（视频流、音频流、字幕流）
    if (format_ctx->streams[i]->codecpar->codec_type ==
        AVMEDIA_TYPE_VIDEO)
    { // streams 记录着所有的流
      video_stream_index = i;
      break;
    }
  }

  if (video_stream_index == -1)
  {
    return false;
  }

  // AVCodecParameters 用于存储与特定流（视频流、音频流等）相关的编解码器参数
  // AVCodec 结构体包含了编解码器的名称、类型、ID等基本信息
  const AVCodec *codec = avcodec_find_decoder(
      format_ctx->streams[video_stream_index]->codecpar->codec_id);
  if (!codec)
  {
    return false;
  }

  // 为指定的编码器codec分配初始化一个编解码器上下文，但只是空白框架
  AVCodecContext *codec_ctx = avcodec_alloc_context3(codec);
  if (!codec_ctx)
  {
    return false;
  }

  // 将流的编解码器参数 (AVCodecParameters) 复制到编解码器上下文
  if (avcodec_parameters_to_context(
          codec_ctx, format_ctx->streams[video_stream_index]->codecpar) < 0)
  {
    return false;
  }

  // 继续初始化编解码器参数
  if (avcodec_open2(codec_ctx, codec, nullptr) < 0)
  {
    return false;
  }
  AVPixelFormat original_pix_fmt = codec_ctx->pix_fmt; // 输出原始像素格式
  // 分配AVFrame，用于存储解码后的帧数据的各种信息（图像像素数据、宽高、时间戳等）
  frame = av_frame_alloc();
  if (!frame)
  {
    std::cerr << "Could not allocate frame." << std::endl;
    av_frame_free(&frame);
    avcodec_free_context(&codec_ctx);
    avformat_close_input(&format_ctx);
  }
  std::cout << "codec_ctx->width:" << codec_ctx->width << std::endl;
  width = codec_ctx->width;
  height = codec_ctx->height;
  std::cout << "codec_ctx->width:" << codec_ctx->width << std::endl;
  // 初始化创建一个图像缩放和颜色空间转换的上下文 SwsContext
  struct SwsContext *sws_ctx =
      sws_getContext(codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
                     codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24,
                     SWS_BILINEAR, nullptr, nullptr, nullptr);
  if (!sws_ctx)
  {
    std::cerr << "Could not initialize the conversion context." << std::endl;
    av_frame_free(&frame);
    avcodec_free_context(&codec_ctx);
    avformat_close_input(&format_ctx);
  }

  // AVPacket：存储的是压缩编码的数据包（视频流、音频流等都有可能）
  // 一个AVPacket可能对应多个AVFrame
  AVPacket packet;
  int64_t previous_pts = 0;
  while (av_read_frame(format_ctx, &packet) >= 0)
  {
    auto start_time = std::chrono::high_resolution_clock::now();
    if (packet.stream_index == video_stream_index)
    { // 只拿视频流
      if (avcodec_send_packet(codec_ctx, &packet) <
          0)
      {                           // 发送原始数据包,并不会立即解码，而是放到输入缓冲区
        av_packet_unref(&packet); // 释放 packet 中的所有动态分配的内存和资源
        continue;
      }
      int cycle_count = 0;
      while (avcodec_receive_frame(codec_ctx, frame) ==
             0)
      { // 解码，一个packet可能对应多个frame帧
        frame_number++;
        std::cout << "frame->width:" << frame->width << std::endl;
        if (frame->width != codec_ctx->width ||
            frame->height != codec_ctx->height)
        {
          std::cerr << "Frame size does not match context settings"
                    << std::endl;
          return false;
        }
        if (packet.data == nullptr || packet.size <= 0)
        {
          std::cerr << "Incomplete packet data" << std::endl;
          return false;
        }
        int64_t pts = frame->pts; // 根据视频参数设置播放速率
        double frame_delay =
            av_q2d(format_ctx->streams[video_stream_index]->time_base) *
            (pts - previous_pts);
        // std::cout << "time base: " <<
        // av_q2d(format_ctx->streams[video_stream_index]->time_base) << ", pts:
        // " << pts << ", previous_pts: " << previous_pts << ", pts -
        // previous_pts: "<< pts - previous_pts << ", frame_delay: " <<
        // frame_delay << std::endl;
        previous_pts = pts;

        auto target_time =
            start_time +
            std::chrono::microseconds(static_cast<long>(frame_delay * 1e6));
        auto usetime =
            std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::high_resolution_clock::now() - start_time)
                .count();
        auto diff = frame_delay * 1e6 - static_cast<double>(usetime);
        if (diff > 1000)
        {
          std::this_thread::sleep_for(
              std::chrono::microseconds(static_cast<int>(diff)));
        }
        g_FrameReady.store(true, std::memory_order_release);
        start_time = std::chrono::high_resolution_clock::now();
      }
      av_packet_unref(&packet);
    }
  }
  return true;
}

bool refreshFrame()
{
  std::lock_guard<std::mutex> lock(dataMutex); // 加锁
  if (frame->width == 0 || frame->height == 0)
  {
    return false;
  }
  std::cout << "efreshFrame frame->width" << frame->width << std::endl;
  glBindTexture(GL_TEXTURE_2D, textures[0]);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame->width, frame->height, 0, GL_RED,
               GL_UNSIGNED_BYTE, frame->data[0]);

  // U分量
  glBindTexture(GL_TEXTURE_2D, textures[1]);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame->width / 2, frame->height / 2, 0,
               GL_RED, GL_UNSIGNED_BYTE, frame->data[1]);

  // V分量
  glBindTexture(GL_TEXTURE_2D, textures[2]);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame->width / 2, frame->height / 2, 0,
               GL_RED, GL_UNSIGNED_BYTE, frame->data[2]);
  return true;
}

int playAudio()
{
  const char *filePath = "/home/bzsg/workspace/test.mp4";
  const char outFileName[] = "./test.txt";
  FILE *file = fopen(outFileName, "w+b");
  AVFormatContext *fmtCtx = nullptr;
  AVPacket *pkt = nullptr;
  AVFrame *frame = nullptr;
  AVCodecContext *codecCtx = nullptr;
  const AVCodec *codec = nullptr;
  int ret = 0;
  int audioStreamIndex = -1;

  // 初始化FFmpeg库
  avformat_network_init(); // 支持网络流

  // 1. 打开输入文件
  if ((ret = avformat_open_input(&fmtCtx, filePath, nullptr, nullptr)) != 0)
  {
    char errBuf[AV_ERROR_MAX_STRING_SIZE];
    av_strerror(ret, errBuf, sizeof(errBuf)); // 将错误信息写入到error buf
    std::cerr << "Cannot open video: " << errBuf << std::endl;
    return -1;
  }

  // 2. 获取流信息
  if ((ret = avformat_find_stream_info(fmtCtx, nullptr)) < 0)
  {
    std::cerr << "Cannot find stream info" << std::endl;
    avformat_close_input(&fmtCtx);
    return -1;
  }
  else
  {
    av_dump_format(fmtCtx, 0, filePath, 0); // 输出文件信息
  }

  // 3. 查找音频流
  for (unsigned int i = 0; i < fmtCtx->nb_streams; i++)
  {
    if (fmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
    {
      audioStreamIndex = i;
      std::cout << "find audio stream i:" << i << std::endl;
    }
  }

  if (audioStreamIndex == -1)
  {
    std::cerr << "No audio stream found" << std::endl;
    avformat_close_input(&fmtCtx);
    return -1;
  }

  // 4. 获取视频解码器
  AVCodecParameters *codecPar = fmtCtx->streams[audioStreamIndex]->codecpar;
  codec = avcodec_find_decoder(codecPar->codec_id);
  if (!codec)
  {
    std::cerr << "Unsupported codec" << std::endl;
    avformat_close_input(&fmtCtx);
    return -1;
  }

  // 5. 创建解码器上下文
  codecCtx = avcodec_alloc_context3(codec);
  if (!codecCtx)
  {
    std::cerr << "Failed to allocate codec context" << std::endl;
    avformat_close_input(&fmtCtx);
    return -1;
  }

  // 6. 复制编解码器参数
  if ((ret = avcodec_parameters_to_context(codecCtx, codecPar)) < 0)
  {
    std::cerr << "Failed to copy codec parameters" << std::endl;
    avcodec_free_context(&codecCtx);
    avformat_close_input(&fmtCtx);
    return -1;
  }

  // 7. 打开解码器
  if ((ret = avcodec_open2(codecCtx, codec, nullptr)) < 0)
  {
    std::cerr << "Failed to open codec" << std::endl;
    avcodec_free_context(&codecCtx);
    avformat_close_input(&fmtCtx);
    return -1;
  }

  // 初始化重采样器 (转PCM S16)
  AVChannelLayout out_ch_layout = AV_CHANNEL_LAYOUT_STEREO; // 输出声道布局
  SwrContext *swrCtx = nullptr;
  swr_alloc_set_opts2(&swrCtx,
                      &out_ch_layout,        // 输出布局
                      AV_SAMPLE_FMT_S16,     // 输出格式
                      codecCtx->sample_rate, // 输出采样率
                      &codecCtx->ch_layout,  // 输入布局
                      codecCtx->sample_fmt,  // 输入格式
                      codecCtx->sample_rate, // 输入采样率
                      0, nullptr);
  swr_init(swrCtx);
  // 获取实际声道数（替代硬编码的2）
  int out_channels = out_ch_layout.nb_channels;

  // 初始化ALSA
  snd_pcm_t *pcmHandle;
  if (snd_pcm_open(&pcmHandle, "default", SND_PCM_STREAM_PLAYBACK, 0) < 0)
  {
    std::cerr << "ALSA open error" << std::endl;
    return -1;
  }

  // 设置ALSA参数
  snd_pcm_hw_params_t *hwParams;
  snd_pcm_hw_params_alloca(&hwParams);
  snd_pcm_hw_params_any(pcmHandle, hwParams);
  snd_pcm_hw_params_set_access(pcmHandle, hwParams,
                               SND_PCM_ACCESS_RW_INTERLEAVED);
  snd_pcm_hw_params_set_format(pcmHandle, hwParams, SND_PCM_FORMAT_S16_LE);
  snd_pcm_hw_params_set_channels(pcmHandle, hwParams, out_channels); // 立体声
  snd_pcm_hw_params_set_rate(pcmHandle, hwParams, codecCtx->sample_rate, 0);
  unsigned int buffer_time = 50000;
  snd_pcm_hw_params_set_buffer_time_near(pcmHandle, hwParams, &buffer_time, 0);
  unsigned int period_time = buffer_time / 4;
  snd_pcm_hw_params_set_period_time_near(pcmHandle, hwParams, &period_time, 0);
  snd_pcm_hw_params(pcmHandle, hwParams);
  // 8. 准备数据包
  pkt = av_packet_alloc();
  frame = av_frame_alloc();
  uint8_t *pcmBuffer = nullptr;
  if (!pkt || !frame)
  {
    std::cerr << "Failed to allocate packet/frame" << std::endl;
    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_free_context(&codecCtx);
    avformat_close_input(&fmtCtx);
    return -1;
  }
  double total_duration_sec = 0;
  int frame_count = 0;
  while (av_read_frame(fmtCtx, pkt) >= 0)
  {
    if (pkt->stream_index == audioStreamIndex)
    {
      // 发送数据包
      int send_ret = avcodec_send_packet(codecCtx, pkt);
      if (send_ret < 0)
      {
        std::cerr << "发送包错误: " << av_err2str(send_ret) << std::endl;
        continue;
      }
      // 接收并处理所有解码帧
      while (true)
      {
        av_frame_unref(frame); // 关键：重置帧状态
        int recv_ret = avcodec_receive_frame(codecCtx, frame);
        // 处理接收状态
        if (recv_ret == AVERROR(EAGAIN) || recv_ret == AVERROR_EOF)
        {
          break; // 需要新数据或结束
        }
        else if (recv_ret < 0)
        {
          std::cerr << "接收帧错误: " << av_err2str(recv_ret) << std::endl;
          break;
        }
        // 计算时间基转换因子
        AVRational time_base = fmtCtx->streams[audioStreamIndex]->time_base;
        double time_base_factor = av_q2d(time_base);

        // 计算当前帧时长（秒）
        double frame_duration =
            (frame->duration > 0)
                ? frame->duration * time_base_factor
                : frame->nb_samples /
                      (double)codecCtx->sample_rate; // 备用计算

        // 打印关键信息
        printf("Frame %d: PTS=%.3fs, Duration=%.3fs, nb_samples=%d\n",
               frame_count, frame->pts * time_base_factor, frame_duration,
               frame->nb_samples);
        fprintf(file, "Frame %d: PTS=%.3fs, Duration=%.3fs, nb_samples=%d\n",
                frame_count, frame->pts * time_base_factor, frame_duration,
                frame->nb_samples);
        // 累计统计
        total_duration_sec += frame_duration;
        frame_count++;
        // ==== 重采样核心部分 ====
        // 计算输出样本数（带错误检查）
        int outSamples = swr_get_out_samples(swrCtx, frame->nb_samples);
        if (outSamples < 0)
        {
          std::cerr << "swr_get_out_samples错误: " << av_err2str(outSamples)
                    << std::endl;
          continue;
        }

        // 使用 av_samples_alloc 分配单缓冲区
        uint8_t *audioBuf = nullptr; // 单缓冲区指针
        int linesize;
        int alloc_ret =
            av_samples_alloc(&audioBuf,         // 输出缓冲区指针
                             &linesize,         // 输出行大小
                             out_channels,      // 输出声道数
                             outSamples,        // 每通道样本数
                             AV_SAMPLE_FMT_S16, // 输出格式(S16交错)
                             0                  // 对齐方式(0=默认)
            );

        if (alloc_ret < 0)
        {
          std::cerr << "缓冲区分配失败: " << av_err2str(alloc_ret) << std::endl;
          continue;
        }

        // 创建指针数组用于swr_convert（交错格式只需第一个指针）
        uint8_t *audioData[1] = {audioBuf}; // 单元素指针数组

        // 执行重采样转换
        int realSamples =
            swr_convert(swrCtx,
                        audioData,                     // 输出缓冲区数组
                        outSamples,                    // 输出容量
                        (const uint8_t **)frame->data, // 输入数据
                        frame->nb_samples);

        if (realSamples < 0)
        {
          std::cerr << "重采样错误: " << av_err2str(realSamples) << std::endl;
          av_freep(&audioBuf); // 安全释放内存
          continue;
        }

        // ==== ALSA播放核心部分 ====
        snd_pcm_sframes_t write_ret =
            snd_pcm_writei(pcmHandle,
                           audioBuf, // 直接使用交错格式的缓冲区
                           realSamples);

        // 处理ALSA写入错误（保持不变）
        if (write_ret == -EPIPE)
        {
          std::cerr << "ALSA缓冲区欠载" << std::endl;
          snd_pcm_prepare(pcmHandle);
        }
        else if (write_ret < 0)
        {
          std::cerr << "ALSA写入错误: " << snd_strerror(write_ret) << std::endl;
        }
        else if (write_ret != realSamples)
        {
          std::cerr << "ALSA部分写入: " << write_ret << "/" << realSamples
                    << std::endl;
        }

        // 释放单缓冲区
        av_freep(&audioBuf); // 使用av_freep避免悬空指针
      }
      // 获取文件总时长
      double file_duration_sec = fmtCtx->duration / (double)AV_TIME_BASE;

      // 打印最终统计
      printf("\n===== 诊断报告 =====\n");
      printf("处理帧数: %d\n", frame_count);
      printf("累计时长: %.3f 秒\n", total_duration_sec);
      printf("文件时长: %.3f 秒\n", file_duration_sec);
      printf("缺失时长: %.3f 秒\n", file_duration_sec - total_duration_sec);

      fprintf(file, "处理帧数: %d\n", frame_count);
      fprintf(file, "累计时长: %.3f 秒\n", total_duration_sec);
      fprintf(file, "文件时长: %.3f 秒\n", file_duration_sec);
      fprintf(file, "缺失时长: %.3f 秒\n",
              file_duration_sec - total_duration_sec);
      av_packet_unref(pkt);
    }
  }
  std::cout << "等待ALSA播放剩余缓冲数据..." << std::endl;
  int drain_ret = snd_pcm_drain(pcmHandle);
  if (drain_ret < 0)
  {
    std::cerr << "ALSA排空错误: " << snd_strerror(drain_ret) << std::endl;
  }
  else
  {
    std::cout << "ALSA播放队列已完全排空" << std::endl;
  }
  std::cout << "end play" << std::endl;
  // 清理资源
  free(pcmBuffer);
  av_frame_free(&frame);
  av_packet_free(&pkt);
  swr_free(&swrCtx);
  avcodec_free_context(&codecCtx);
  avformat_close_input(&fmtCtx);
  snd_pcm_close(pcmHandle);
  return 0;
}
// int main()
// {
//   const char *input_file = "/home/bzsg/workspace/test.mp4";
//   std::cout << "start ok... width: " << width << ", height: " << height
//             << ", frame_ready: " << g_FrameReady << std::endl;
//   ThreadPool threadPool;
//   threadPool.start(3);
//   threadPool.submitTask(decode_video, input_file);
//   if (!app->init(800, 600))
//   {
//     return -1;
//   }

//   app->setResizeCallback(OnResize);
//   app->setKeyBoardCallback(OnKey);

//   // 设置opengl视口以及清理颜色
//   GL_CALL(glViewport(0, 0, 800, 600));
//   GL_CALL(glClearColor(0.2f, 0.3f, 0.3f, 1.0f));

//   prepareShader();
//   prepareVAO();
//   int frameCount = 0;
//   int aviableframeCount = 0;
//   auto lastTime = std::chrono::high_resolution_clock::now();
//   while (app->update())
//   {
//     if (g_FrameReady.load(std::memory_order_acquire))
//     {
//       if (refreshFrame())
//       {
//         aviableframeCount++;
//       };
//       g_FrameReady.store(false, std::memory_order_release);
//     }
//     render();
//     // 帧率统计
//     frameCount++;
//     auto currentTime = std::chrono::high_resolution_clock::now();
//     auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
//                        currentTime - lastTime)
//                        .count();

//     // 每1秒输出帧率
//     if (elapsed >= 1000)
//     {
//       double fps = frameCount * 1000.0 / elapsed;
//       std::cout << "FPS: " << fps << std::endl;
//       double aviablefps = aviableframeCount * 1000.0 / elapsed;
//       std::cout << "aviableFPS: " << aviablefps << std::endl;
//       // 重置计数器
//       frameCount = 0;
//       aviableframeCount = 0;
//       lastTime = currentTime;
//     }
//   }

//   app->destroy();

//   return 0;
// }

// Drain any packets still buffered inside `codecCtx` and mux them into
// `fmtCtx` on stream `vStreamIndex`. Returns 0 on success (or when the
// codec buffers nothing), otherwise the last write error code.
int flush_encoder(AVFormatContext *fmtCtx, AVCodecContext *codecCtx, int vStreamIndex)
{
  int ret = 0;
  AVPacket *enc_pkt = av_packet_alloc();
  if (!enc_pkt)
    return AVERROR(ENOMEM);

  // Codecs without AV_CODEC_CAP_DELAY buffer nothing — nothing to flush.
  // (The original returned here without freeing enc_pkt.)
  if (!(codecCtx->codec->capabilities & AV_CODEC_CAP_DELAY))
  {
    av_packet_free(&enc_pkt);
    return 0;
  }

  printf("Flushing stream #%d encoder\n", vStreamIndex);
  // Sending a null frame puts the encoder into draining mode.
  if (avcodec_send_frame(codecCtx, nullptr) >= 0)
  {
    while (avcodec_receive_packet(codecCtx, enc_pkt) >= 0)
    {
      printf("success encoder 1 frame.\n");

      // Prepare the packet for muxing: tag the stream and rescale the
      // timestamps from codec time base to stream time base.
      enc_pkt->stream_index = vStreamIndex;
      av_packet_rescale_ts(enc_pkt, codecCtx->time_base,
                           fmtCtx->streams[vStreamIndex]->time_base);
      ret = av_interleaved_write_frame(fmtCtx, enc_pkt);
      if (ret < 0)
      {
        break;
      }
    }
  }

  // av_packet_free both unrefs and frees — the original only unref'd,
  // leaking the AVPacket itself.
  av_packet_free(&enc_pkt);

  return ret;
}

int main()
{
  int ret = 0;
  avdevice_register_all();

  AVFormatContext *inFmtCtx = avformat_alloc_context();
  AVCodecContext *inCodecCtx = NULL;
  const AVCodec *inCodec = NULL;
  AVPacket *inPkt = av_packet_alloc();
  AVFrame *srcFrame = av_frame_alloc();
  AVFrame *yuvFrame = av_frame_alloc();

  // 打开输出文件，并填充fmtCtx数据
  AVFormatContext *outFmtCtx = avformat_alloc_context();
  const AVOutputFormat *outFmt = NULL;
  AVCodecContext *outCodecCtx = NULL;
  const AVCodec *outCodec = NULL;
  AVStream *outVStream = NULL;

  AVPacket *outPkt = av_packet_alloc();

  struct SwsContext *img_ctx = NULL;

  int inVideoStreamIndex = -1;

  do
  {
    /////////////解码器部分//////////////////////
    // 打开摄像头
    const AVInputFormat *inFmt = av_find_input_format("v4l2");
    if (avformat_open_input(&inFmtCtx, "/dev/video0", inFmt, NULL) < 0)
    {
      printf("Cannot open camera.\n");
      break;
    }

    if (avformat_find_stream_info(inFmtCtx, NULL) < 0)
    {
      printf("Cannot find any stream in file.\n");
      break;
    }

    for (uint32_t i = 0; i < inFmtCtx->nb_streams; i++)
    {
      if (inFmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
      {
        inVideoStreamIndex = i;
        break;
      }
    }
    if (inVideoStreamIndex == -1)
    {
      printf("Cannot find video stream in file.\n");
      break;
    }

    AVCodecParameters *inVideoCodecPara = inFmtCtx->streams[inVideoStreamIndex]->codecpar;
    if (!(inCodec = avcodec_find_decoder(inVideoCodecPara->codec_id)))
    {
      printf("Cannot find valid video decoder.\n");
      break;
    }
    if (!(inCodecCtx = avcodec_alloc_context3(inCodec)))
    {
      printf("Cannot alloc valid decode codec context.\n");
      break;
    }
    if (avcodec_parameters_to_context(inCodecCtx, inVideoCodecPara) < 0)
    {
      printf("Cannot initialize parameters.\n");
      break;
    }

    if (avcodec_open2(inCodecCtx, inCodec, NULL) < 0)
    {
      printf("Cannot open codec.\n");
      break;
    }

    img_ctx = sws_getContext(inCodecCtx->width,
                             inCodecCtx->height,
                             inCodecCtx->pix_fmt,
                             inCodecCtx->width,
                             inCodecCtx->height,
                             AV_PIX_FMT_YUV420P,
                             SWS_BICUBIC,
                             NULL, NULL, NULL);

    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
                                            inCodecCtx->width,
                                            inCodecCtx->height, 1);
    uint8_t *out_buffer = (unsigned char *)av_malloc(numBytes * sizeof(unsigned char));

    ret = av_image_fill_arrays(yuvFrame->data,
                               yuvFrame->linesize,
                               out_buffer,
                               AV_PIX_FMT_YUV420P,
                               inCodecCtx->width,
                               inCodecCtx->height,
                               1);
    if (ret < 0)
    {
      printf("Fill arrays failed.\n");
      break;
    }
    //////////////解码器部分结束/////////////////////

    //////////////编码器部分开始/////////////////////
    const char *outFile = "camera.h264";

    if (avformat_alloc_output_context2(&outFmtCtx, NULL, NULL, outFile) < 0)
    {
      printf("Cannot alloc output file context.\n");
      break;
    }
    outFmt = outFmtCtx->oformat;

    // 打开输出文件
    if (avio_open(&outFmtCtx->pb, outFile, AVIO_FLAG_READ_WRITE) < 0)
    {
      printf("output file open failed.\n");
      break;
    }

    // 创建h264视频流，并设置参数
    outVStream = avformat_new_stream(outFmtCtx, outCodec);
    if (outVStream == NULL)
    {
      printf("create new video stream fialed.\n");
      break;
    }
    outVStream->time_base.den = 60;
    outVStream->time_base.num = 1;

    // 编码参数相关
    AVCodecParameters *outCodecPara = outFmtCtx->streams[outVStream->index]->codecpar;
    outCodecPara->codec_type = AVMEDIA_TYPE_VIDEO;
    outCodecPara->codec_id = outFmt->video_codec;
    outCodecPara->width = 1920;
    outCodecPara->height = 1080;
    outCodecPara->bit_rate = 110000;

    // 查找编码器
    outCodec = avcodec_find_encoder(outFmt->video_codec);
    if (outCodec == NULL)
    {
      printf("Cannot find any encoder.\n");
      break;
    }

    // 设置编码器内容
    outCodecCtx = avcodec_alloc_context3(outCodec);
    avcodec_parameters_to_context(outCodecCtx, outCodecPara);
    if (outCodecCtx == NULL)
    {
      printf("Cannot alloc output codec content.\n");
      break;
    }
    outCodecCtx->codec_id = outFmt->video_codec;
    outCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    outCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    outCodecCtx->width = inCodecCtx->width;
    outCodecCtx->height = inCodecCtx->height;
    outCodecCtx->time_base.num = 1;
    outCodecCtx->time_base.den = 60;
    outCodecCtx->bit_rate = 110000;
    outCodecCtx->gop_size = 10;

    if (outCodecCtx->codec_id == AV_CODEC_ID_H264)
    {
      outCodecCtx->qmin = 10;
      outCodecCtx->qmax = 51;
      outCodecCtx->qcompress = (float)0.6;
    }
    else if (outCodecCtx->codec_id == AV_CODEC_ID_MPEG2VIDEO)
    {
      outCodecCtx->max_b_frames = 2;
    }
    else if (outCodecCtx->codec_id == AV_CODEC_ID_MPEG1VIDEO)
    {
      outCodecCtx->mb_decision = 2;
    }

    // 打开编码器
    if (avcodec_open2(outCodecCtx, outCodec, NULL) < 0)
    {
      printf("Open encoder failed.\n");
      break;
    }
    ///////////////编码器部分结束////////////////////

    ///////////////编解码部分//////////////////////
    yuvFrame->format = outCodecCtx->pix_fmt;
    yuvFrame->width = outCodecCtx->width;
    yuvFrame->height = outCodecCtx->height;

    ret = avformat_write_header(outFmtCtx, NULL);

    int count = 0;
    while (av_read_frame(inFmtCtx, inPkt) >= 0 && count < 50)
    {
      if (inPkt->stream_index == inVideoStreamIndex)
      {
        if (avcodec_send_packet(inCodecCtx, inPkt) >= 0)
        {
          while ((ret = avcodec_receive_frame(inCodecCtx, srcFrame)) >= 0)
          {
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
              return -1;
            else if (ret < 0)
            {
              fprintf(stderr, "Error during decoding\n");
              exit(1);
            }
            sws_scale(img_ctx,
                      (const uint8_t *const *)srcFrame->data,
                      srcFrame->linesize,
                      0, inCodecCtx->height,
                      yuvFrame->data, yuvFrame->linesize);

            yuvFrame->pts = srcFrame->pts;
            // encode
            if (avcodec_send_frame(outCodecCtx, yuvFrame) >= 0)
            {
              if (avcodec_receive_packet(outCodecCtx, outPkt) >= 0)
              {
                printf("encode %d frame.\n", count);
                ++count;
                outPkt->stream_index = outVStream->index;
                av_packet_rescale_ts(outPkt, outCodecCtx->time_base,
                                     outVStream->time_base);
                outPkt->pos = -1;
                av_interleaved_write_frame(outFmtCtx, outPkt);
                av_packet_unref(outPkt);
              }
            }

            usleep(1000 * 24);
          }
        }
        av_packet_unref(inPkt);
        fflush(stdout);
      }
    }

    ret = flush_encoder(outFmtCtx, outCodecCtx, outVStream->index);
    if (ret < 0)
    {
      printf("flushing encoder failed.\n");
      break;
    }

    av_write_trailer(outFmtCtx);
    ////////////////编解码部分结束////////////////
  } while (0);

  ///////////内存释放部分/////////////////////////
  av_packet_free(&inPkt);
  avcodec_free_context(&inCodecCtx);
  avformat_close_input(&inFmtCtx);
  av_frame_free(&srcFrame);
  av_frame_free(&yuvFrame);

  av_packet_free(&outPkt);
  avcodec_free_context(&outCodecCtx);
  avformat_close_input(&outFmtCtx);

  return 0;
}