#include "fvideodecode.h"

#include <QDebug>

extern "C" {
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
}

// Construct the decode worker; the thread is not started here —
// decoding begins only after a successful initAVStream() call.
FVideoDecode::FVideoDecode(QObject *parent)
    : QThread(parent)
{
    // No additional setup required.
}

FVideoDecode::~FVideoDecode()
{
    // Signal the decode loop to stop: requestInterruption() makes the
    // loop condition in run() false, and the null packet acts as a wakeup
    // sentinel in case run() is blocked inside a queue dequeue.
    requestInterruption();
    addAVPacket(nullptr);
    wait(); // join the thread before tearing down the decoder context
    if (m_pAVDecoderCtx)
        avcodec_free_context(&m_pAVDecoderCtx);
}

// Queue a demuxed packet for the decode thread. A nullptr packet is the
// shutdown sentinel consumed by run(). Packets arriving before
// initAVStream() has attached a stream are ignored.
void FVideoDecode::addAVPacket(AVPacket *packet)
{
    if (m_pAVStream == nullptr)
        return;

    // Streams with a known duration (seekable sources) are buffered in
    // m_queue2; everything else (e.g. live sources) goes to m_queue.
    const bool seekable = m_pAVStream->duration > 0;
    (seekable ? m_queue2 : m_queue).enqueue(packet);
}

void FVideoDecode::run()
{
    int64_t last_pts = 0;
    //分配数据帧
    AVFrame *pAVFrame = av_frame_alloc();
    AVPacket *pAVPacket = nullptr;
    while (!isInterruptionRequested()) {
        if (m_pAVStream->duration > 0)
            pAVPacket = m_queue2.dequeue();
        else {
            pAVPacket = m_queue.dequeue();
        }

        if (!m_pAVDecoderCtx || !pAVPacket)
            break;
        //发送数据到解码器
        auto res = avcodec_send_packet(m_pAVDecoderCtx, pAVPacket);
        if (0 != res) {
            qWarning() << "发送帧到解码器失败";
            av_packet_unref(pAVPacket);
            continue;
        }

        //接收解码后的数据
        if (0 != avcodec_receive_frame(m_pAVDecoderCtx, pAVFrame)) {
            qWarning() << "解码帧失败";
            av_packet_unref(pAVPacket);
            continue;
        }
        //图像转换
        //            sws_scale(pSwsContext, (const uint8_t *const *)pAVFrame->data, pAVFrame->linesize, 0, videoHeight, pAVFrameYUV->data, pAVFrameYUV->linesize);
        // 显示帧
        AVFrame *frame_copy = av_frame_clone(pAVFrame);
        if (frame_copy) {
            emit sigYuv(frame_copy);
        }

        // 计算延迟，根据帧的时间戳决定是否显示此帧
        if (m_pAVStream->duration > 0 && pAVFrame->pts != AV_NOPTS_VALUE) {
            int64_t diff = pAVFrame->pts - last_pts; // 计算与上一帧的时间差
            // 根据时间差sleep或者直接显示
            usleep(diff * av_q2d(m_pAVStream->time_base) * 1000000); // 假设time_base已经正确设置
            last_pts = pAVFrame->pts;
        }
        av_packet_unref(pAVPacket);
        av_frame_unref(pAVFrame);
    }
}

// Attach a stream, set up a matching decoder context, and start the
// decode thread. Returns false (with the object left in a clean state)
// if any setup step fails.
bool FVideoDecode::initAVStream(AVStream *pAVStream)
{
    if (!pAVStream)
        return false;

    m_pAVStream = pAVStream;

    // Look up a decoder for the stream's codec.
    m_pAVCodec = avcodec_find_decoder(m_pAVStream->codecpar->codec_id);
    if (!m_pAVCodec) {
        qWarning() << "获取解码器失败!";
        return false;
    }

    // Allocate the decoding context.
    m_pAVDecoderCtx = avcodec_alloc_context3(m_pAVCodec);
    if (!m_pAVDecoderCtx) {
        qWarning() << "无法分配解码上下文";
        return false;
    }

    // Copy the stream's codec parameters into the context.
    if (avcodec_parameters_to_context(m_pAVDecoderCtx, m_pAVStream->codecpar) < 0) {
        qWarning() << "无法复制编解码器参数到解码上下文";
        // Don't keep a half-initialized context around (the original code
        // left it set, so later calls could use it uninitialized).
        avcodec_free_context(&m_pAVDecoderCtx);
        return false;
    }

    // Open the decoder.
    if (avcodec_open2(m_pAVDecoderCtx, m_pAVCodec, nullptr) < 0) {
        qWarning() << "打开解码器失败";
        avcodec_free_context(&m_pAVDecoderCtx);
        return false;
    }

    // Launch the decode thread (run()).
    start();
    return true;
}
