#include "faudiodecode.h"

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/mathematics.h> // av_rescale_rnd / AV_ROUND_UP
#include <libswresample/swresample.h> //音频采样转换
}

// Constructs the decoder thread and immediately starts the audio output
// consumer. The decode thread itself is NOT started here — it is started
// later from initAVStream() once a stream is attached.
FAudioDecode::FAudioDecode(QObject *parent)
    : QThread(parent)
    , m_audioPlayer(new QAudioPlayer(8192 * 16)) // 128 KiB playback buffer
{
    // NOTE(review): if m_audioPlayer is a plain raw-pointer member it is never
    // deleted (the destructor only stops it) — confirm ownership (smart
    // pointer member or QObject parenting?) or add a delete in ~FAudioDecode().
    m_audioPlayer->start_consume_audio();
}

// Shuts down playback, joins the decode thread, then releases the FFmpeg
// contexts the thread was using. Order matters: the contexts must only be
// freed after wait() returns, because run() dereferences both of them.
FAudioDecode::~FAudioDecode()
{
    m_audioPlayer->stop_consume_audio();
    // A null packet is the shutdown sentinel: run() breaks out of its loop
    // when it dequeues nullptr (presumably dequeue() blocks when empty, so
    // the sentinel is what unblocks the thread — confirm queue semantics).
    // If initAVStream() was never called, addAVPacket() drops the sentinel
    // because m_pAVStream is null — harmless, since in that case the thread
    // was never started and wait() returns immediately.
    addAVPacket(nullptr);
    wait();
    if (m_swrCtx)
        swr_free(&m_swrCtx);
    if (m_pAVDecoderCtx)
        avcodec_free_context(&m_pAVDecoderCtx);
}

// Routes an incoming packet to the decode thread's input queue. Packets are
// dropped until initAVStream() has attached a stream. Media with a known
// duration is queued on m_queue2; live / unknown-duration streams use
// m_queue (run() reads from the matching queue). A nullptr packet is passed
// through unchanged — the destructor uses it as a shutdown sentinel.
void FAudioDecode::addAVPacket(AVPacket *packet)
{
    if (m_pAVStream == nullptr)
        return;

    auto &inputQueue = (m_pAVStream->duration > 0) ? m_queue2 : m_queue;
    inputQueue.enqueue(packet);
}

// Attaches an audio stream, opens a decoder for it, configures the
// resampler (output: stereo / packed unsigned 8-bit / 48 kHz, matching what
// run() writes to the audio player), logs the stream parameters and starts
// the decode thread.
//
// Returns false on any setup failure; partially-created contexts are left
// for the destructor to free.
bool FAudioDecode::initAVStream(AVStream *pAVStream)
{
    if (!pAVStream)
        return false;

    m_pAVStream = pAVStream;
    m_pAVCodec = avcodec_find_decoder(pAVStream->codecpar->codec_id);
    if (!m_pAVCodec) {
        qWarning() << "Failed to find audio decoder";
        return false;
    }
    m_pAVDecoderCtx = avcodec_alloc_context3(nullptr);
    if (!m_pAVDecoderCtx) {
        qWarning() << "Failed to allocate AVCodecContext for audio";
        return false;
    }
    if (avcodec_parameters_to_context(m_pAVDecoderCtx, pAVStream->codecpar) < 0) {
        qWarning() << "Failed to get audio codec context";
        return false;
    }

    if (avcodec_open2(m_pAVDecoderCtx, m_pAVCodec, nullptr) < 0) {
        qWarning() << "Failed to open audio codec";
        return false;
    }

    // FIX: some demuxers leave channel_layout at 0; derive the default layout
    // from the channel count so swr_init() does not fail on such streams.
    if (m_pAVDecoderCtx->channel_layout == 0)
        m_pAVDecoderCtx->channel_layout = av_get_default_channel_layout(m_pAVDecoderCtx->channels);

    // swr_alloc_set_opts() allocates a context itself when passed nullptr,
    // so the separate swr_alloc() call the original made was redundant.
    m_swrCtx = swr_alloc_set_opts(nullptr, av_get_default_channel_layout(2), AV_SAMPLE_FMT_U8, 48000,
                                  m_pAVDecoderCtx->channel_layout, m_pAVDecoderCtx->sample_fmt, m_pAVDecoderCtx->sample_rate, 0, nullptr);
    if (!m_swrCtx) {
        // FIX: the message previously named the wrong function ("swr_alloc_set_opts2").
        qWarning() << "swr_alloc_set_opts fail.";
        return false;
    }
    if (swr_init(m_swrCtx) < 0) {
        qWarning() << "swr_init fail.";
        return false;
    }

    // FIX: guard the duration before converting to milliseconds — live
    // streams (duration <= 0 / AV_NOPTS_VALUE) previously logged garbage.
    auto destMs = pAVStream->duration > 0
                      ? av_q2d(pAVStream->time_base) * 1000 * pAVStream->duration
                      : 0.0;
    qWarning() << "码率:" << m_pAVDecoderCtx->bit_rate;
    qWarning() << "格式:" << m_pAVDecoderCtx->sample_fmt;
    qWarning() << "通道:" << m_pAVDecoderCtx->channels;
    qWarning() << "采样率:" << m_pAVDecoderCtx->sample_rate;
    qWarning() << "时长:" << destMs;
    qWarning() << "解码器:" << m_pAVCodec->name;
    start();
    return true;
}

void FAudioDecode::run()
{
    AVFrame *pAVFrame = av_frame_alloc();
    AVPacket *pAVPacket = nullptr;
    int64_t last_pts = 0;
    while (!isInterruptionRequested()) {
        if (m_pAVStream->duration > 0)
            pAVPacket = m_queue2.dequeue();
        else {
            pAVPacket = m_queue.dequeue();
        }

        if (!m_pAVDecoderCtx || !pAVPacket)
            break;
        //发送数据到解码器
        auto res = avcodec_send_packet(m_pAVDecoderCtx, pAVPacket);
        if (0 != res) {
            qWarning() << "发送帧到解码器失败";
            av_packet_unref(pAVPacket);
            continue;
        }

        //接收解码后的数据
        while (avcodec_receive_frame(m_pAVDecoderCtx, pAVFrame) >= 0) {
            uint8_t *data[2] = {0};
            int byteCnt = pAVFrame->nb_samples * 2 * 1;
            unsigned char *pcm = new uint8_t[byteCnt]; //frame->nb_samples*2*2表示分配样本数据量*两通道*每通道2字节大小
            data[0] = pcm; //输出格式为AV_SAMPLE_FMT_S16(packet类型),所以转换后的LR两通道都存在data[0]中
            auto ret = swr_convert(m_swrCtx,
                                   data, pAVFrame->nb_samples, //输出
                                   (const uint8_t **)pAVFrame->data, pAVFrame->nb_samples); //输入
            while (m_audioPlayer->bytesToWrite() < byteCnt)
                usleep(10);
            m_audioPlayer->write((const char *)pcm, byteCnt);
            delete[] pcm;
            // 计算延迟，根据帧的时间戳决定是否显示此帧
            if (m_pAVStream->duration > 0 && pAVFrame->pts != AV_NOPTS_VALUE) {
                int64_t diff = pAVFrame->pts - last_pts; // 计算与上一帧的时间差
                // 根据时间差sleep或者直接显示
                usleep(diff * av_q2d(m_pAVStream->time_base) * 1000000); // 假设time_base已经正确设置
                last_pts = pAVFrame->pts;
            }
            av_packet_unref(pAVPacket);
            av_frame_unref(pAVFrame);
        }
        av_packet_unref(pAVPacket);
        av_frame_unref(pAVFrame);
    }
}
