#include "decode.h"
#include <QDebug>
#include <QImage>
#include <QPixmap>
#include <QThread>
#include <QtEndian>
#include "Nalu.h"
#define MAX_AUDIO_FRAME_SIZE 192000
// Forward declarations (defined later in this file).
QString av_get_pixelformat_name(AVPixelFormat format);
AVPacket *getAVPactFromRtpData(const char *rtpData, int rtpLen);
// Convenience overload: forwards the QByteArray's raw bytes to the pointer/length version.
inline AVPacket *getAVPactFromRtpData(const QByteArray &rtpData) { return getAVPactFromRtpData(rtpData.data(), rtpData.size()); }

#pragma region unique_ptr
// Deleter adapter so std::unique_ptr can release an AVPacket via av_packet_free.
void packet_deleter(AVPacket *pkt) { av_packet_free(&pkt); }
// RAII wrapper: calls av_packet_alloc on construction and av_packet_free on destruction.
class AVPacketUniquePtr : public std::unique_ptr<AVPacket, decltype(packet_deleter) *>
{
public:
    AVPacketUniquePtr() : std::unique_ptr<AVPacket, decltype(packet_deleter) *>(av_packet_alloc(), packet_deleter) {}
    // Takes ownership of an already-allocated packet (implicit on purpose:
    // lets raw AVPacket* returns convert directly at call sites).
    AVPacketUniquePtr(AVPacket *p) : std::unique_ptr<AVPacket, decltype(packet_deleter) *>(p, packet_deleter) {}
    // Copying disabled (unique ownership).
    AVPacketUniquePtr(const AVPacketUniquePtr &) = delete;
    // Moving allowed.
    AVPacketUniquePtr(AVPacketUniquePtr &&) = default;
    AVPacketUniquePtr &operator=(const AVPacketUniquePtr &) = delete;
    AVPacketUniquePtr &operator=(AVPacketUniquePtr &&other) = default;
    // Assigning a raw pointer frees the currently held packet and adopts the new one.
    AVPacketUniquePtr &operator=(AVPacket *other) { return *this = AVPacketUniquePtr(other); }
};

// Deleter adapter so std::unique_ptr can release an AVFrame via av_frame_free.
void frame_deleter(AVFrame *frame) { av_frame_free(&frame); }
// RAII wrapper: calls av_frame_alloc on construction and av_frame_free on destruction.
class AVFrameUniquePtr : public std::unique_ptr<AVFrame, decltype(frame_deleter) *>
{
public:
    AVFrameUniquePtr() : std::unique_ptr<AVFrame, decltype(frame_deleter) *>(av_frame_alloc(), frame_deleter) {}

    // Takes ownership of an already-allocated frame.
    AVFrameUniquePtr(AVFrame *p) : std::unique_ptr<AVFrame, decltype(frame_deleter) *>(p, frame_deleter) {}

    AVFrameUniquePtr(const AVFrameUniquePtr &) = delete; // copying disabled (unique ownership)
    AVFrameUniquePtr(AVFrameUniquePtr &&) = default;     // moving allowed

    AVFrameUniquePtr &operator=(const AVFrameUniquePtr &) = delete;
    AVFrameUniquePtr &operator=(AVFrameUniquePtr &&other) = default;
    // Assigning a raw pointer frees the currently held frame and adopts the new one.
    AVFrameUniquePtr &operator=(AVFrame *other) { return *this = AVFrameUniquePtr(other); }
};
// RAII wrapper: avcodec_parameters_alloc on construction,
// avcodec_parameters_free (via lambda deleter) on destruction.
class AVCodecParametersUniquePtr : public std::unique_ptr<AVCodecParameters, void (*)(AVCodecParameters *)>
{
public:
    AVCodecParametersUniquePtr() : std::unique_ptr<AVCodecParameters, void (*)(AVCodecParameters *)>(avcodec_parameters_alloc(), [](AVCodecParameters *p)
                                                                                                     { avcodec_parameters_free(&p); }) {}

    // Copying disabled (unique ownership).
    AVCodecParametersUniquePtr(const AVCodecParametersUniquePtr &) = delete;
    // Moving allowed.
    AVCodecParametersUniquePtr(AVCodecParametersUniquePtr &&) = default;
};
#pragma endregion

#pragma region init
// Allocates and opens a decoder context matching the given codec parameters.
// Every FFmpeg call is now checked and logged; previously all failures were
// silently ignored. On success *codecContext holds the opened context (the
// caller's child decoder takes ownership); intermediate failures still leave
// *codecContext pointing at the allocated context, matching the original
// contract so downstream callers behave as before.
void initCodec(AVCodecContext **codecContext, AVCodecParameters *codecpar)
{
    const AVCodec *codec = avcodec_find_decoder(codecpar->codec_id);
    if (!codec)
        qDebug() << "avcodec_find_decoder failed for codec_id" << codecpar->codec_id;

    *codecContext = avcodec_alloc_context3(codec);
    if (!*codecContext)
    {
        qDebug() << "avcodec_alloc_context3 failed";
        return;
    }
    if (avcodec_parameters_to_context(*codecContext, codecpar) < 0)
        qDebug() << "avcodec_parameters_to_context failed";
    if (avcodec_open2(*codecContext, codec, nullptr) < 0)
        qDebug() << "avcodec_open2 failed";
}

// Creates the RTP decoder and its per-media child decoders (parented to this
// object for Qt ownership).
// NOTE(review): the dump-file path is hard-coded and relative to the working
// directory — this looks like a debug aid; confirm it is still wanted.
RtpDecoder::RtpDecoder(QObject *parent) : QObject(parent)
{
    file = new QFile("../RTSP_Client_Qt/bin/test.h264", this);
    audioDecoder = new AudioDecoder(this);
    videoDecoder = new VideoDecoder(this);
}

// Flush and close the debug dump file if it is still open.
// (file itself is deleted by Qt parent ownership.)
RtpDecoder::~RtpDecoder()
{
    if (!file->isOpen())
        return;

    file->flush();
    file->close();
}

// Configures the audio/video decoders from the parsed SDP: for every live
// track, fills AVCodecParameters, opens a codec context via initCodec and
// hands it to the matching child decoder.
void RtpDecoder::initDecoder(Sdp *sdp)
{
    qDebug() << "initDecoder";
    for (int index = 0; index < TRACK_MAX_NUM; index++)
    {
        const SdpTrack &curTrack = sdp->tracks[index];
        if (curTrack.getAliveStatus() == true)
        {
            AVCodecParametersUniquePtr codecpar;

            if (curTrack.getControlId() == sdp->videoStreamId)
            {
                videoPayloadType = curTrack.getPayload();
                // Milliseconds per timestamp tick: 1/timebase * 1000.
                double time_base_q2d_ms = 1 / (double)curTrack.getVideoTimebase() * 1000;

                codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
                codecpar->codec_id = AV_CODEC_ID_H264; // determined by the SDP a=rtpmap field
                {
                    QByteArray extradata = sdp->getVideoCodeccparExtradata();
                    // FFmpeg requires extradata buffers to carry
                    // AV_INPUT_BUFFER_PADDING_SIZE zeroed padding bytes;
                    // av_mallocz allocates the padded buffer already zeroed
                    // (the previous av_malloc of the exact size violated this).
                    codecpar->extradata = (uint8_t *)av_mallocz(extradata.size() + AV_INPUT_BUFFER_PADDING_SIZE);
                    codecpar->extradata_size = extradata.size();
                    memcpy(codecpar->extradata, extradata.constData(), extradata.size());

                    // Debug aid: dump the raw H.264 stream (extradata first) to a file.
                    if (file->open(QIODevice::WriteOnly | QIODevice::Truncate))
                    {
                        file->write(extradata);
                        file->flush();
                    }
                    else
                    {
                        qDebug() << "file open failed";
                    }
                }

                // codecpar->width = 1280;			   // resolution could be parsed from the SDP a=fmtp field
                // codecpar->height = 720;
                AVCodecContext *codecContext = nullptr;
                initCodec(&codecContext, codecpar.get());
                videoDecoder->initDecoder(codecContext, sdp->videoStreamId, time_base_q2d_ms);
                emit initVideoOutput(AV_HWDEVICE_TYPE_NONE);
            }
            else if (curTrack.getControlId() == sdp->audioStreamId)
            {
                audioPayloadType = curTrack.getPayload();
                // Milliseconds per audio sample: 1/sample_rate * 1000.
                double time_base_q2d_ms = 1 / (double)curTrack.getAudioSampleRate() * 1000;

                codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
                codecpar->codec_id = AV_CODEC_ID_AAC; // determined by the SDP a=rtpmap field
                codecpar->sample_rate = curTrack.getAudioSampleRate();
                codecpar->ch_layout.nb_channels = curTrack.getAudioChannel();

                AVCodecContext *codecContext = nullptr;
                initCodec(&codecContext, codecpar.get());
                audioDecoder->initDecoder(codecContext, sdp->audioStreamId, time_base_q2d_ms);
                emit initAudioOutput(codecpar->sample_rate, codecpar->ch_layout.nb_channels);
            }
        }
    }
}
#pragma endregion

// RTCP handling is not implemented yet; incoming packets are ignored.
// TODO: parse sender reports to map RTP timestamps to wall-clock time.
void RtpDecoder::recvRtcpData(const QByteArray &rtcpData)
{
}

// Entry point for incoming RTP packets. Only the video stream is analyzed;
// packets for any other stream (e.g. audio) are currently dropped.
void RtpDecoder::recvRtpData(const QByteArray &rtpData, int streamId)
{
    if (streamId == videoDecoder->videoStreamIndex)
        analyzeRtp(rtpData);
}

// Parses one RTP packet carrying H.264 (RFC 6184 packetization).
// Single NAL units (slice / IDR / SPS / PPS) are forwarded directly; FU-A
// fragments are accumulated in FU_A_queue until the end bit arrives, then
// reassembled into one NAL unit. Added bounds checks: the original cast the
// buffer to RtpHeader/NalHeader/FUHeader without verifying the packet was
// long enough, reading out of bounds on truncated input.
void RtpDecoder::analyzeRtp(const QByteArray &rtpData)
{
    // Need at least the fixed RTP header plus one payload byte (the NAL header).
    if (rtpData.size() <= (int)sizeof(RtpHeader))
    {
        qDebug() << "RTP packet too short:" << rtpData.size();
        return;
    }

    RtpHeader rtpHeader = *(RtpHeader *)rtpData.data();
    uint32_t timeStamp = qFromBigEndian(rtpHeader.timestamp); // TODO: unused for now — decodePacket receives 0

    const char *rtpPayload = rtpData.data() + sizeof(RtpHeader);
    int len = rtpData.size() - sizeof(RtpHeader);

    NalHeader::NalUnitType type = static_cast<NalHeader::NalUnitType>(rtpPayload[0] & 0x1F);
    switch (type)
    {
    case NalHeader::NalUnitType::NAL_UNKNOW:
        qDebug() << "NAL_UNKNOW";
        break;
    case NalHeader::NalUnitType::NAL_SLICE:     // P frame
    case NalHeader::NalUnitType::NAL_SLICE_IDR: // I frame
    case NalHeader::NalUnitType::NAL_SPS:
    case NalHeader::NalUnitType::NAL_PPS:
        decodePacket(getAVPactFromRtpData(rtpPayload, len), 0, 0);
        break;
    case NalHeader::NalUnitType::NAL_FU_A: // NAL_FU_A = FU_indicator(NalHeader) + FU_header + FU_payload
    {
        // Guard against truncated fragments before reading the two headers.
        if (len < (int)(sizeof(NalHeader) + sizeof(FUHeader)))
        {
            qDebug() << "NAL_FU_A packet too short:" << len;
            return;
        }

        NalHeader nalHeader = *(NalHeader *)rtpPayload;
        FUHeader fuHeader = *(FUHeader *)(rtpPayload + sizeof(NalHeader));
        const char *fuPayload = rtpPayload + sizeof(NalHeader) + sizeof(FUHeader);
        int fuPayloadLen = len - sizeof(NalHeader) - sizeof(FUHeader);

        if (fuHeader.start_bit == 1)
        {
            FU_A_queue.clear();
            // Rebuild the NAL header: keep the F/NRI bits, substitute the real type.
            uint8_t newNalType = (nalHeader.f << 7) | (nalHeader.nri << 5) | fuHeader.type;
            FU_A_queue.enqueue(QByteArray((char *)&newNalType, 1));
        }
        else if (FU_A_queue.isEmpty()) // non-start fragment but nothing accumulated: the start fragment was lost
        {
            qDebug() << "NAL_FU_A start bit is 0 but queue is empty";
            return;
        }

        if (fuPayloadLen > 0)
        {
            FU_A_queue.enqueue(QByteArray(fuPayload, fuPayloadLen));
        }

        if (fuHeader.end_bit == 1)
        {
            QByteArray nalUnit = FU_A_queue.dequeue();
            uint8_t nalNri = (uint8_t)(nalUnit.data()[0]) & 0x60;
            if (nalNri == 0x60)
            {
                // qDebug() << "I frame" << (nalUnit.data()[0] & 0x1f);
            }
            else if (nalNri == 0x40)
            {
                // qDebug() << "P frame" << (nalUnit.data()[0] & 0x1f);
            }
            else
            {
                // qDebug() << "B frame" << (nalUnit.data()[0] & 0x1f);
            }

            while (!FU_A_queue.isEmpty())
            {
                nalUnit += FU_A_queue.dequeue();
            }
            decodePacket(getAVPactFromRtpData(nalUnit), 0, 0);
        }
        break;
    }
    default:
        qDebug() << "NAL_OTHER" << (uint8_t)type;
        break;
    }
}

// Wraps a raw H.264 NAL unit into an AVPacket, prepending the 4-byte Annex-B
// start code (00 00 00 01) that the decoder expects.
// Returns nullptr on allocation failure (previously the return values of
// av_packet_alloc/av_new_packet were unchecked and memcpy would crash).
AVPacket *getAVPactFromRtpData(const char *rtpData, int rtpLen)
{
    AVPacket *ret = av_packet_alloc();
    if (!ret)
        return nullptr;

    static const uint8_t start_code[4] = {0x00, 0x00, 0x00, 0x01};
    if (av_new_packet(ret, rtpLen + 4) < 0)
    {
        av_packet_free(&ret);
        return nullptr;
    }
    memcpy(ret->data, start_code, 4);
    memcpy(ret->data + 4, rtpData, rtpLen);

    return ret;
}
// Forwards one packet to the video decoder. payloadType and timeStamp are
// currently unused (callers pass 0, 0); the commented-out code below sketches
// the intended audio/video demultiplexing by payload type.
void RtpDecoder::decodePacket(AVPacketUniquePtr pkt, int payloadType, uint32_t timeStamp)
{
    pkt->pts = 0; // NOTE(review): pts is forced to 0 — timestamp handling not implemented yet
    // Debug aid: dump the first 200 packets into `file`, then close it.
    if (file->isOpen() && count < 200)
    {
        count++;
        file->write((char *)pkt->data, pkt->size);
    }
    else if (count == 200)
    {
        qDebug() << "--------------------------------------------------------------------------------------------------------------------------------------------------";
        file->close();
        count++;
    }

    videoDecoder->decodeVideoPacket(std::move(pkt));
    // if (payloadType == videoPayloadType)
    // {
    //     // av_rescale_q(timeStamp, AVRational{1, 90000}, pkt->pts);
    // }
    // else if (payloadType == audioPayloadType)
    // {
    //     qDebug() << "audio payload type:" << payloadType;
    //     // audioDecoder->decodeAudioPacket(std::move(pkt));
    // }
    // else
    // {
    //     qDebug() << "unknown stream index" << payloadType << videoPayloadType << audioPayloadType;
    // }
}

// Release audio decoding resources in reverse order of creation:
// first the resampler, then the codec context.
void AudioDecoder::clean()
{
    if (swrContext != nullptr)
        swr_free(&swrContext);
    if (codecContext != nullptr)
        avcodec_free_context(&codecContext);
}

// Adopt the opened codec context and stream metadata, then prepare the
// resampler that converts decoded audio to interleaved S16 PCM.
void AudioDecoder::initDecoder(AVCodecContext *codec, int streamIndex, double time_base_q2d_ms)
{
    this->time_base_q2d_ms = time_base_q2d_ms;
    this->audioStreamIndex = streamIndex;
    this->codecContext = codec;
    initSwrContext();
}

// Configure the resampler: convert to interleaved signed 16-bit PCM while
// keeping the source channel layout and sample rate unchanged.
void AudioDecoder::initSwrContext()
{
    int err = swr_alloc_set_opts2(&swrContext,
                                  &codecContext->ch_layout, AV_SAMPLE_FMT_S16, codecContext->sample_rate,
                                  &codecContext->ch_layout, codecContext->sample_fmt, codecContext->sample_rate,
                                  0, nullptr);
    if (err != 0)
        qDebug() << "swr_alloc_set_opts2 failed";

    if (swrContext == nullptr || swr_init(swrContext) < 0)
        qDebug() << "swr_init failed";
}

// Converts one decoded audio frame to interleaved S16 PCM in dstBuffer.
// Returns the number of converted samples per channel (negative on error).
int AudioDecoder::transferFrameToPCM(AVFrame *frame, uint8_t *dstBuffer)
{
    // The resampler is configured with equal input and output sample rates
    // (see initSwrContext), so the output-capacity estimate must use
    // frame->sample_rate — the previous hard-coded 44100 mis-sized the
    // estimate for every other sample rate.
    int64_t out_nb_samples = av_rescale_rnd(
        swr_get_delay(swrContext, frame->sample_rate) + frame->nb_samples,
        frame->sample_rate,
        frame->sample_rate,
        AV_ROUND_UP);

    int convertedSize = swr_convert( // returns the converted sample count per channel
        swrContext,                  // converter
        &dstBuffer,                  // output plane(s)
        out_nb_samples,              // output capacity in samples
        frame->data,                 // input plane(s)
        frame->nb_samples);          // input sample count

    return convertedSize;
}

// Decodes one AAC packet, resamples every produced frame to S16 PCM and
// forwards the buffers via sendAudioBuffer (receiver takes ownership of the
// released buffer). Now drains the decoder in a loop: a packet may yield
// more than one frame, and the single receive_frame call previously dropped
// the extras and logged EAGAIN as an error.
void AudioDecoder::decodeAudioPacket(AVPacketUniquePtr packet)
{
    if (avcodec_send_packet(codecContext, packet.get()) != 0)
    {
        qDebug() << "in audio decode: send packet error";
        return;
    }

    AVFrameUniquePtr frame;
    int ret;
    while ((ret = avcodec_receive_frame(codecContext, frame.get())) == 0)
    {
        std::unique_ptr<uint8_t[]> convertedAudioBuffer(new uint8_t[MAX_AUDIO_FRAME_SIZE]);
        int convertedSize = transferFrameToPCM(frame.get(), convertedAudioBuffer.get());

        if (convertedSize > 0)
        {
            int channels = frame->ch_layout.nb_channels;
            // Byte size of `convertedSize` interleaved S16 samples per channel.
            int bufferSize = av_samples_get_buffer_size(nullptr, channels, convertedSize, AV_SAMPLE_FMT_S16, 1);
            double framePts = time_base_q2d_ms * frame->pts;

            lastPts = framePts;
            // Hand the PCM buffer to the audio output; receiver owns it now.
            emit sendAudioBuffer(convertedAudioBuffer.release(), bufferSize, framePts);
        }
        else
        {
            qDebug() << "in audio decode frame error";
        }
        av_frame_unref(frame.get()); // reuse the frame for the next iteration
    }
    // EAGAIN/EOF are the normal "no more frames" signals, not errors.
    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
        qDebug() << "in audio decode frame error";
}

// Reset the hardware pixel-format state and release the codec context.
void VideoDecoder::clean()
{
    hw_device_pix_fmt = AV_PIX_FMT_NONE;
    if (codecContext != nullptr)
        avcodec_free_context(&codecContext);
}

// Adopt the opened codec context and remember which stream and timebase
// (in ms per tick) this decoder serves.
void VideoDecoder::initDecoder(AVCodecContext *codec, int streamIndex, double time_base_q2d_ms)
{
    this->time_base_q2d_ms = time_base_q2d_ms;
    this->videoStreamIndex = streamIndex;
    this->codecContext = codec;
}

// Sends one H.264 packet to the decoder and, if a frame comes out, packs its
// pixel data into a tightly-packed buffer and emits it via sendVideoFrame.
void VideoDecoder::decodeVideoPacket(AVPacketUniquePtr packet)
{
    if (avcodec_send_packet(codecContext, packet.get()) == 0)
    {
        auto frame = av_frame_alloc();
        int ret = avcodec_receive_frame(codecContext, frame);
        if (ret == 0)
        {
            // Presentation time in milliseconds: pts ticks * ms-per-tick.
            double framePts = time_base_q2d_ms * frame->pts;

            // Hardware-decoded frames live in GPU memory and must be copied
            // to system memory first (this may free and replace `frame`).
            if (hw_device_type != AV_HWDEVICE_TYPE_NONE)
                transferDataFromHW(&frame);

            // Pack the (possibly stride-padded) planes into a contiguous
            // buffer; presumably the sendVideoFrame receiver takes ownership
            // of pixelData — confirm against the slot implementation.
            uint8_t *pixelData = nullptr;
            switch (frame->format)
            {
            case AV_PIX_FMT_YUV420P:
                pixelData = copyYuv420pData(frame->data, frame->linesize, frame->width, frame->height);
                break;
            case AV_PIX_FMT_NV12:
                pixelData = copyNv12Data(frame->data, frame->linesize, frame->width, frame->height);
                break;
            default:
                // Any other format is converted to YUV420P before packing.
                pixelData = copyDefaultData(frame);
                break;
            }

            lastPts = framePts;
            emit sendVideoFrame(pixelData, frame->width, frame->height, framePts);
        }
        else if (ret != AVERROR(EAGAIN))
        {
            qDebug() << "avcodec_receive_frame fail: " << ret;
        }
        else
        {
            // EAGAIN: the decoder needs more input before emitting a frame.
            // qDebug() << "avcodec_receive_frame EAGAIN";
        }
        av_frame_free(&frame);
    }
    else
    {
        qDebug() << "avcodec_send_packet fail";
    }
}

// Moves a hardware-decoded frame from GPU to system memory.
// On success *frame is freed and replaced by a CPU-side frame (converted to
// NV12 if the transfer produced some other format); on failure *frame is
// left untouched so the caller can still free it.
void VideoDecoder::transferDataFromHW(AVFrame **frame)
{
    // With hardware acceleration the decoded data still resides on the GPU,
    // so av_hwframe_transfer_data is needed to bring it into system memory.
    // GPU output is normally NV12; see:
    // https://blog.csdn.net/qq_23282479/article/details/118993650
    AVFrameUniquePtr tmp_frame;
    if (0 > av_hwframe_transfer_data(tmp_frame.get(), *frame, 0))
    {
        qDebug() << "av_hwframe_transfer_data fail";
        return;
    }

    av_frame_free(frame);

    if (tmp_frame->format != AV_PIX_FMT_NV12)
    { // Not NV12: convert to NV12. Temporary structure, to be reworked later.
        *frame = transFrameToDstFmt(tmp_frame.get(), tmp_frame->width, tmp_frame->height, AV_PIX_FMT_NV12);
    }
    else
    {
        // Already NV12: hand ownership of the transferred frame to the caller.
        *frame = tmp_frame.release();
    }
}

// Packs an NV12 frame (whose rows may carry stride padding) into a tight
// buffer of pixelWidth*pixelHeight*3/2 bytes. Caller owns the returned buffer.
uint8_t *VideoDecoder::copyNv12Data(uint8_t **pixelData, int *linesize, int pixelWidth, int pixelHeight)
{
    const size_t rowBytes = static_cast<size_t>(pixelWidth);
    uint8_t *packed = new uint8_t[pixelWidth * pixelHeight * 3 / 2];

    // Y plane: full resolution, one row at a time.
    uint8_t *dstY = packed;
    const uint8_t *srcY = pixelData[0];
    for (int row = 0; row < pixelHeight; ++row, dstY += pixelWidth, srcY += linesize[0])
        memcpy(dstY, srcY, rowBytes);

    // Interleaved UV plane: half the rows, full row width.
    uint8_t *dstUV = packed + pixelWidth * pixelHeight;
    const uint8_t *srcUV = pixelData[1];
    const int halfRows = pixelHeight >> 1;
    for (int row = 0; row < halfRows; ++row, dstUV += pixelWidth, srcUV += linesize[1])
        memcpy(dstUV, srcUV, rowBytes);

    return packed;
}

// Packs a YUV420P frame (whose rows may carry stride padding) into a tight
// buffer of pixelWidth*pixelHeight*3/2 bytes. Caller owns the returned buffer.
uint8_t *VideoDecoder::copyYuv420pData(uint8_t **pixelData, int *linesize, int pixelWidth, int pixelHeight)
{
    // Copy one plane row-by-row, dropping any stride padding.
    auto packPlane = [](uint8_t *dst, const uint8_t *src, int stride, int width, int rows)
    {
        for (int r = 0; r < rows; ++r)
            memcpy(dst + r * width, src + r * stride, static_cast<size_t>(width));
    };

    const int halfWidth = pixelWidth >> 1;
    const int halfHeight = pixelHeight >> 1;
    const int lumaSize = pixelWidth * pixelHeight;

    uint8_t *packed = new uint8_t[lumaSize * 3 / 2];
    packPlane(packed, pixelData[0], linesize[0], pixelWidth, pixelHeight);                           // Y
    packPlane(packed + lumaSize, pixelData[1], linesize[1], halfWidth, halfHeight);                  // U
    packPlane(packed + lumaSize + halfWidth * halfHeight, pixelData[2], linesize[2], halfWidth, halfHeight); // V
    return packed;
}

// Converts a frame of any pixel format to YUV420P and packs it into a tight
// buffer. Fix: the pack step must use the CONVERTED frame's linesize, not the
// source frame's — sws output strides can differ from the input's, which made
// the copy read the wrong rows.
uint8_t *VideoDecoder::copyDefaultData(AVFrame *frame)
{
    AVFrameUniquePtr dstFrame = transFrameToDstFmt(frame, frame->width, frame->height, AV_PIX_FMT_YUV420P);
    return copyYuv420pData(dstFrame->data, dstFrame->linesize, frame->width, frame->height);
}

// Returns a new RGB24 frame with the content of srcFrame; the caller frees it
// with av_frame_free. Fixes: the RGB24 branch used to av_frame_copy into a
// frame with NO allocated buffer, and the convert branch leaked the
// av_image_alloc buffer (av_frame_free does not release it) and never set
// width/height/format. av_frame_get_buffer attaches a refcounted buffer that
// av_frame_free does release.
AVFrame *VideoDecoder::transFrameToRGB24(AVFrame *srcFrame, int pixelWidth, int pixelHeight)
{
    AVFrame *frameRGB = av_frame_alloc();
    frameRGB->format = AV_PIX_FMT_RGB24;
    frameRGB->width = pixelWidth;
    frameRGB->height = pixelHeight;
    if (av_frame_get_buffer(frameRGB, 0) < 0)
    {
        qDebug() << "av_frame_get_buffer failed";
        return frameRGB;
    }

    if (srcFrame->format == AV_PIX_FMT_RGB24)
    {
        // Same format: plain plane copy into the freshly allocated buffer.
        av_frame_copy(frameRGB, srcFrame);
    }
    else
    {
        auto swsContext = sws_getContext(pixelWidth, pixelHeight, (AVPixelFormat)srcFrame->format,
                                         pixelWidth, pixelHeight, AV_PIX_FMT_RGB24,
                                         SWS_BICUBIC, nullptr, nullptr, nullptr);
        sws_scale(swsContext, srcFrame->data, srcFrame->linesize, 0, pixelHeight, frameRGB->data, frameRGB->linesize);
        sws_freeContext(swsContext);
    }
    return frameRGB;
}

// Converts srcFrame to dstFormat at the same resolution; the caller frees the
// returned frame with av_frame_free. Fixes two leaks in the original: the
// SwsContext was never freed (leak per call), and the av_malloc'd pixel
// buffer was attached via av_image_fill_arrays without ownership, so
// av_frame_free never released it. av_frame_get_buffer makes the frame own
// its buffer.
AVFrame *VideoDecoder::transFrameToDstFmt(AVFrame *srcFrame, int pixelWidth, int pixelHeight, AVPixelFormat dstFormat)
{
    AVFrame *dstFrame = av_frame_alloc();
    dstFrame->format = dstFormat;
    dstFrame->width = pixelWidth;
    dstFrame->height = pixelHeight;
    if (av_frame_get_buffer(dstFrame, 0) < 0)
    {
        qDebug() << "av_frame_get_buffer failed";
        return dstFrame;
    }

    SwsContext *swsContext = sws_getContext(pixelWidth, pixelHeight, AVPixelFormat(srcFrame->format),
                                            pixelWidth, pixelHeight, dstFormat,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    sws_scale(swsContext, srcFrame->data, srcFrame->linesize, 0, pixelHeight, dstFrame->data, dstFrame->linesize);
    sws_freeContext(swsContext);

    return dstFrame;
}

// Saves one decoded frame to an image file, converting to RGB24 first when
// necessary. Returns 0 on success, -1 when frame is null.
int VideoDecoder::writeOneFrame(AVFrame *frame, int pixelWidth, int pixelHeight, QString fileName)
{
    if (!frame)
        return -1;

    if (frame->format == AV_PIX_FMT_RGB24)
    {
        // Already RGB24: wrap the plane directly and save.
        QImage image(frame->data[0], pixelWidth, pixelHeight, frame->linesize[0], QImage::Format_RGB888);
        image.save(fileName);
    }
    else
    {
        // Convert first; the unique_ptr frees the temporary frame afterwards.
        AVFrameUniquePtr frameRGB = transFrameToRGB24(frame, pixelWidth, pixelHeight);
        QImage image(frameRGB->data[0], pixelWidth, pixelHeight, frameRGB->linesize[0], QImage::Format_RGB888);
        image.save(fileName);
    }

    return 0;
}

// Human-readable name of a pixel format, for logging.
// Unknown values are rendered as "value:<n>".
QString av_get_pixelformat_name(AVPixelFormat format)
{
    switch (format)
    {
    case AV_PIX_FMT_NONE:
        return "AV_PIX_FMT_NONE";
    case AV_PIX_FMT_YUV420P:
        return "AV_PIX_FMT_YUV420P";
    case AV_PIX_FMT_NV12:
        return "AV_PIX_FMT_NV12";
    case AV_PIX_FMT_VAAPI:
        return "AV_PIX_FMT_VAAPI";
    case AV_PIX_FMT_VIDEOTOOLBOX:
        return "AV_PIX_FMT_VIDEOTOOLBOX";
    case AV_PIX_FMT_MEDIACODEC:
        return "AV_PIX_FMT_MEDIACODEC";
    case AV_PIX_FMT_D3D11:
        return "AV_PIX_FMT_D3D11";
    case AV_PIX_FMT_OPENCL:
        return "AV_PIX_FMT_OPENCL";
    case AV_PIX_FMT_VULKAN:
        return "AV_PIX_FMT_VULKAN";
    case AV_PIX_FMT_D3D12:
        return "AV_PIX_FMT_D3D12";
    case AV_PIX_FMT_DXVA2_VLD:
        return "AV_PIX_FMT_DXVA2_VLD";
    case AV_PIX_FMT_VDPAU:
        return "AV_PIX_FMT_VDPAU";
    case AV_PIX_FMT_QSV:
        return "AV_PIX_FMT_QSV";
    case AV_PIX_FMT_MMAL:
        return "AV_PIX_FMT_MMAL";
    case AV_PIX_FMT_D3D11VA_VLD:
        return "AV_PIX_FMT_D3D11VA_VLD";
    case AV_PIX_FMT_CUDA:
        return "AV_PIX_FMT_CUDA";
    default:
        return "value:" + QString::number(format);
    }
}
