#pragma once
#include <QAudioOutput>
#include <QDebug>
#include <QIODevice>
#include <QMetaType>
#include <QQueue>
#include <QScopedPointer>
#include <QSharedPointer>
#include <QThread>
#include "sdp.h"
#include "RtpManager.h"

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}
class AudioDecoder;
class VideoDecoder;
class AVPacketUniquePtr;

#include <QFile>
#include <QDataStream>
// Demultiplexes incoming RTP packets into audio/video elementary streams and
// forwards them to the matching decoder; decoders are created from an SDP
// session description via initDecoder().
class RtpDecoder : public QObject
{
    Q_OBJECT
signals:
    // Emitted once the audio stream parameters are known so the receiver
    // can configure the audio output device.
    void initAudioOutput(int sampleRate, int channels);
    // Emitted once the video stream parameters are known; `format` is the
    // pixel format identifier the renderer should expect.
    void initVideoOutput(int format);

private:
    // QList<AVHWDeviceType> devices; // hardware decoders supported by this machine, enumerated at construction

    // Debug/dump file target. Default-initialized to nullptr so it is never
    // read as an indeterminate pointer before being assigned.
    QFile *file{nullptr};
    int count = 0;

    int audioPayloadType = 0; // RTP payload type negotiated for the audio stream
    int videoPayloadType = 0; // RTP payload type negotiated for the video stream

    // Parse one raw RTP packet: strip the RTP header and route the payload.
    void analyzeRtp(const QByteArray &data);

    // Hand a depacketized access unit to the audio or video decoder,
    // selected by payloadType; timeStamp is the RTP timestamp of the unit.
    void decodePacket(AVPacketUniquePtr packet, int payloadType, uint32_t timeStamp);

    // Reassembly queue for fragmented NAL units (H.264 FU-A packetization).
    QQueue<QByteArray> FU_A_queue;

public:
    RtpDecoder(QObject *parent = nullptr);
    ~RtpDecoder();

    AudioDecoder *audioDecoder{nullptr};
    VideoDecoder *videoDecoder{nullptr};

    // Create and configure the audio/video decoders from the SDP description.
    void initDecoder(Sdp *sdp);

public slots:
    // Entry point for raw RTP data received on stream `streamId`.
    void recvRtpData(const QByteArray &data, int streamId);
    // Entry point for RTCP control packets.
    void recvRtcpData(const QByteArray &data);
};

// Decodes compressed audio packets with FFmpeg and converts the result to
// PCM (via SwrContext) before emitting it through sendAudioBuffer().
class AudioDecoder : public QObject
{
    Q_OBJECT
signals:
    // Emits one decoded PCM buffer with its presentation timestamp.
    // NOTE(review): ownership of audioBuffer is not visible here -- confirm
    // whether the connected slot is expected to free it.
    void sendAudioBuffer(uint8_t *audioBuffer, int bufferSize, double pts);

private:
    AVCodecContext *codecContext{nullptr}; // FFmpeg decoder context, set by initDecoder()
    SwrContext *swrContext{nullptr};       // resampler used by transferFrameToPCM()

    // -1 marks "not yet initialized"; initDecoder() assigns the real index.
    int audioStreamIndex{-1};

    // Stream time base pre-converted to milliseconds per tick; 0.0 until
    // initDecoder() supplies the real value.
    double time_base_q2d_ms{0.0};

    double lastPts = -1.0; // pts of the previously handled frame (-1.0 = none yet)

    // Release codecContext / swrContext.
    void clean();

public:
    AudioDecoder(QObject *parent = nullptr) : QObject(parent) {}
    ~AudioDecoder() = default;

    // Adopt an opened codec context together with its stream index and
    // time base (already converted to milliseconds).
    void initDecoder(AVCodecContext *codec, int streamIndex, double time_base_q2d_ms);
    // Set up the resampling context matching codecContext's audio format.
    void initSwrContext();

    // Convert a decoded audio frame to PCM.
    // Returns the size in bytes of the converted audio data.
    int transferFrameToPCM(AVFrame *frame, uint8_t *dstBuffer);

    int getAudioStreamIndex() const { return audioStreamIndex; }
    // Decode one compressed packet and emit the resulting PCM buffer(s).
    void decodeAudioPacket(AVPacketUniquePtr packet);
};

// Decodes compressed video packets with FFmpeg (optionally hardware
// accelerated), converts frames to a displayable pixel layout, and emits
// them through sendVideoFrame().
class VideoDecoder : public QObject
{
    Q_OBJECT
signals:
    // Emits one decoded frame's pixel data with its dimensions and pts.
    // NOTE(review): ownership of pixelData is not visible here -- confirm
    // whether the connected slot is expected to free it.
    void sendVideoFrame(uint8_t *pixelData, int pixelWidth, int pixelHeight, double pts);

private:
    friend class RtpDecoder;
    AVCodecContext *codecContext{nullptr}; // FFmpeg decoder context, set by initDecoder()

    AVBufferRef *hw_device_ctx = nullptr;                          // hardware device context, if HW decoding is active
    enum AVPixelFormat hw_device_pix_fmt = AV_PIX_FMT_NONE;        // pixel format produced by the HW decoder
    enum AVHWDeviceType hw_device_type = AV_HWDEVICE_TYPE_NONE;    // selected HW backend (NONE = software)

    // Video stream index; -1 marks "not yet initialized" until
    // initDecoder() assigns the real index.
    int videoStreamIndex{-1};

    // Stream time base pre-converted to milliseconds per tick; 0.0 until
    // initDecoder() supplies the real value.
    double time_base_q2d_ms{0.0};

    double lastPts = -1.0; // pts of the previously handled frame (-1.0 = none yet)

    void clean();

    // Copy hardware-decoded frame data into system memory. Some fields
    // (e.g. pts) are lost in the transfer and must be restored by the caller.
    void transferDataFromHW(AVFrame **frame);

    uint8_t *copyNv12Data(uint8_t **pixelData, int *linesize, int pixelWidth, int pixelHeight);
    uint8_t *copyYuv420pData(uint8_t **pixelData, int *linesize, int pixelWidth, int pixelHeight);
    uint8_t *copyDefaultData(AVFrame *rawFrame);

public:
    VideoDecoder(QObject *parent = nullptr) : QObject(parent) {}
    ~VideoDecoder() = default;

    // Adopt an opened codec context together with its stream index and
    // time base (already converted to milliseconds).
    void initDecoder(AVCodecContext *codec, int streamIndex, double time_base_q2d_ms);

    int getVideoStreamIndex() const { return videoStreamIndex; }
    // Decode one compressed packet and emit the resulting frame(s).
    void decodeVideoPacket(AVPacketUniquePtr packet);

    // Convert a frame to RGB24 / to an arbitrary destination pixel format.
    AVFrame *transFrameToRGB24(AVFrame *frame, int pixelWidth, int pixelHeight);
    AVFrame *transFrameToDstFmt(AVFrame *srcFrame, int pixelWidth, int pixelHeight, AVPixelFormat dstFormat);

    // Write one frame to `fileName` (debug helper); returns a status code.
    int writeOneFrame(AVFrame *frame, int pixelWidth, int pixelHeight, QString fileName);
};
