#ifndef     __VIDEO_DECODER_H
#define     __VIDEO_DECODER_H

#include "./decoder.h"

extern "C" {
#include <libswscale/swscale.h>
}

#include <cstdint>

#include <QImage>

#include "../utils/frame.h"
#include "../utils/image.h"

/**
 * A decoded video frame converted to a QImage, paired with its
 * presentation timestamp.
 */
struct ImageWrap {
    QImage image;      // scaled RGB32 frame produced by the decoder
    int64_t pts = 0;   // copied from AVFrame::best_effort_timestamp;
                       // default-initialized so it is never read indeterminate
};

class QImageVideoDecoder : public Decoder {

public:
    QImageVideoDecoder(AVStream *stream, int dst_w, int dst_h) : Decoder(stream) {
        m_sws_ctx = sws_getContext(
            m_ctx->width, m_ctx->height, m_ctx->pix_fmt,
            dst_w, dst_h, AV_PIX_FMT_RGB32,
            SWS_BICUBIC, nullptr, nullptr, nullptr
        );
        if (m_sws_ctx == nullptr) {
            throw FFmpegException("sws_getContext is null");
        }
        m_temp_frame = Frame(av_frame_alloc());
        m_width = dst_w;
        m_height = dst_h;
    }

    void set_target_size(int dst_w, int dst_h) {
        sws_freeContext(m_sws_ctx);
        m_sws_ctx = sws_getContext(
            m_ctx->width, m_ctx->height, m_ctx->pix_fmt,
            dst_w, dst_h, AV_PIX_FMT_RGB32,
            SWS_BICUBIC, nullptr, nullptr, nullptr
        );
        if (m_sws_ctx == nullptr) {
            throw FFmpegException("sws_getContext is null2");
        }
        m_width = dst_w;
        m_height = dst_h;
    }

    ~QImageVideoDecoder() {
        sws_freeContext(m_sws_ctx);
    }

    bool receive_image(ImageWrap &image) {
        bool ret = receive_frame(m_temp_frame.frame);
        if (!ret) {
            return ret;
        }
        image.image = QImage(m_width, m_height, QImage::Format_RGB32);
        uint8_t *dest_data[1] = { image.image.bits() };
        int dest_linesize[1] = { image.image.bytesPerLine() };
        sws_scale(
            m_sws_ctx,
            m_temp_frame->data,
            m_temp_frame->linesize,
            0,
            m_temp_frame->height,
            dest_data,
            dest_linesize
        );
        image.pts = m_temp_frame->best_effort_timestamp;
        m_temp_frame.unref();
        return true;
    }

private:
    SwsContext *m_sws_ctx;
    Frame m_temp_frame;
    int m_width;
    int m_height;
};

/**
 * Decodes video frames from a stream and converts each one to a
 * YUV420P Image of a fixed output size, using libswscale.
 */
class VideoDecoder : public Decoder {

public:
    /**
     * @param stream source video stream, forwarded to the Decoder base.
     * @param dst_w  output image width in pixels.
     * @param dst_h  output image height in pixels.
     * @throws FFmpegException if the scaler context or the temporary
     *         frame cannot be allocated.
     */
    VideoDecoder(AVStream *stream, int dst_w, int dst_h) : Decoder(stream) {
        m_sws_ctx = create_sws_context(dst_w, dst_h);
        AVFrame *frame = av_frame_alloc();
        if (frame == nullptr) {
            sws_freeContext(m_sws_ctx); // don't leak the scaler if we throw
            throw FFmpegException("av_frame_alloc is null");
        }
        m_temp_frame = Frame(frame);
        m_width = dst_w;
        m_height = dst_h;
    }

    // This class owns m_sws_ctx through a raw pointer; copying would
    // lead to a double sws_freeContext in the destructors.
    VideoDecoder(const VideoDecoder &) = delete;
    VideoDecoder &operator=(const VideoDecoder &) = delete;

    // NOTE(review): assumes Decoder declares a virtual destructor if
    // instances are deleted through a Decoder* — confirm in decoder.h.
    ~VideoDecoder() {
        sws_freeContext(m_sws_ctx);
    }

    /**
     * Receives the next decoded frame and scales it into @p image.
     *
     * @param image output: YUV420P Image of the configured size plus the
     *              frame's best-effort timestamp.
     * @return true when a frame was produced, false when none is
     *         currently available from receive_frame().
     */
    bool receive_image(Image &image) {
        if (!receive_frame(m_temp_frame.frame)) {
            return false;
        }
        image = Image(m_width, m_height);
        sws_scale(
            m_sws_ctx,
            m_temp_frame->data,
            m_temp_frame->linesize,
            0,
            m_temp_frame->height,
            image.pointers,
            image.linesizes
        );
        image.pts = m_temp_frame->best_effort_timestamp;
        m_temp_frame.unref();
        return true;
    }

private:
    // Builds a scaler from the codec's native format to dst_w x dst_h
    // YUV420P.
    SwsContext *create_sws_context(int dst_w, int dst_h) const {
        SwsContext *ctx = sws_getContext(
            m_ctx->width, m_ctx->height, m_ctx->pix_fmt,
            dst_w, dst_h, AV_PIX_FMT_YUV420P,
            SWS_BICUBIC, nullptr, nullptr, nullptr
        );
        if (ctx == nullptr) {
            throw FFmpegException("sws_getContext is null");
        }
        return ctx;
    }

    SwsContext *m_sws_ctx; // owned; freed in the destructor
    Frame m_temp_frame;    // scratch frame reused across receive_image calls
    int m_width;           // output width
    int m_height;          // output height
};

#endif
