#include "encode.h"

#include <QDir>
#include <QScreen>
#include <QGuiApplication>
#include <QAbstractEventDispatcher>
#include <QDebug>
#include <QStandardPaths>
#include <QDateTime>
#include <QBuffer>
#include <QPixmap>

#define RNDTOEVEN(X) (2 * (int(X) / 2))

/// Construct an x264 encoder producing AVC (non-Annex-B) bitstream suitable
/// for FLV muxing, at the given output resolution (rounded down to even).
/// @param resolution desired output size; forced to even dimensions as
///        required by the I420 (4:2:0) pixel format.
/// @param parent standard QObject ownership parent.
Encoder::Encoder(QSize resolution, QObject *parent)
    : QObject(parent),
      // I420 needs even dimensions. Note: QSize arithmetic rounds
      // ((resolution / 2) uses qRound), so "(resolution / 2) * 2" could round
      // an odd dimension UP past the source size; RNDTOEVEN truncates instead.
      m_resolution(RNDTOEVEN(resolution.width()), RNDTOEVEN(resolution.height())),
      m_swCtx(NULL)
{
    x264_param_t param;
    // "zerolatency" tune: no B-frames / lookahead, so frames come out as they go in.
    x264_param_default_preset(&param, "fast", "zerolatency");
    param.i_threads = QThread::idealThreadCount();
    param.b_sliced_threads = true; // slice-based threading keeps latency low
    param.i_width = m_resolution.width();
    param.i_height = m_resolution.height();
    // Variable frame rate input, timestamps in milliseconds (1/1000 s),
    // matching the millisecond timestamps FLV tags carry.
    param.b_vfr_input = true;
    param.i_timebase_num = 1;
    param.i_timebase_den = 1000;

    // Rate control: constant rate factor, capped worst-case quality.
    param.rc.i_rc_method = X264_RC_CRF;
    param.rc.f_rf_constant = 25;
    param.rc.f_rf_constant_max = 35;

    // Bitstream format: length-prefixed NALUs (not Annex-B start codes),
    // no repeated SPS/PPS, no access-unit delimiters — FLV carries the
    // parameter sets once in the AVC sequence header instead.
    param.b_annexb = 0;
    param.b_repeat_headers = 0;
    param.b_aud = 0;

    x264_param_apply_profile(&param, "baseline");
    m_264enc = x264_encoder_open(&param);
}
/// Build the FLV "AVC sequence header" tag: an AVCDecoderConfigurationRecord
/// containing the encoder's SPS/PPS, wrapped in a video tag with
/// AVCPacketType == 0. Must be sent before any encoded frame tags.
/// @return the complete FLV tag, or an empty QByteArray if the encoder
///         produced no SPS/PPS.
QByteArray Encoder::getSequenceHeader(){
    int i_nals;
    x264_nal_t *nals;
    x264_encoder_headers(m_264enc, &nals, &i_nals);
    QVector<QByteArray> sps, pps;
    for (int i = 0; i < i_nals; ++i)
    {
        // b_annexb == 0, so each payload starts with a 4-byte length prefix;
        // the +4 / -4 strips it to get the raw NALU bytes.
        switch (nals[i].i_type)
        {
        case 7: // NAL_SPS
            sps.append(QByteArray((const char *)nals[i].p_payload + 4, nals[i].i_payload - 4));
            break;
        case 8: // NAL_PPS
            pps.append(QByteArray((const char *)nals[i].p_payload + 4, nals[i].i_payload - 4));
            break;
        }
    }

    // Guard: the record below indexes sps[0][1..3]; bail out rather than
    // crash if the encoder unexpectedly produced no parameter sets.
    if (sps.isEmpty() || pps.isEmpty())
    {
        qWarning() << "Encoder::getSequenceHeader: no SPS/PPS from x264_encoder_headers";
        return QByteArray();
    }

    QByteArray sequenceHeader;
    sequenceHeader.append(0x01);              // configurationVersion
    sequenceHeader.append(sps[0][1]);         // AVCProfileIndication
    sequenceHeader.append(sps[0][2]);         // profile_compatibility
    sequenceHeader.append(sps[0][3]);         // AVCLevelIndication
    sequenceHeader.append(0xFC | 3);          // reserved (6 bits), NALU length size - 1 (2 bits) => 4-byte lengths
    sequenceHeader.append(0xE0 | sps.size()); // reserved (3 bits), num of SPS (5 bits)
    for (int i = 0; i < sps.size(); ++i)
    {
        quint16 size = sps[i].size();
        sequenceHeader.append(0xFF & (size >> 8)); // 16-bit big-endian SPS length
        sequenceHeader.append(0xFF & (size >> 0));
        sequenceHeader.append(sps[i]);
    }

    sequenceHeader.append(pps.size()); // num of PPS (full byte)
    for (int i = 0; i < pps.size(); ++i)
    {
        quint16 size = pps[i].size();
        sequenceHeader.append(0xFF & (size >> 8)); // 16-bit big-endian PPS length
        sequenceHeader.append(0xFF & (size >> 0));
        // BUGFIX: was pps[i].data() — the const char* overload of append()
        // stops at the first embedded NUL byte, truncating the PPS payload.
        sequenceHeader.append(pps[i]);
    }

    // Wrap in a video tag (type 9) flagged as a sequence header.
    QByteArray tag = flvTag(9, 0, 0, true, (quint8 *)sequenceHeader.data(), sequenceHeader.size(), true);
    return tag;
}

/// Capture the entire primary screen and return it as a QImage.
QImage Encoder::doGrabFrame()
{
    QScreen *primary = QGuiApplication::primaryScreen();
    const QRect geom = primary->geometry();
    return primary->grabWindow(0, geom.x(), geom.y(), geom.width(), geom.height()).toImage();
}

/// Tear down the encoder: drain x264's delayed frames so it can release its
/// internal buffers, then free the encoder and the swscale context.
Encoder::~Encoder()
{
    qDebug() << "Closing the encoder";

    // Flush frames still buffered inside x264. Their encoded output has no
    // sink at this point and is deliberately discarded — we only drain so
    // the encoder shuts down cleanly.
    while (x264_encoder_delayed_frames(m_264enc))
    {
        int i_nals;
        x264_nal_t *nals;
        x264_picture_t pic_out;
        int frame_size = x264_encoder_encode(m_264enc, &nals, &i_nals, 0, &pic_out);
        if (frame_size < 0)
        {
            // Encode error while flushing: break out instead of looping
            // forever on a delayed-frame count that may never decrease.
            qWarning() << "x264_encoder_encode failed while flushing delayed frames";
            break;
        }
    }

    x264_encoder_close(m_264enc);
    sws_freeContext(m_swCtx); // safe on NULL if no frame was ever encoded

    m_swCtx = nullptr;
    m_264enc = nullptr;
}

/// Convert one captured frame to I420, encode it with x264 and wrap the
/// result in an FLV video tag.
/// @param frame source image; treated as RGB32 (4 bytes/pixel) — assumes
///        doGrabFrame() output; TODO confirm/convert other formats.
/// @param pts presentation timestamp in milliseconds (encoder timebase 1/1000).
/// @return the FLV tag, or an empty QByteArray if the encoder buffered the
///         frame (or failed) and produced no output yet.
QByteArray Encoder::getEncodedFrame(QImage frame, quint32 pts)
{
    // constBits() reads the pixels without detaching; bits() would force a
    // deep copy of the whole (implicitly shared) image on every frame.
    const uint8_t *srcSlice[] = {frame.constBits()};
    // bytesPerLine() is the authoritative stride (equals width*4 for RGB32,
    // but robust if a padded format ever shows up here).
    int srcStride = frame.bytesPerLine();

    x264_picture_t pic_in, pic_out;
    x264_picture_alloc(&pic_in, X264_CSP_I420, m_resolution.width(), m_resolution.height());
    pic_in.i_pts = pts;

    // sws_getCachedContext reuses the context while the parameters match and
    // transparently rebuilds it when they change, so a screen-geometry change
    // is handled here.
    m_swCtx = sws_getCachedContext(m_swCtx,
                                   frame.width(), frame.height(), AV_PIX_FMT_RGB32,
                                   m_resolution.width(), m_resolution.height(), AV_PIX_FMT_YUV420P,
                                   SWS_LANCZOS | SWS_ACCURATE_RND, NULL, NULL, NULL);

    sws_scale(m_swCtx, srcSlice, &srcStride, 0, frame.height(), pic_in.img.plane, pic_in.img.i_stride);

    int i_nals;
    x264_nal_t *nals;
    int frame_size = x264_encoder_encode(m_264enc, &nals, &i_nals, &pic_in, &pic_out);
    QByteArray tag;
    if (frame_size > 0)
    {
        // b_annexb == 0: x264 emits one contiguous length-prefixed payload,
        // so the first NAL's pointer covers the whole frame_size bytes.
        tag = flvTag(9, pic_out.i_pts, pic_out.i_dts, pic_out.b_keyframe, nals->p_payload, frame_size);
    }
    x264_picture_clean(&pic_in); // free the picture even when encode fails
    return tag;
}

/// Build the 9-byte FLV file header followed by PreviousTagSize0 (4 zero
/// bytes) — 13 bytes total, written once at the start of the stream.
QByteArray Encoder::getFlvHeader(bool hasAudio, bool hasVideo)
{
    const char typeFlags = char((hasAudio ? 0x04 : 0x00) | (hasVideo ? 0x01 : 0x00));

    QByteArray header;
    header.reserve(13);
    header.append("FLV", 3);      // Signature
    header.append(char(0x01));    // Version
    header.append(typeFlags);     // TypeFlags: bit2 = audio, bit0 = video
    // DataOffset: 32-bit big-endian length of this header (always 9)
    header.append(char(0));
    header.append(char(0));
    header.append(char(0));
    header.append(char(0x09));
    // PreviousTagSize0: always zero before the first tag
    header.append(QByteArray(4, char(0)));
    return header;
}

/// Wrap an encoded payload in a complete FLV tag:
///   [TagType:1][DataSize:3][Timestamp:3][TimestampExt:1][StreamID:3]
///   [tag-type-specific header][payload][PreviousTagSize:4]
/// All multi-byte fields are big-endian. Only tagType 9 (video) gets its
/// 5-byte VIDEODATA/AVCVIDEOPACKET header here; other tag types would be
/// emitted without one.
/// @param tagType FLV tag type (9 = video; the only type handled below).
/// @param pts presentation timestamp (ms); @param dts decode timestamp (ms).
/// @param keyframe true for an IDR/keyframe, selects the FLV frame type.
/// @param data encoded payload bytes; @param size payload length in bytes.
/// @param sequenceHeader true when the payload is the AVC sequence header
///        (AVCPacketType 0, composition time forced to 0).
QByteArray Encoder::flvTag(quint8 tagType, qint64 pts, qint64 dts, bool keyframe, quint8 *data, int size, bool sequenceHeader)
{
    QByteArray tag;
    tag.reserve(size + 32); // header + trailer overhead is at most ~20 bytes; 32 is a safe margin
    tag.append(tagType);
    quint32 dataSize = size;
    switch (tagType)
    {
        case 9:
        {
            dataSize += 5; // video tags carry a 5-byte header (FrameType/CodecID + AVCPacketType + CompositionTime)
            // DataSize: 24-bit big-endian payload size (incl. the 5-byte video header)
            tag.append(quint8((dataSize >> 16) & 0xFF));
            tag.append(quint8((dataSize >> 8) & 0xFF));
            tag.append(quint8((dataSize >> 0) & 0xFF));

            // Timestamp: lower 24 bits big-endian, then the extended byte
            // (bits 24-31) LAST — FLV's odd split-timestamp layout.
            tag.append(quint8((dts >> 16) & 0xFF));
            tag.append(quint8((dts >> 8) & 0xFF));
            tag.append(quint8((dts >> 0) & 0xFF));
            tag.append(quint8((dts >> 24) & 0xFF));

            tag.append(char(0)); // StreamId: always 0
            tag.append(char(0));
            tag.append(char(0));

            // FrameType (high nibble: 1 = keyframe, 2 = inter) | CodecID 7 (AVC)
            tag.append((keyframe ? 0x10 : 0x20) | 0x07);
            // AVCPacketType: 0 = sequence header, 1 = NALU
            tag.append(sequenceHeader ? 0x00 : 0x01);

            // CompositionTime: 24-bit pts-dts offset (0 for the sequence header)
            pts = sequenceHeader ? 0 : (pts - dts);
            tag.append((pts >> 16) & 0xFF);
            tag.append((pts >> 8) & 0xFF);
            tag.append((pts >> 0) & 0xFF);
        }
        break;
    }

    tag.append((const char *)data, size);

    // PreviousTagSize: full size of this tag (11-byte tag header + data),
    // 32-bit big-endian, trailing the tag per the FLV spec.
    dataSize += 11;
    tag.append((dataSize >> 24) & 0xFF);
    tag.append((dataSize >> 16) & 0xFF);
    tag.append((dataSize >> 8) & 0xFF);
    tag.append((dataSize >> 0) & 0xFF);
    return tag;
}
