#include "t_av_packet.h"
#include "t_box_player.h"

/// Wraps one demuxed AVPacket, caching its stream, codec context and a
/// millisecond presentation timestamp derived from the stream time base.
/// @param a_player_ctx owning player context (provides the format context)
/// @param a_av_packet  packet as read by av_read_frame(); copied by value member
T_AV_Packet::T_AV_Packet(T_Box_Player_Context *a_player_ctx, AVPacket &a_av_packet)
    : m_av_packet(a_av_packet)
    , m_player_ctx(a_player_ctx)
    , m_format_ctx(a_player_ctx->m_format_ctx)
{
    m_stream_index = m_av_packet.stream_index;
    m_stream = m_format_ctx->streams[m_stream_index];
    m_codec_ctx = m_stream->codec;

    // pts can legally be AV_NOPTS_VALUE; multiplying the sentinel through
    // would produce a nonsense timing. Fall back to dts, then to 0.
    int64_t v_pts = m_av_packet.pts;
    if (v_pts == (int64_t)AV_NOPTS_VALUE)
        v_pts = m_av_packet.dts;
    if (v_pts == (int64_t)AV_NOPTS_VALUE)
        v_pts = 0;

    // Convert stream-time-base units to milliseconds.
    m_timing =
            1000.0
            * v_pts
            * m_stream->time_base.num
            / m_stream->time_base.den
            ;
}

/// Decodes the whole audio packet into interleaved signed-16-bit samples.
///
/// Iterates avcodec_decode_audio3() until the packet is consumed, converting
/// non-S16 sample formats via av_audio_convert(). Decode errors skip the
/// remainder of the packet; conversion errors abort and return what was
/// decoded so far.
/// @return raw S16 sample bytes (possibly empty on error)
QByteArray T_AV_Packet::decodeAudio()
{
    Q_ASSERT(m_codec_ctx);
    AVPacket v_av_packet = this->m_av_packet; // local copy: data/size are advanced below
    QByteArray v_result;
    while (v_av_packet.size > 0)
    {
        int v_dec_len = sizeof(m_player_ctx->m_audio_decode_array);
        int v_enc_len = avcodec_decode_audio3(
                m_codec_ctx,
                (int16_t *)m_player_ctx->m_audio_decode_array,
                &v_dec_len,
                &v_av_packet
                );
        if (v_enc_len < 0)
        {
            /* if error, we skip the frame */
            v_av_packet.size = 0; //force frame finished
        }
        else if (v_enc_len == 0)
        {
            // No input consumed: without this guard the loop would never
            // make progress and spin forever.
            break;
        }
        else
        {
            if (m_codec_ctx->sample_fmt == SAMPLE_FMT_S16)
            {
                // Already in the target format; append directly.
                v_result.append((char *)m_player_ctx->m_audio_decode_array, v_dec_len);
            }
            else
            {
                // Convert whatever the codec produced into S16.
                AVAudioConvert *v_conv_ctx =
                        av_audio_convert_alloc(
                                SAMPLE_FMT_S16,
                                1,
                                m_codec_ctx->sample_fmt,
                                1,
                                NULL,
                                0);
                if (!v_conv_ctx)
                {
                    qDebug() << "av_audio_convert_alloc() failed";
                    break;
                }
                const void *v_ibuf[6]= {m_player_ctx->m_audio_decode_array};
                void *v_obuf[6]= {m_player_ctx->m_audio_convert_array};
                int v_istride[6]= {av_get_bits_per_sample_format(m_codec_ctx->sample_fmt)/8};
                int v_ostride[6]= {av_get_bits_per_sample_format(SAMPLE_FMT_S16)/8};
                int v_len= v_dec_len/v_istride[0];
                int v_convert_result =
                        av_audio_convert(v_conv_ctx, v_obuf, v_ostride, v_ibuf, v_istride, v_len);
                // Free unconditionally: the original code leaked the context
                // on the failure path.
                av_audio_convert_free(v_conv_ctx);
                if (v_convert_result < 0)
                {
                    qDebug() << "av_audio_convert() failed";
                    break;
                }
                v_dec_len = v_len * v_ostride[0];
                v_result.append((char *)m_player_ctx->m_audio_convert_array, v_dec_len);
            }
            // Advance past the bytes the decoder consumed.
            v_av_packet.data += v_enc_len;
            v_av_packet.size -= v_enc_len;
        }
    }
    return v_result;
}

/// Decodes the video packet and, when an overlay is supplied, scales the
/// resulting frame into it as YUV420P.
/// @param a_overlay destination SDL overlay, or NULL to decode-only
/// @return true if a complete picture was decoded (and blitted when
///         a_overlay is non-NULL); false on decode/convert failure or when
///         the packet did not complete a picture
bool T_AV_Packet::decodeVideo(SDL_Overlay *a_overlay)
{
    AVFrame *v_frame = avcodec_alloc_frame();
    if (!v_frame)
        return false;

    int v_got_picture = 0;
    if (avcodec_decode_video2(
            m_codec_ctx, //AVCodecContext *avctx,
            v_frame, //AVFrame *picture,
            &v_got_picture, //int *got_picture_ptr,
            &m_av_packet //AVPacket *avpkt
            ) < 0
            || !v_got_picture)
    {
        // Decode error, or the packet did not complete a frame yet.
        av_free(v_frame);
        return false;
    }

    if (!a_overlay)
    {
        // Decode-only mode: caller wanted the codec state advanced.
        av_free(v_frame);
        return true;
    }

    SDL_LockYUVOverlay(a_overlay);

    // SDL YV12 overlays order the planes Y,V,U whereas FFmpeg's YUV420P is
    // Y,U,V — hence the 0/2/1 index swap.
    AVPicture v_dst;
    memset(&v_dst, 0, sizeof(AVPicture));
    v_dst.data[0] = a_overlay->pixels[0];
    v_dst.data[1] = a_overlay->pixels[2];
    v_dst.data[2] = a_overlay->pixels[1];
    v_dst.linesize[0] = a_overlay->pitches[0];
    v_dst.linesize[1] = a_overlay->pitches[2];
    v_dst.linesize[2] = a_overlay->pitches[1];

    SwsContext *v_sws_ctx = sws_getContext(
            m_codec_ctx->width, //int srcW,
            m_codec_ctx->height, //int srcH,
            m_codec_ctx->pix_fmt, //enum PixelFormat srcFormat,
            a_overlay->w, //int dstW,
            a_overlay->h, //int dstH,
            PIX_FMT_YUV420P, //enum PixelFormat dstFormat,
            SWS_FAST_BILINEAR, //SWS_BICUBIC, //int flags,
            NULL, //SwsFilter *srcFilter,
            NULL, //SwsFilter *dstFilter,
            NULL  //const double *param
            );
    if (!v_sws_ctx)
    {
        // Unsupported format or bad dimensions: the original code would
        // have crashed inside sws_scale() on a NULL context.
        SDL_UnlockYUVOverlay(a_overlay);
        av_free(v_frame);
        return false;
    }

    // Feed the frame planes straight to sws_scale — no need for the
    // intermediate AVPicture copy of data/linesize.
    sws_scale(
            v_sws_ctx,
            v_frame->data,
            v_frame->linesize,
            0,
            m_codec_ctx->height,
            v_dst.data,
            v_dst.linesize
            );
    sws_freeContext(v_sws_ctx);

    SDL_UnlockYUVOverlay(a_overlay);
    av_free(v_frame);
    return true;
}
