﻿#include "FormatConvert.h"

extern "C"
{
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
};

/**
 * @brief Default constructor; creates the converter with no parent QObject.
 */
FormatConvert::FormatConvert() : QObject(nullptr)
{

}

/**
 * @brief Convert a QVideoFrame into a newly allocated AVFrame in the requested
 *        pixel format. Currently only Format_RGB32 -> Format_YUV420P is supported.
 *
 * The returned frame is allocated here (av_frame_alloc + av_frame_get_buffer);
 * the caller owns it and must release it with av_frame_free().
 *
 * NOTE(review): the pixel data is read via frame.bits(), which assumes the
 * caller has already mapped the frame (QVideoFrame::map) — confirm at call sites.
 *
 * @param frame      source video frame (mapped, Format_RGB32)
 * @param _dstFormat requested destination format (Format_YUV420P)
 * @return converted frame on success; nullptr on unsupported format or
 *         allocation/conversion-context failure
 */
AVFrame* FormatConvert::convertToFrame(const QVideoFrame &frame,QVideoFrame::PixelFormat _dstFormat)
{
    const int width = frame.width();
    const int height = frame.height();

    /* Initialise to AV_PIX_FMT_NONE so an unsupported format is still caught in
     * release builds, where Q_ASSERT_X compiles away and the original code left
     * these values uninitialised (undefined behaviour). */
    AVPixelFormat srcFormat = AV_PIX_FMT_NONE;
    AVPixelFormat dstFormat = AV_PIX_FMT_NONE;

    switch (frame.pixelFormat()) {
    case QVideoFrame::Format_RGB32:
        /* Qt RGB32 is 0xffRRGGBB per pixel, i.e. BGRA byte order in memory on
         * little-endian hosts. */
        srcFormat = AV_PIX_FMT_BGRA;
        break;
    default:
        Q_ASSERT_X(false,"error","not support");
        return nullptr;
    }

    switch (_dstFormat) {
    case QVideoFrame::Format_YUV420P:
        dstFormat = AV_PIX_FMT_YUV420P;
        break;
    default:
        Q_ASSERT_X(false,"error","not support");
        return nullptr;
    }

    struct SwsContext * m_img_convert_ctx = sws_getContext(width, height, srcFormat,
                                                           width, height, dstFormat,
                                                           SWS_BICUBIC, NULL, NULL, NULL);
    if (!m_img_convert_ctx)
        return nullptr;

    /* Wrap the Qt pixel buffer in an AVFrame without copying. */
    AVFrame * srcFrame = av_frame_alloc();
    AVFrame * yuvFrame = av_frame_alloc();
    if (!srcFrame || !yuvFrame) {
        av_frame_free(&srcFrame);
        av_frame_free(&yuvFrame);
        sws_freeContext(m_img_convert_ctx);
        return nullptr;
    }

    srcFrame->format = srcFormat;
    srcFrame->width = width;
    srcFrame->height = height;
    srcFrame->data[0] = (uint8_t*)frame.bits();
    /* For images, linesize is the stride of one row — not the total data length
     * as it can be for audio. Use the frame's own stride so rows that carry
     * alignment padding are handled correctly (the original hard-coded width*4). */
    srcFrame->linesize[0] = frame.bytesPerLine();

    yuvFrame->format = dstFormat;
    yuvFrame->width = width;
    yuvFrame->height = height;

    /* Allocate the destination YUV plane buffers, 32-byte aligned. */
    if (av_frame_get_buffer(yuvFrame, 32) < 0) {
        av_frame_free(&srcFrame);
        av_frame_free(&yuvFrame);
        sws_freeContext(m_img_convert_ctx);
        return nullptr;
    }

    sws_scale(m_img_convert_ctx,(const uint8_t* const*)srcFrame->data, srcFrame->linesize, 0, height, yuvFrame->data, yuvFrame->linesize);

    /* srcFrame is only a wrapper; freeing it does not touch the Qt pixel data. */
    av_frame_free(&srcFrame);
    sws_freeContext(m_img_convert_ctx);

    return yuvFrame;
}

/**
 * @brief Convert to a destination width/height (scaling variant).
 *
 * Not yet implemented: always returns nullptr. Callers must check the result.
 * TODO: implement via sws_getContext(width, height, ..., dstW, dstH, ...) like
 * the same-size overload above.
 */
AVFrame *FormatConvert::convertToFrame(const QVideoFrame &frame,int dstW,int dstH,QVideoFrame::PixelFormat _dstFormat)
{
    // Silence unused-parameter warnings until the scaling path is written.
    (void)frame;
    (void)dstW;
    (void)dstH;
    (void)_dstFormat;

    return nullptr;
}

/**
 * @brief Resample one audio frame to the requested rate / channel count /
 *        sample format, writing the converted samples into a caller buffer.
 *
 * NOTE: the resampler context and destination buffer live in function-local
 * statics, so this function supports exactly ONE conversion configuration per
 * process and is NOT thread-safe. As the original comment noted, this state
 * really belongs in a C++ object whose destructor releases it; kept as-is to
 * preserve the existing contract.
 *
 * @param srcFrame        decoded input frame (rate/format/channel count read from it)
 * @param dstSampleRate   target sample rate
 * @param dstChannels     target channel count
 * @param dstSampleFormat target sample format
 * @param outBuf          [out] caller buffer; must hold at least *outBufSize bytes
 * @param outBufSize      [out] number of valid bytes written to outBuf
 * @param outNbSamples    [out] number of samples actually produced (may be 0)
 * @param linesize        [out] optional (may be NULL) plane line size
 * @return 0 on success, -1 on failure
 */
int audio_rescale_private(AVFrame *srcFrame, int dstSampleRate, int dstChannels, enum AVSampleFormat dstSampleFormat,  uint8_t *outBuf, int *outBufSize, int *outNbSamples,int *linesize)
{
    /* Lazily-initialised, process-wide resampler state (see NOTE above). */
    static struct SwrContext *aud_convert_ctx = NULL;
    static int32_t maxNbSample = 0;
    static uint8_t **dstBuffer = NULL;

    if (!aud_convert_ctx) {
        aud_convert_ctx = swr_alloc_set_opts(NULL,
                                             av_get_default_channel_layout(dstChannels),
                                             dstSampleFormat,
                                             dstSampleRate,
                                             av_get_default_channel_layout(srcFrame->channels),
                                             (AVSampleFormat)srcFrame->format,
                                             srcFrame->sample_rate,
                                             0, NULL);
        if (!aud_convert_ctx) {
            /* Allocation failure was previously unchecked and swr_init(NULL)
             * would have been called. */
            return -1;
        }
        if (swr_init(aud_convert_ctx) != 0) {
            swr_free(&aud_convert_ctx);
            return -1;
        }

        /* Equal playback duration on both sides gives X/dstRate = nb/srcRate,
         * e.g. X/8000 = nb/44100. Round up so the buffer is never too small. */
        maxNbSample = av_rescale_rnd(srcFrame->nb_samples, dstSampleRate, srcFrame->sample_rate, AV_ROUND_UP);

        /* linesize is an [out] parameter of the allocation (may be NULL). */
        if (av_samples_alloc_array_and_samples(&dstBuffer,linesize,dstChannels,
                                               maxNbSample,(AVSampleFormat)dstSampleFormat,0) < 0){
            printf("alloc fill error");
            /* Also drop the context: otherwise the next call would skip this
             * init block and dereference the NULL dstBuffer. */
            swr_free(&aud_convert_ctx);
            return -1;
        }
    }

    /* Account for samples still buffered inside the resampler. */
    int dstNBSamples = av_rescale_rnd(swr_get_delay(aud_convert_ctx,srcFrame->sample_rate) + srcFrame->nb_samples,
                                      dstSampleRate,srcFrame->sample_rate,AV_ROUND_UP);

    if (dstNBSamples > maxNbSample) {
        av_freep(&dstBuffer[0]);
        if (av_samples_alloc(dstBuffer,linesize,dstChannels,dstNBSamples,dstSampleFormat,0) < 0) {
            return -1;
        }
        /* Record the grown capacity — the original forgot this, forcing a
         * free/alloc cycle on every call after the first growth. */
        maxNbSample = dstNBSamples;
    }

    /* swr_convert returns the number of samples actually produced. It can be
     * smaller than the theoretical count (usually it is equal) and can even be
     * 0, especially when down-sampling (e.g. 44100 -> 8000); 0 means the data
     * for this round can simply be discarded. */
    int ret = -1;
    if ((ret = swr_convert(aud_convert_ctx,
                           dstBuffer, dstNBSamples,
                           (const uint8_t**)srcFrame->data, srcFrame->nb_samples)) < 0) {
        printf("Could not convert input samples\n");
        return -1;
    }

    *outNbSamples = ret;

    /* av_samples_alloc may pad for alignment, so its size is not the real
     * payload length; recompute with align = 1 to report only valid bytes. */
    int realSize = av_samples_get_buffer_size(linesize,dstChannels,ret,(AVSampleFormat)dstSampleFormat,1);
    if (realSize < 0) {
        return -1;
    }
    *outBufSize = realSize;

    /* NOTE(review): only plane 0 is copied. That is complete for packed
     * (interleaved) formats; for planar multi-channel output the remaining
     * planes would be lost — confirm intended dstSampleFormat at call sites. */
    memcpy(outBuf,dstBuffer[0],realSize);

    /* The static dstBuffer / aud_convert_ctx are deliberately never freed here;
     * they persist for the life of the process (see NOTE above). */
    return 0;
}

/**
 * @brief Convenience overload: resample srcFrame into outBuf when the caller
 *        does not need the destination linesize reported back.
 * @return 0 on success, -1 on failure (see audio_rescale_private).
 */
int FormatConvert::audio_rescale(AVFrame *srcFrame, int dstSampleRate, int dstChannels, enum AVSampleFormat dstSampleFormat,  uint8_t *outBuf, int *outBufSize, int *outNbSamples)
{
    const int result = audio_rescale_private(srcFrame, dstSampleRate, dstChannels,
                                             dstSampleFormat, outBuf, outBufSize,
                                             outNbSamples, NULL);
    return result;
}


/**
 * @brief Resample srcFrame using dstFrame's sample_rate / channels / format as
 *        the target, writing the samples into dstFrame's pre-allocated data[0]
 *        buffer and updating its nb_samples and linesize[0] on success.
 * @return 0 on success, -1 on failure
 */
int FormatConvert::audio_rescale(AVFrame *srcFrame,AVFrame *dstFrame)
{
    int linesize = 0;
    int outBufSize = 0;
    int ret = audio_rescale_private(srcFrame,dstFrame->sample_rate,dstFrame->channels,(AVSampleFormat)dstFrame->format,dstFrame->data[0],&outBufSize,&dstFrame->nb_samples,&linesize);

    /* Only publish the linesize on success — the original wrote a meaningless
     * 0 into dstFrame even when the conversion failed. */
    if (ret == 0)
        dstFrame->linesize[0] = linesize;

    return ret;
}

