/*
 * 此程序解码一个mp3文件，可以将解码后音频文件保存成pcm文件，以及播放解码后数据。
 * 使用ffplay播放: ffplay -ar 44100 -ac 2 -f f32le -i test.pcm
 * ar为audio rate，ac为audio channel ，f32le为float 32位小端数据格式。
 */

#include "decodeplayaudio.h"

#ifdef __cplusplus
#define __STDC_CONSTANT_MACROS
#define UINT64_C
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavfilter/avfilter.h"
#include "libavformat/avformat.h"
#include <libavutil/pixfmt.h>
#include "libavutil/avutil.h"
#include "libavutil/ffversion.h"
#include "libswresample/swresample.h"
#include "libswscale/swscale.h"
#include "libpostproc/postprocess.h"
#include <libavdevice/avdevice.h>
#include <SDL2/SDL.h>
#include <SDL2/SDL_audio.h>
#include <SDL2/SDL_types.h>
#include <SDL2/SDL_name.h>
#include <SDL2/SDL_main.h>
#include <SDL2/SDL_config.h>
#ifdef __cplusplus
}
#endif

#include <QString>
#include <QByteArray>
#include <QDebug>
#include <QFile>
#include <QAudioFormat>
#include <QAudioOutput>
#include <QTimer>
#include <QTest>

#include <stdio.h>

#define PLAYTYPE 0  //0为使用QAudioOutput播放，1为使用SDL播放。

#define SDL_AUDIO_BUFFER_SIZE 1024
#define MAX_AUDIO_FRAME_SIZE 192000  //1 second of 48khz 32bit audio

#if PLAYTYPE  //使用SDL播放。

/* SDL播放音频是通过回调函数的方式播放，且这个回调函数是在新的线程中运行，
 * 此回调函数固定时间激发一次，这个时间和要播放的音频频率有关系。
 * 因此我们用FFMPEG读到一帧音频后，不是急着解码，而是将数据存入一个队列，
 * 等SDL回调函数激发的时候，从这个队列中取出数据，然后解码播放。
 */

/* FIFO of demuxed packets shared between the demux thread (producer,
 * main loop) and the SDL audio callback thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;  /* singly linked list: head / tail */
    int nb_packets;                      /* number of packets currently queued */
    int size;                            /* total payload bytes currently queued */
    SDL_mutex *mutex;                    /* guards all fields above */
    SDL_cond *cond;                      /* signalled whenever a packet is added */
} PacketQueue;

/* Global queue instance; allocated in main_decode_play_audio(). */
PacketQueue *audioq;
/* Frame reused across audio_decode_frame() calls.
 * NOTE(review): a dynamic initializer at file scope requires C++ — this
 * translation unit is C++ despite the FFmpeg-style C code. */
AVFrame* audioFrame = av_frame_alloc();

/* Prepare an empty queue: zero every field, then create the mutex and
 * condition variable used to hand packets between threads. */
void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(*q));
    q->mutex = SDL_CreateMutex();
    q->cond  = SDL_CreateCond();
}

/* Append *pkt to the tail of the queue.
 * The payload is duplicated into queue ownership (av_dup_packet), so the
 * caller must not free the packet afterwards.
 * Returns 0 on success, -1 on failure. */
int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    if (av_dup_packet(pkt) < 0) {
        return -1;
    }

    AVPacketList *node = (AVPacketList*)av_malloc(sizeof(AVPacketList));
    if (!node) {
        return -1;
    }
    node->pkt  = *pkt;
    node->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt) {
        q->last_pkt->next = node;   /* link behind the current tail */
    } else {
        q->first_pkt = node;        /* queue was empty */
    }
    q->last_pkt = node;
    q->nb_packets++;
    q->size += node->pkt.size;

    SDL_CondSignal(q->cond);        /* wake a blocked consumer */
    SDL_UnlockMutex(q->mutex);
    return 0;
}

/* Pop the head packet into *pkt.
 * block != 0: wait on the condition variable until a packet arrives
 *             (returns 1 once one is taken).
 * block == 0: return immediately; 0 means the queue was empty. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    int ret = 0;

    SDL_LockMutex(q->mutex);

    while (1) {
        AVPacketList *head = q->first_pkt;
        if (head != NULL) {
            q->first_pkt = head->next;
            if (q->first_pkt == NULL) {
                q->last_pkt = NULL;     /* queue is now empty */
            }
            q->nb_packets--;
            q->size -= head->pkt.size;
            *pkt = head->pkt;
            av_free(head);
            ret = 1;
            break;
        }
        if (!block) {
            break;                      /* non-blocking: report "empty" */
        }
        SDL_CondWait(q->cond, q->mutex);
    }

    SDL_UnlockMutex(q->mutex);
    return ret;
}

/*
 * Pull a packet from the global queue and decode one audio frame into
 * audio_buf as interleaved 16-bit samples.
 * Returns the number of bytes produced, or -1 on queue failure.
 *
 * NOTE(review): packet_queue_get() returns only 0 or 1, never negative,
 * so the "< 0" check below is dead code; a shutdown path would need a
 * real error return from the queue.
 * NOTE(review): every successful return leaves `pkt` unreleased
 * (av_free_packet only runs when the inner loop fully drains a packet) —
 * likely a per-packet memory leak.
 * NOTE(review): assumes the decoder emits planar float (FLTP, typical for
 * mp3float/AAC) — TODO confirm for other codecs; buf_size is unused.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
    /* State is static so a partially consumed packet could be resumed
     * across calls. */
    static AVPacket pkt;
    static uint8_t *audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    int len1, data_size;

    for(;;)
    {
        /* Blocking get: waits until the demux thread queues a packet. */
        if(packet_queue_get(audioq, &pkt, 1) < 0)
        {
            return -1;
        }

        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;

        while(audio_pkt_size > 0)
        {
            int got_picture;

            /* Deprecated pre-send/receive API; `ret` is the number of
             * packet bytes the decoder consumed.
             * NOTE(review): `pkt` is passed unmodified each iteration, so
             * a packet holding several frames would re-decode the same
             * data instead of advancing by `ret` bytes. */
            int ret = avcodec_decode_audio4( aCodecCtx, audioFrame, &got_picture, &pkt);
            if( ret < 0 ) {
                printf("Error in decoding audio frame.\n");
                exit(0);
            }

            if( got_picture ) {
                int in_samples = audioFrame->nb_samples;
                /* nb_samples * 2 channels * 2 bytes (S16). */
                short *sample_buffer = (short*)malloc(audioFrame->nb_samples * 2 * 2);
                memset(sample_buffer, 0, audioFrame->nb_samples * 4);

                int i=0;
                /* Planar float: plane 0 is the first channel. */
                float *inputChannel0 = (float*)(audioFrame->extended_data[0]);

                // Mono
                if( audioFrame->channels == 1 ) {
                    for( i=0; i<in_samples; i++ ) {
                        /* Clamp to [-1, 1] before scaling to 16-bit. */
                        float sample = *inputChannel0++;
                        if( sample < -1.0f ) {
                            sample = -1.0f;
                        } else if( sample > 1.0f ) {
                            sample = 1.0f;
                        }

                        sample_buffer[i] = (int16_t)(sample * 32767.0f);
                    }
                } else { // Stereo
                    /* Interleave the two planes as LRLR...
                     * NOTE(review): no clamping here, unlike the mono path. */
                    float* inputChannel1 = (float*)(audioFrame->extended_data[1]);
                    for( i=0; i<in_samples; i++) {
                        sample_buffer[i*2] = (int16_t)((*inputChannel0++) * 32767.0f);
                        sample_buffer[i*2+1] = (int16_t)((*inputChannel1++) * 32767.0f);
                    }
                }
//                fwrite(sample_buffer, 2, in_samples*2, pcmOutFp);
                /* NOTE(review): copies in_samples*4 bytes even for mono,
                 * where only in_samples*2 bytes are meaningful (rest were
                 * zeroed by the memset above). */
                memcpy(audio_buf,sample_buffer,in_samples*4);
                free(sample_buffer);
            }

            audio_pkt_size -= ret;

            if (audioFrame->nb_samples <= 0)
            {
                continue;
            }

            data_size = audioFrame->nb_samples * 4;
            return data_size;
        }

        if(pkt.data)
            av_free_packet(&pkt);
    }
}

/*
 * 回调函数的后2个参数stream和len不需要用户传递，是由SDL库在需要时自动管理的。
 * 在回调函数里，调用SDL_MixAudio时，需使用这2个参数，SDL库会自动分配缓冲区及决定大小。
 * 使用者只需一个指向pcm数据的缓冲区的全局变量指针传给SDL_MixAudio()函数。
 */
/*
 * SDL audio callback: fill `stream` with exactly `len` bytes of PCM.
 * Runs on SDL's own audio thread; `userdata` carries the AVCodecContext.
 * Decoded data is staged in a static buffer and copied out chunk by
 * chunk; when decoding fails, a short block of silence is emitted so the
 * device stays fed.
 */
void  audio_callback(void *userdata, Uint8 *stream, int len)
{
    AVCodecContext *aCodecCtx = (AVCodecContext *) userdata;

    /* Staging buffer for decoded PCM; index/size track the unread span. */
    static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
    static unsigned int audio_buf_size = 0;
    static unsigned int audio_buf_index = 0;

    while (len > 0)
    {
        /* Staging buffer drained? Decode the next frame into it. */
        if (audio_buf_index >= audio_buf_size)
        {
            int decoded = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
            if (decoded < 0) {
                /* Decode failure: substitute 1 KiB of silence. */
                audio_buf_size = 1024;
                memset(audio_buf, 0, audio_buf_size);
            } else {
                audio_buf_size = decoded;
            }
            audio_buf_index = 0;
        }

        /* Copy as much staged data as the SDL buffer can still take;
         * anything left over waits for the next pass. */
        int chunk = audio_buf_size - audio_buf_index;
        if (chunk > len) {
            chunk = len;
        }
        memcpy(stream, audio_buf + audio_buf_index, chunk);

        len -= chunk;
        stream += chunk;
        audio_buf_index += chunk;
    }
}

#endif

int main_decode_play_audio(const char * src)
{
//    QString _url="/home/action/huadie.mp3";

    const char outFileName[] = "test.pcm";
    FILE *file=fopen(outFileName,"w+b");
    if(!file){
        printf("Cannot open output file.\n");
        return -1;
    }

    AVFormatContext *fmtCtx = avformat_alloc_context();
    AVCodecContext *codecCtx = NULL;
    AVPacket *pkt = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();

    int aStreamIndex = -1;

    if(avformat_open_input(&fmtCtx,src,NULL,NULL)<0){
        qDebug("Cannot open input file.");
        return -1;
    }
    if(avformat_find_stream_info(fmtCtx,NULL)<0){
        qDebug("Cannot find any stream in file.");
        return -1;
    }

    av_dump_format(fmtCtx,0,src,0);

    for(size_t i=0;i<fmtCtx->nb_streams;i++){
        if(fmtCtx->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_AUDIO){
            aStreamIndex=(int)i;
            break;
        }
    }
    if(aStreamIndex==-1){
        qDebug("Cannot find audio stream.");
        return -1;
    }

    AVCodecParameters *aCodecPara = fmtCtx->streams[aStreamIndex]->codecpar;
    AVCodec *codec = avcodec_find_decoder(aCodecPara->codec_id);
    if(!codec){
        qDebug("Cannot find any codec for audio.");
        return -1;
    }

    codecCtx = avcodec_alloc_context3(codec);
    if(avcodec_parameters_to_context(codecCtx,aCodecPara)<0){
        qDebug("Cannot alloc codec context.");
        return -1;
    }
    codecCtx->pkt_timebase = fmtCtx->streams[aStreamIndex]->time_base;

    if(avcodec_open2(codecCtx,codec,NULL)<0){
        qDebug("Cannot open audio codec.");
        return -1;
    }

    //设置转码参数
    uint64_t out_channel_layout = codecCtx->channel_layout;
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    int out_sample_rate = codecCtx->sample_rate;
    int out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
    //printf("out rate : %d , out_channel is: %d\n",out_sample_rate,out_channels);

    uint8_t *audio_out_buffer = (uint8_t*)av_malloc(MAX_AUDIO_FRAME_SIZE*2);

    SwrContext *swr_ctx = swr_alloc_set_opts(NULL,
                                             out_channel_layout,
                                             out_sample_fmt,
                                             out_sample_rate,
                                             codecCtx->channel_layout,
                                             codecCtx->sample_fmt,
                                             codecCtx->sample_rate,
                                             0,NULL);
    swr_init(swr_ctx);

#if PLAYTYPE
    if(SDL_Init(SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        printf( "Could not initialize SDL - %s\n", SDL_GetError());
        return -1;
    }

    SDL_LockAudio();
    SDL_AudioSpec spec;
    SDL_AudioSpec wanted_spec;
    wanted_spec.freq = codecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = codecCtx->channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = audio_callback;
    wanted_spec.userdata = codecCtx;
    if(SDL_OpenAudio(&wanted_spec, &spec) < 0)
    {
        fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
        return false;
    }
    SDL_UnlockAudio();
    SDL_PauseAudio(0);

    printf("比特率 %3d\n", fmtCtx->bit_rate);
    printf("解码器名称 %s\n", codecCtx->codec->long_name);
    printf("time_base  %d \n", codecCtx->time_base);
    printf("声道数  %d \n", codecCtx->channels);
    printf("sample per second  %d \n", codecCtx->sample_rate);

    //初始化音频队列
    audioq = new PacketQueue;
    packet_queue_init(audioq);

#else
    QAudioOutput *audioOutput;
    QIODevice *streamOut;

    QAudioFormat audioFmt;
    audioFmt.setSampleRate(44100);
    audioFmt.setChannelCount(2);
    audioFmt.setSampleSize(16);
    audioFmt.setCodec("audio/pcm");
    audioFmt.setByteOrder(QAudioFormat::LittleEndian);
    audioFmt.setSampleType(QAudioFormat::SignedInt);

    QAudioDeviceInfo info = QAudioDeviceInfo::defaultOutputDevice();
    if(!info.isFormatSupported(audioFmt)){
        audioFmt = info.nearestFormat(audioFmt);
    }
    audioOutput = new QAudioOutput(audioFmt);
    audioOutput->setVolume(100);

    streamOut = audioOutput->start();

    double sleep_time=0;
#endif

    while(av_read_frame(fmtCtx,pkt)>=0)
    {
        if(pkt->stream_index==aStreamIndex)
        {

#if PLAYTYPE
            packet_queue_put(audioq, pkt);
            SDL_Delay(10);
#else
            if(avcodec_send_packet(codecCtx,pkt)>=0)
            {
                while(avcodec_receive_frame(codecCtx,frame)>=0)
                {
                    //1.将解码后音频数据保存成pcm文件
                    /* Planar（平面），其数据格式排列方式为 (特别记住，该处是以点nb_samples采样点来交错，不是以字节交错）:
                     * LLLLLLRRRRRRLLLLLLRRRRRRLLLLLLRRRRRRL...（每个LLLLLLRRRRRR为一个音频帧）
                     * 而不带P的数据格式（即交错排列）排列方式为：
                     * LRLRLRLRLRLRLRLRLRLRLRLRLRLRLRLRLRLRL...（每个LR为一个音频样本）
                     */
                    if(av_sample_fmt_is_planar(codecCtx->sample_fmt))
                    {
                        int numBytes =av_get_bytes_per_sample(codecCtx->sample_fmt);
                        //pcm播放时是LRLRLR格式，所以要交错保存数据
                        for(int i=0;i<frame->nb_samples;i++){
                            for(int ch=0;ch<codecCtx->channels;ch++){
                                fwrite((char*)frame->data[ch]+numBytes*i,1,numBytes,file);
                            }
                        }
                    }

                    //2.解码音频后播放
                    if(av_sample_fmt_is_planar(codecCtx->sample_fmt))
                    {
                        int len = swr_convert(swr_ctx,
                                              &audio_out_buffer,
                                              MAX_AUDIO_FRAME_SIZE*2,
                                              (const uint8_t**)frame->data,
                                              frame->nb_samples);
                        if(len<=0){
                            continue;
                        }
                        //qDebug("convert length is: %d.\n",len);

                        int out_size = av_samples_get_buffer_size(0,
                                                                  out_channels,
                                                                  len,
                                                                  out_sample_fmt,
                                                                  1);
                        //qDebug("buffer size is: %d.",dst_bufsize);

                        sleep_time=(out_sample_rate*16*2/8)/out_size;

                        /* 解码两帧间的延时是多少？什么时候该延时？
                         * 通过audioOutput->bytesFree()获取播放缓冲区是否还有数据，有就表示此时缓冲区中还有数据没有播放完，延时等待播放完。
                         * 延时时间: 这里，一秒钟有44100（字节）* 16（位）* 2（通道）/ 8（位）=176400字节
                         * 通过out_size = av_samples_get_buffer_size()获取数据缓冲区中的字节数，总数量/out_size=延时数。
                         */
                        if(audioOutput->bytesFree()<out_size){
                            QTest::qSleep(sleep_time);
                            streamOut->write((char*)audio_out_buffer,out_size);
                        }else {
                            streamOut->write((char*)audio_out_buffer,out_size);
                        }
                    }
                }
            }
#endif
        }

//使用SDL播放，将数据存入队列，因此不调用av_free_packet 释放
#if !PLAYTYPE
        av_packet_unref(pkt);
#endif
    }

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_close(codecCtx);
    avcodec_free_context(&codecCtx);
    avformat_free_context(fmtCtx);

    fclose(file);

#if PLAYTYPE
    SDL_Quit();
#else
    streamOut->close();
#endif

    return 0;
}
