//
// Created by alexander on 2/23/19.
//

#include </home/alexander/CLionProjects/FFmpegLogTest/log/log.h>
#include "audiovideomuxer.h"
#include <stdio.h>
#include <libavformat/avformat.h>
/*
FIX: H.264 in some container format (FLV, MP4, MKV etc.) need
"h264_mp4toannexb" bitstream filter (BSF)
  *Add SPS,PPS in front of IDR frame
  *Add start code ("0,0,0,1") in front of NALU
H.264 in some container (MPEG2TS) don't need this BSF.
*/
//'1': Use H.264 Bitstream Filter
#define USE_H264BSF 0

/*
FIX: AAC carried as raw ADTS frames needs the "aac_adtstoasc"
bitstream filter (BSF) when muxed into MP4/FLV/MKV-style containers:
  *Strips the ADTS header from each frame
  *Builds the MPEG-4 AudioSpecificConfig extradata the container expects
AAC in ADTS-friendly containers (MPEG2TS) doesn't need this BSF.
*/
//'1': Use AAC Bitstream Filter
#define USE_AACBSF 0

/**
 * Mux one audio file and one video file into a single output container.
 *
 * The first video stream of inputVideoFileName and the first audio stream of
 * inputAudioFileName are stream-copied (no re-encoding) into outputFileName;
 * the container format is guessed from the output file's extension. Packets
 * are interleaved by comparing the running timestamps of the two inputs.
 *
 * @param inputAudioFileName path of the input audio file
 * @param inputVideoFileName path of the input video file
 * @param outputFileName     path of the muxed output file
 * @return 0 on success, -1 on failure
 *
 * NOTE(review): uses pre-4.0 FFmpeg APIs (av_register_all, AVStream->codec,
 * avcodec_copy_context, av_free_packet) that are deprecated/removed in newer
 * FFmpeg releases — confirm the project pins an older FFmpeg before upgrading.
 */
int muxerAudioVideo(const char *inputAudioFileName, const char *inputVideoFileName, const char *outputFileName)
{
    AVOutputFormat *avOutputFormat = NULL;
    /* outputAvFormatContext MUST start as NULL: early failure paths jump to
     * "failed" before it is assigned, and the cleanup code inspects it. */
    AVFormatContext *audioAvFormatContext = NULL, *videoAvFormatContext = NULL, *outputAvFormatContext = NULL;
    AVPacket avPacket;
    int returnCode, i;
    int videoIndex_V = -1, videoIndex_Out = -1;
    int audioIndex_A = -1, audioIndex_Out = -1;
    int frameIndex = 0;
    int64_t curPts_V = 0, curPts_A = 0;

    av_register_all();

    /* Open and probe both input files. */
    returnCode = avformat_open_input(&videoAvFormatContext, inputVideoFileName, 0, 0);
    if(returnCode < 0)
    {
        logError("Could not open input video file!", "");
        goto failed;
    }

    returnCode = avformat_find_stream_info(videoAvFormatContext, 0);
    if(returnCode < 0)
    {
        logError("Failed to get input video stream info!", "");
        goto failed;
    }

    returnCode = avformat_open_input(&audioAvFormatContext, inputAudioFileName, 0, 0);
    if(returnCode < 0)
    {
        logError("Could not open input audio file!", "");
        goto failed;
    }

    returnCode = avformat_find_stream_info(audioAvFormatContext, 0);
    if(returnCode < 0)
    {
        logError("Failed to get input audio stream info!", "");
        goto failed;
    }

    logInfo("Input file Info:\n", "");
    av_dump_format(videoAvFormatContext, 0, inputVideoFileName, 0);
    av_dump_format(audioAvFormatContext, 0, inputAudioFileName, 0);
    logInfo("==========================\n", "");

    /* Create the output context; the muxer is picked from the file name. */
    avformat_alloc_output_context2(&outputAvFormatContext, NULL, NULL, outputFileName);
    if(!outputAvFormatContext)
    {
        logError("Could not create output context!\n", "");
        returnCode = AVERROR_UNKNOWN;
        goto failed;
    }
    avOutputFormat = outputAvFormatContext->oformat;

    /* Find the first video stream of the video input and create a matching
     * output stream (stream copy). */
    for(i = 0; i < videoAvFormatContext->nb_streams; i++)
    {
        if(videoAvFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            AVStream *inputStream = videoAvFormatContext->streams[i];
            AVStream *outputStream = avformat_new_stream(outputAvFormatContext, inputStream->codec->codec);
            videoIndex_V = i;
            if(!outputStream)
            {
                logError("Failed allocating output stream!\n", "");
                returnCode = AVERROR_UNKNOWN;
                goto failed;
            }

            videoIndex_Out = outputStream->index;
            /* Copy codec parameters from input to output. */
            returnCode = avcodec_copy_context(outputStream->codec, inputStream->codec);
            if(returnCode < 0)
            {
                logError("Failed to copy context from input to output stream codec context\n", "");
                goto failed;
            }
            /* Let the muxer pick its own tag for the codec. */
            outputStream->codec->codec_tag = 0;
            if(outputAvFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
            {
                outputStream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
            }
            break;
        }
    }

    /* Find the first audio stream of the audio input and create a matching
     * output stream (stream copy). */
    for(i = 0; i < audioAvFormatContext->nb_streams; i++)
    {
        if(audioAvFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            AVStream *inputStream = audioAvFormatContext->streams[i];
            AVStream *outputStream = avformat_new_stream(outputAvFormatContext, inputStream->codec->codec);
            audioIndex_A = i;
            if(!outputStream)
            {
                logError("Failed allocating output stream!\n", "");
                returnCode = AVERROR_UNKNOWN;
                goto failed;
            }
            audioIndex_Out = outputStream->index;
            /* Copy codec parameters from input to output. */
            returnCode = avcodec_copy_context(outputStream->codec, inputStream->codec);
            if(returnCode < 0)
            {
                logError("Failed to copy context from input to output stream codec context\n", "");
                goto failed;
            }
            outputStream->codec->codec_tag = 0;
            if(outputAvFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
            {
                outputStream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
            }
            break;
        }
    }

    /* Both streams are required; indexing streams[-1] below would be UB. */
    if(videoIndex_V < 0 || audioIndex_A < 0)
    {
        logError("Input files lack a video or audio stream!\n", "");
        returnCode = AVERROR_UNKNOWN;
        goto failed;
    }

    logInfo("================outputFile Info================\n", "");
    av_dump_format(outputAvFormatContext, 0, outputFileName,  1);
    logInfo("===============================================\n", "");
    /* Open the output file unless the muxer writes no file itself. */
    if(!(avOutputFormat->flags & AVFMT_NOFILE))
    {
        returnCode = avio_open(&outputAvFormatContext->pb, outputFileName, AVIO_FLAG_WRITE);
        if(returnCode < 0)
        {
            logError("Could not open output file '%s'!\n", outputFileName);
            goto failed;
        }
    }
    /* Write the container header. */
    returnCode = avformat_write_header(outputAvFormatContext, NULL);
    if(returnCode < 0)
    {
        logError("Error occurred when opening output file!\n", "");
        goto failed;
    }

#if USE_H264BSF
    AVBitStreamFilterContext *h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
#if USE_AACBSF
    AVBitStreamFilterContext *aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif

    while (1)
    {
        AVFormatContext *inputAvFormatContext;
        int streamIndex = 0;
        AVStream *inputStream, *outputStream;

        /* Compare timestamps to decide whether the next packet written
         * should come from the video input or the audio input. */
        returnCode = av_compare_ts(curPts_V, videoAvFormatContext->streams[videoIndex_V]->time_base, curPts_A, audioAvFormatContext->streams[audioIndex_A]->time_base);
        if(returnCode < 0)
        {
            /* Fetch the next packet of the selected video stream, freeing
             * and skipping packets that belong to any other stream. */
            inputAvFormatContext = videoAvFormatContext;
            streamIndex = videoIndex_Out;

            returnCode = av_read_frame(inputAvFormatContext, &avPacket);
            while(returnCode >= 0 && avPacket.stream_index != videoIndex_V)
            {
                av_free_packet(&avPacket); /* avoid leaking skipped packets */
                returnCode = av_read_frame(inputAvFormatContext, &avPacket);
            }
            if(returnCode < 0)
            {
                break; /* video input exhausted (or read error) */
            }

            inputStream = inputAvFormatContext->streams[avPacket.stream_index];
            outputStream = outputAvFormatContext->streams[streamIndex];

            //FIX: No PTS (Example: Raw H.264)
            //Simple Write PTS
            if(avPacket.pts == AV_NOPTS_VALUE)
            {
                AVRational timeBase1 = inputStream->time_base;
                /* Duration between 2 frames in AV_TIME_BASE units (us),
                 * derived from the stream's frame rate. */
                int64_t calcDuration = (double)AV_TIME_BASE/av_q2d(inputStream->r_frame_rate);
                /* Convert the synthesized timestamps into the stream's
                 * time base. */
                avPacket.pts = (double)(frameIndex * calcDuration) / (double)(av_q2d(timeBase1) * AV_TIME_BASE);
                avPacket.dts = avPacket.pts;
                avPacket.duration = (double)calcDuration / (double)(av_q2d(timeBase1) * AV_TIME_BASE);
                frameIndex++;
            }
            curPts_V = avPacket.pts;
        }
        else
        {
            /* Fetch the next packet of the selected audio stream, freeing
             * and skipping packets that belong to any other stream. */
            inputAvFormatContext = audioAvFormatContext;
            streamIndex = audioIndex_Out;

            returnCode = av_read_frame(inputAvFormatContext, &avPacket);
            while(returnCode >= 0 && avPacket.stream_index != audioIndex_A)
            {
                av_free_packet(&avPacket); /* avoid leaking skipped packets */
                returnCode = av_read_frame(inputAvFormatContext, &avPacket);
            }
            if(returnCode < 0)
            {
                break; /* audio input exhausted (or read error) */
            }

            inputStream = inputAvFormatContext->streams[avPacket.stream_index];
            outputStream = outputAvFormatContext->streams[streamIndex];

            if(avPacket.pts == AV_NOPTS_VALUE)
            {
                AVRational timeBase1 = inputStream->time_base;
                int64_t  calcDuration = (double)AV_TIME_BASE / av_q2d(inputStream->r_frame_rate);
                avPacket.pts = (double)(frameIndex * calcDuration) / (double)(av_q2d(timeBase1) * AV_TIME_BASE);
                avPacket.dts = avPacket.pts;
                avPacket.duration = (double)calcDuration / (double)(av_q2d(timeBase1) * AV_TIME_BASE);
                frameIndex++;
            }
            curPts_A = avPacket.pts;
        }

#if USE_H264BSF
        av_bitstream_filter_filter(h264bsfc, inputStream->codec, NULL, &avPacket.data, &avPacket.size, avPacket.data, avPacket.size);
#endif
#if USE_AACBSF
        av_bitstream_filter_filter(aacbsfc, inputStream->codec, NULL, &avPacket.data, &avPacket.size, avPacket.data, avPacket.size);
#endif

        /* Rescale timestamps from the input stream's time base to the
         * output stream's, then retarget the packet at the output stream. */
        avPacket.pts = av_rescale_q_rnd(avPacket.pts, inputStream->time_base, outputStream->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        avPacket.dts = av_rescale_q_rnd(avPacket.dts, inputStream->time_base, outputStream->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        avPacket.duration = av_rescale_q(avPacket.duration, inputStream->time_base, outputStream->time_base);
        avPacket.pos = -1;
        avPacket.stream_index = streamIndex;

        printf("Write 1 Packet, Size:%5d   pts：%lld\n", avPacket.size, (long long)avPacket.pts);
        returnCode = av_interleaved_write_frame(outputAvFormatContext, &avPacket);
        if(returnCode < 0)
        {
            logError("Error muxing packet!\n", "");
            goto failed;
        }
        av_free_packet(&avPacket);
    }
    av_write_trailer(outputAvFormatContext);

#if USE_H264BSF
    av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
    av_bitstream_filter_close(aacbsfc);
#endif


failed:
    avformat_close_input(&videoAvFormatContext);
    avformat_close_input(&audioAvFormatContext);

    if(outputAvFormatContext)
    {
        if(!(avOutputFormat->flags & AVFMT_NOFILE))
        {
            /* avio_close tolerates a NULL pb (e.g. avio_open failed). */
            avio_close(outputAvFormatContext->pb);
        }
        /* Free the context on every path, including AVFMT_NOFILE muxers. */
        avformat_free_context(outputAvFormatContext);
    }

    /* Normal termination leaves returnCode == AVERROR_EOF from the last
     * av_read_frame; anything else negative is a genuine error. */
    if(returnCode < 0 && returnCode != AVERROR_EOF)
    {
        logError("Error occurred!\n", "");
        return -1;
    }
    return 0;
}


































