#include <mediadecode.h>
#include <iostream>
#include <ImageMedia.h>
#include <FilterRenderAdapter.h>
#include "audioencoder.h"
#include "audioresampler.h"
#include "videoencoder.h"
#include "muxer.h"
#include "ImageUtils.h"
using namespace std;

// Output geometry / frame rate for the raw-YUV encode() path.
#define YUV_WIDTH 720
#define YUV_HEIGHT 576
#define YUV_FPS  25

// H.264 target bitrate in bits/s.
#define VIDEO_BIT_RATE 500*1024

// Description of the raw PCM input consumed by encode().
#define PCM_SAMPLE_FORMAT AV_SAMPLE_FMT_S16
#define PCM_SAMPLE_RATE 44100
#define PCM_CHANNELS 2

// AAC target bitrate in bits/s.
#define AUDIO_BIT_RATE 128*1024

// PTS bookkeeping is done in units of 1/1000000 s (microseconds).
#define AUDIO_TIME_BASE 1000000
#define VIDEO_TIME_BASE 1000000

// Pipeline state lives at file scope because the decode callbacks in main()
// are capture-less lambdas and can only reach globals.
MediaDecode *mediaDecode = nullptr;
Muxer *mp4_muxer = nullptr ;
VideoEncoder *videoEncoder = nullptr;
AVFrame *yuv_frame = nullptr;
int64_t audio_time_base = AUDIO_TIME_BASE;
int64_t video_time_base = VIDEO_TIME_BASE;
double audio_pts = 0;
double video_pts = 0;
double audio_frame_duration = 0;
double video_frame_duration = 0;
int yuv_fps = YUV_FPS;
int video_bit_rate = VIDEO_BIT_RATE;
int ret = 0;                     // shared status code, also used inside the lambdas
AVFrame *logo_frame;             // logo converted to YUV420P, freed by the finish callback
AVFilterContext *buffersink_ctx; // set up by init_filters()
AVFilterContext *buffersrc_ctx;  // set up by init_filters()
AVFilterGraph *filter_graph;     // owned by init_filters(); never freed here
AVFrame *filt_frame;             // reusable output frame of the overlay filter
AVPacket *video_packet = nullptr;
// NOTE(review): hardcoded absolute path to the logo — only valid on the
// original author's machine; the movie filter fails to open it elsewhere.
const char *filter_descr = "movie=/Users/edy/Desktop/ffmpeg_video/log.png[overlog];[in][overlog]overlay=10:main_h-overlay_h-10";
// NOTE(review): declared but never allocated in this file's visible code —
// main() must `new` it before use.
FilterRenderAdapter *filterRenderAdapter;
int init_filters(const char *filters_descr);

//ffmpeg -i sound_in_sync_test.mp4 -pix_fmt yuv420p 720x576_yuv420p.yuv
//ffmpeg -i sound_in_sync_test.mp4 -vn -ar 44100 -ac 2 -f s16le 44100_2_s16le.pcm
// Usage: executable <input video file> <logo image file> <output mp4 file>
int main(int argc, char **argv)
{

    if(argc != 4){
        printf("argc -> 缺少元素 \n");
        return -1;
    }

    char *video_input_file = argv[1];
    char *logo_file = argv[2];
    char *video_output_file = argv[3];

    FILE *video_input_fd;
    FILE *logo_fd;

    bool isDecodeVideo = true;
    bool isDecodeAudio = false;

    video_input_fd = fopen(video_input_file, "rb");
    if( !video_input_fd )
    {
        LOGE("Failed to open %s file\n", video_input_file);
        return -1;
    }

    // 打开logo文件
    logo_fd = fopen(logo_file, "rb");
    if( !logo_fd )
    {
        LOGE("Failed to open %s file\n", logo_file);
        return -1;
    }

    mediaDecode = new MediaDecode();
    mediaDecode->init(video_input_file,isDecodeVideo,isDecodeAudio);

    const AVCodecContext *video_av_cxt = mediaDecode->getVideoAVCodecContext();
    int yuv_width = video_av_cxt->width;
    int yuv_height = video_av_cxt->height;

    videoEncoder = new VideoEncoder();
    videoEncoder->InitH264(yuv_width,yuv_height,yuv_fps,video_bit_rate);

    mp4_muxer = new Muxer();
    ret = mp4_muxer->Init(video_output_file);
    if(ret < 0)
    {
        LOGE("mp4_muxer.Init failed\n");
        return -1;
    }

    ret = mp4_muxer->AddStream(videoEncoder->GetCodecContext());
    if(ret < 0)
    {
        LOGE("mp4_muxer.AddStream video failed\n");
        return -1;
    }
    ret = mp4_muxer->AddStream(mediaDecode->getAudioAVCodecContext());

    if(ret < 0)
    {
        LOGE("mp4_muxer.AddStream audio failed\n");
        return -1;
    }

    ret = mp4_muxer->Open();
    if(ret < 0)
    {
        LOGE("mp4_muxer.Open failed\n");
        return -1;
    }

    ret = mp4_muxer->SendHeader();
    if(ret < 0)
    {
        LOGE("mp4_muxer.SendHeader failed\n");
        return -1;
    }

   logo_frame = convertToYUV420P(get_frame_from_jpegfile(logo_file));

//    filterRenderAdapter = new FilterRenderAdapter();
//    ret = filterRenderAdapter->initAdapter(logo_file,
//                                     yuv_width,yuv_height,video_av_cxt->pix_fmt,video_av_cxt->sample_aspect_ratio,10,10);

    ret = filterRenderAdapter->initAdapter(yuv_width,yuv_height,video_av_cxt->pix_fmt,video_av_cxt->sample_aspect_ratio,
                                           logo_frame->width,logo_frame->height,(AVPixelFormat)logo_frame->format,logo_frame->sample_aspect_ratio,10,10);

    if(ret < 0){
        LOGE("filterRenderAdapter->initAdapter is failure!");
        return -1;
    }

    filt_frame = av_frame_alloc();

    mediaDecode->decode([](AVPacket *avPacket,AVFrame *avFrame,int index, AVRational time_base){

        if(avFrame){
            ret = filterRenderAdapter->onConvert(avFrame,logo_frame,filt_frame);
        }
        if(!avFrame){
            video_packet = videoEncoder->Encode(nullptr,index,0, time_base);
            if(video_packet){
                mp4_muxer->SendPacket(video_packet, videoEncoder->GetCodecContext()->time_base);
            }
           return -1;
        }
        if(filt_frame){
            video_packet = videoEncoder->Encode(filt_frame,index,filt_frame->pts, time_base);
            if(video_packet){
                mp4_muxer->SendPacket(video_packet, time_base);
            }
          av_frame_unref(filt_frame);
          if(video_packet){
              av_packet_unref(video_packet);
          }
        }

        return 0;
    },[](AVPacket *avPacket,AVFrame *avFrame,int index, AVRational time_base){
        LOGE("write audio pts:%lld\n", avPacket->pts);
        if(!avPacket){
           return -1;
        }
        mp4_muxer->SendPacket(avPacket, time_base);
        return 0;
    },[](){
        LOGE("mediaDecode->finish\n");
        mp4_muxer->SendTrailer();
        mp4_muxer->DeInit();
        videoEncoder->DeInit();
        filterRenderAdapter->deInit();
        av_frame_unref(logo_frame);
        av_frame_free(&logo_frame);
        return 0;
    });
   return 0;
}


int init_filters(const char *filters_descr)
{
   // Builds the global filter_graph from filters_descr and wires up the
   // global buffersrc_ctx ("in") and buffersink_ctx ("out") pads, following
   // the standard FFmpeg filtering_video.c pattern.
   // Returns 0/positive on success, a negative AVERROR on failure.
   char args[512];
   int ret = 0;
   const AVFilter *buffersrc = avfilter_get_by_name("buffer");
   const AVFilter *buffersink = avfilter_get_by_name("buffersink");
   AVFilterInOut *outputs = avfilter_inout_alloc();
   AVFilterInOut *inputs = avfilter_inout_alloc();
   AVRational time_base = {1, 1000};
   enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
   // Hoisted (and assigned below) so the ENOMEM `goto end` does not jump
   // past an initialized declaration, which is ill-formed in C++.
   AVCodecContext *dec_ctx = NULL;

   filter_graph = avfilter_graph_alloc();
   if (!outputs || !inputs || !filter_graph) {
       ret = AVERROR(ENOMEM);
       // fix: returned -1 directly, leaking whichever of inputs/outputs
       // had been allocated and discarding the AVERROR code just set.
       goto end;
   }
   dec_ctx = mediaDecode->getVideoAVCodecContext();
   /* buffer video source: the decoded frames from the decoder will be inserted here. */
   snprintf(args, sizeof(args),
       "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
       dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
       time_base.num, time_base.den,
       dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

   ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
       args, NULL, filter_graph);
   if (ret < 0) {
       av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
       goto end;
   }

   /* buffer video sink: to terminate the filter chain. */
   ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
       NULL, NULL, filter_graph);
   if (ret < 0) {
       av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
       goto end;
   }

   // Restrict the sink to YUV420P so downstream encoding sees one format.
   ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
       AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
   if (ret < 0) {
       av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
       goto end;
   }

   // The graph description's [in] label is fed by our buffer source ...
   outputs->name = av_strdup("in");
   outputs->filter_ctx = buffersrc_ctx;
   outputs->pad_idx = 0;
   outputs->next = NULL;

   // ... and its [out] label drains into our buffer sink.
   inputs->name = av_strdup("out");
   inputs->filter_ctx = buffersink_ctx;
   inputs->pad_idx = 0;
   inputs->next = NULL;

   if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
       &inputs, &outputs, NULL)) < 0)
       goto end;

   if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
       goto end;

end:
   // avfilter_inout_free() tolerates NULL / already-consumed lists.
   avfilter_inout_free(&inputs);
   avfilter_inout_free(&outputs);

   return ret;
}




int encode(int argc, char **argv){
    if(argc != 4) {
        LOGE("usage -> exe in.yuv in.pcm out.mp4");
        return -1;
    }
    // 1. 打开yuv pcm文件
    char *in_yuv_name = argv[1];
    char *in_pcm_name = argv[2];
    char *out_mp4_name = argv[3];
    FILE *in_yuv_fd = NULL;
    FILE *in_pcm_fd = NULL;
    //1. 打开测试文件
    // 打开YUV文件
    in_yuv_fd = fopen(in_yuv_name, "rb");
    if( !in_yuv_fd )
    {
        LOGE("Failed to open %s file\n", in_yuv_name);
        return -1;
    }

    // 打开PCM文件
    in_pcm_fd = fopen(in_pcm_name, "rb");
    if( !in_pcm_fd )
    {
        LOGE("Failed to open %s file\n", in_pcm_name);
        return -1;
    }

    int ret = 0;
    // 2. 初始化编码器，包括视频、音频编码器, 分配yuv、pcm的帧buffer
    // 2.1 初始化video
    // 初始化编码器
    int yuv_width = YUV_WIDTH;
    int yuv_height = YUV_HEIGHT;
    int yuv_fps = YUV_FPS;
    int video_bit_rate = VIDEO_BIT_RATE;
    VideoEncoder video_encoder;
    ret = video_encoder.InitH264(yuv_width, yuv_height, yuv_fps, video_bit_rate);
    if(ret < 0)
    {
        LOGE("video_encoder.InitH264 failed\n");
        return -1;
    }
    // 分配yuv buf
    int y_frame_size = yuv_width * yuv_height;
    int u_frame_size = yuv_width * yuv_height /4;
    int v_frame_size = yuv_width * yuv_height /4;
    int yuv_frame_size = y_frame_size + u_frame_size + v_frame_size;
    uint8_t *yuv_frame_buf = (uint8_t *)malloc(yuv_frame_size);
    if(!yuv_frame_buf)
    {
        LOGE("malloc(yuv_frame_size)\n");
        return -1;
    }

    // 2.2 初始化audio
    // 初始化音频编码器
    int pcm_channels= PCM_CHANNELS;
    int pcm_sample_rate = PCM_SAMPLE_RATE;
    int pcm_sample_format = PCM_SAMPLE_FORMAT;
    int audio_bit_rate = AUDIO_BIT_RATE;
    AudioEncoder audio_encoder;
    ret = audio_encoder.InitAAC(pcm_channels, pcm_sample_rate, audio_bit_rate);
    if(ret < 0)
    {
        LOGE("audio_encoder.InitAAC failed\n");
        return -1;
    }
    // 分配pcm buf
    // pcm_frame_size  = 单个采样点占用的字节 * 通道数量 * 每个通道有多少给采用点
    int pcm_frame_size = av_get_bytes_per_sample((AVSampleFormat)pcm_sample_format)
            *pcm_channels * audio_encoder.GetFrameSize();
    if(pcm_frame_size <= 0) {
        LOGE("pcm_frame_size <= 0\n");
        return -1;
    }
    uint8_t *pcm_frame_buf = (uint8_t *)malloc(pcm_frame_size);
    if(!pcm_frame_buf)
    {
        LOGE("malloc(pcm_frame_size)\n");
        return -1;
    }

    // 初始化重采样
    AudioResampler audio_resampler;
    ret = audio_resampler.InitFromS16ToFLTP(pcm_channels, pcm_sample_rate,
                                            audio_encoder.GetChannels(), audio_encoder.GetSampleRate());
    if(ret < 0)
    {
        LOGE("audio_resampler.InitFromS16ToFLTP failed\n");
        return -1;
    }

    // 3. mp4初始化 包括新建流，open io, send header
    Muxer mp4_muxer;
    ret = mp4_muxer.Init(out_mp4_name);
    if(ret < 0)
    {
        LOGE("mp4_muxer.Init failed\n");
        return -1;
    }

    ret = mp4_muxer.AddStream(video_encoder.GetCodecContext());
    if(ret < 0)
    {
        LOGE("mp4_muxer.AddStream video failed\n");
        return -1;
    }

    ret = mp4_muxer.AddStream(audio_encoder.GetCodecContext());
    if(ret < 0)
    {
        LOGE("mp4_muxer.AddStream audio failed\n");
        return -1;
    }

    ret = mp4_muxer.Open();
    if(ret < 0)
    {
        LOGE("mp4_muxer.Open failed\n");
        return -1;
    }

    ret = mp4_muxer.SendHeader();
    if(ret < 0)
    {
        LOGE("mp4_muxer.SendHeader failed\n");
        return -1;
    }
    // 4. 在while循环读取yuv、pcm进行编码然后发送给MP4 muxer
    // 4.1 时间戳相关
    int64_t audio_time_base = AUDIO_TIME_BASE;
    int64_t video_time_base = VIDEO_TIME_BASE;
    double audio_pts = 0;
    double video_pts = 0;
    double audio_frame_duration = 1.0 * audio_encoder.GetFrameSize()/pcm_sample_rate*audio_time_base;
    double video_frame_duration = 1.0/yuv_fps * video_time_base;

    int audio_finish = 0;   // 两者都为0的时候才结束while循环
    int video_finish = 0;

    size_t read_len = 0;
    AVPacket *packet =  NULL;
    std::vector<AVPacket *> packets;
    int audio_index = mp4_muxer.GetAudioStreamIndex();
    int video_index = mp4_muxer.GetVideoStreamIndex();
    while (1) {
        if(audio_finish && video_finish) {
            break;
        }
        LOGE("apts:%0.0lf vpts:%0.0lf\n", audio_pts/1000, video_pts/1000);
        if((video_finish != 1 && audio_pts > video_pts)   // audio和vidoe都还有数据，优先audio（audio_pts > video_pts）
                ||  (video_finish != 1 && audio_finish == 1)) {
            read_len = fread(yuv_frame_buf, 1, yuv_frame_size, in_yuv_fd);
            if(read_len < yuv_frame_size) {
                video_finish = 1;
                LOGE("fread yuv_frame_buf finish\n");
            }
            if(video_finish != 1) {
                //                packet = video_encoder.Encode(yuv_frame_buf, yuv_frame_size, video_index,
                //                                              video_pts, video_time_base);
                ret = video_encoder.Encode(yuv_frame_buf, yuv_frame_size,
                                           video_index, video_pts, video_time_base,
                                           packets);
            }else {
                //                packet = video_encoder.Encode(NULL, 0, video_index,
                //                                              video_pts, video_time_base);
                LOGE("flush video encoder\n");
                ret = video_encoder.Encode(NULL, 0,
                                           video_index, video_pts, video_time_base,
                                           packets);
            }
            video_pts += video_frame_duration;  // 叠加pts
            //            if(packet) {
            //                mp4_muxer.SendPacket(packet);
            //            }
            if(ret >= 0) {
                for(int i = 0; i < packets.size(); i++) {
                    ret = mp4_muxer.SendPacket(packets[i]);
                }
            }
            packets.clear();
        } else if(audio_finish != 1) {
            read_len = fread(pcm_frame_buf, 1, pcm_frame_size, in_pcm_fd);
            if(read_len < pcm_frame_size) {
                audio_finish = 1;
                LOGE("fread pcm_frame_buf finish\n");
            }

            if(audio_finish != 1) {
                AVFrame *fltp_frame = AllocFltpPcmFrame(pcm_channels, audio_encoder.GetFrameSize());
                ret = audio_resampler.ResampleFromS16ToFLTP(pcm_frame_buf, fltp_frame);
                if(ret < 0)
                    LOGE("ResampleFromS16ToFLTP error\n");

                ret = audio_encoder.Encode(fltp_frame,
                                           audio_index, audio_pts, audio_time_base,
                                           packets);
                FreePcmFrame(fltp_frame);
            }else {
                LOGE("flush audio encoder\n");
                //                packet = audio_encoder.Encode(NULL,video_index,
                //                                              audio_pts, audio_time_base);
                ret = audio_encoder.Encode(NULL,
                                           audio_index, audio_pts, audio_time_base,
                                           packets);
            }
            audio_pts += audio_frame_duration;  // 叠加pts
            //            if(packet) {
            //                mp4_muxer.SendPacket(packet);
            //            }
            if(ret >= 0) {
                for(int i = 0; i < packets.size(); i++) {
                    ret = mp4_muxer.SendPacket(packets[i]);
                }
            }
            packets.clear();
        }
    }
    ret = mp4_muxer.SendTrailer();
    if(ret < 0)
    {
        LOGE("mp4_muxer.SendTrailer failed\n");
    }

    LOGE("write mp4 finish\n");

    if(yuv_frame_buf)
        free(yuv_frame_buf);
    if(pcm_frame_buf)
        free(pcm_frame_buf);
    if(in_yuv_fd)
        fclose(in_yuv_fd);
    if(in_pcm_fd)
        fclose(in_pcm_fd);

    return 0;
}




