


#include "base_util.h"
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <locale>
#include <string>
#include <iostream>
#include <thread>
#include "format_template.h"
#include "packet_frame_operate.h"
#include "dump.h"
#include "operate_define.h"
extern "C"
{
	#include <libavcodec/avcodec.h>
	#include <libavfilter/avfilter.h>
	#include <libavfilter/buffersrc.h>
	#include <libavfilter/version.h>
	#include <libavformat/avformat.h>
}

// Holds the filter contexts for a simple test graph
// (nullsrc source, buffer source, buffersink output).
// NOTE(review): not referenced anywhere in this file — possibly used
// elsewhere or left over from an earlier experiment; confirm before removing.
struct AvFilterTestData
{
	AVFilterContext * filter_nullsrc_ctx{nullptr};   // "nullsrc" generator context
	AVFilterContext * filter_buffer_ctx{nullptr};    // "buffer" source context
	AVFilterContext * filter_buffersink_ctx{nullptr};// "buffersink" output context
};

// Bundles one configured filter graph with its endpoints, as filled in by
// TestDrawBox()/ScaleVerlay(): frames are pushed into filter_in and
// pulled from filter_out.
struct MessageTestApi
{
	AVFilterContext * filter_in{nullptr};  // "buffer" source: av_buffersrc_add_frame target
	AVFilterContext * filter_out{nullptr}; // "buffersink": av_buffersink_get_frame source
	AVFilterGraph   * graph{nullptr};      // owning graph; freeing it frees both contexts
};


// Dumps the first 11 frames passed in as individual raw files
// "0.yuv" .. "10.yuv" for offline inspection; later calls are no-ops.
// Assumes a 3-plane format with half-resolution chroma (YUV420P-like) —
// TODO confirm against the sink's negotiated pix_fmt.
void SaveYUVFile(AVFrame * frame)
{
	static int count = 0;
	if (count > 10)
	{
		return;
	}
	std::string name = std::to_string(count++) + ".yuv";
	FILE * file_handle = fopen(name.c_str(), "wb+");
	if (!file_handle)
	{
		// fix: the original dereferenced a NULL handle when fopen failed
		return;
	}
	for (int index = 0; index < 3; index++)
	{
		int width  = index == 0 ? frame->width  : frame->width / 2;
		int height = index == 0 ? frame->height : frame->height / 2;
		// fix: walk a local cursor instead of advancing frame->data[index]
		// in place — the original corrupted the caller's frame (its plane
		// pointers no longer matched the underlying buffer afterwards).
		const uint8_t * plane = frame->data[index];
		for (int row = 0; row < height; row++)
		{
			// Write only `width` bytes per row; linesize may include padding.
			fwrite(plane, 1, width, file_handle);
			plane += frame->linesize[index];
		}
	}
	fclose(file_handle);
}

// Appends up to 301 frames of raw planar data into a single file "30.yuv"
// (one growing stream, unlike SaveYUVFile which writes one file per frame).
// Assumes a 3-plane format with half-resolution chroma — TODO confirm.
void SaveYUVFileStream(AVFrame * frame)
{
	static int count = 0;
	if (count > 300)
	{
		return;
	}
	// fix: the counter was never incremented, so the 300-frame cap above
	// was dead code and the file grew without bound.
	count++;
	std::string name = std::to_string(30) + ".yuv";
	FILE * file_handle = fopen(name.c_str(), "a+");
	if (!file_handle)
	{
		// fix: the original dereferenced a NULL handle when fopen failed
		return;
	}
	for (int index = 0; index < 3; index++)
	{
		int width  = index == 0 ? frame->width  : frame->width / 2;
		int height = index == 0 ? frame->height : frame->height / 2;
		// fix: use a local cursor instead of mutating frame->data[index],
		// which corrupted the caller's frame.
		const uint8_t * plane = frame->data[index];
		for (int row = 0; row < height; row++)
		{
			fwrite(plane, 1, width, file_handle);
			plane += frame->linesize[index];
		}
	}
	fclose(file_handle);
}

// Drains every available frame from a buffersink filter context and dumps
// each one via SaveYUVFile(). Returns when the sink reports an error,
// EAGAIN, or EOF (any negative result).
void ProducePict(AVFilterContext * filter_ctx)
{
	// Allocate once and reuse: av_buffersink_get_frame overwrites the
	// frame's references, and av_frame_free releases the last one.
	AVFrame * frame = av_frame_alloc();
	while (true)
	{
		int result = av_buffersink_get_frame(filter_ctx, frame);
		if (result < 0)
		{
			PrintLog(result);
			break;
		}
		SaveYUVFile(frame);
		av_frame_unref(frame);
	}
	// fix: the original allocated a new frame every iteration and leaked
	// it on the error/EOF break path.
	av_frame_free(&frame);
}

// Pumps frames from one graph into another: pulls each frame from `filter`
// (a buffersink), pushes it into message_api.filter_in (a buffer source,
// keeping our reference), then drains message_api.filter_out and dumps the
// results with SaveYUVFile(). EAGAIN/EOF on the inner graph are treated as
// "nothing more right now" (result reset to 0); any other negative result
// ends the loop.
void GetFrame(AVFilterContext * filter, MessageTestApi & message_api)
{
	while (true)
	{
		AVFrame * frame = av_frame_alloc();
		int result = av_buffersink_get_frame(filter, frame);
		if (result < 0)
		{
			PrintLog(result);
			// fix: the original leaked `frame` on this break path
			av_frame_free(&frame);
			break;
		}
		while (true)
		{
			result = av_buffersrc_add_frame_flags(message_api.filter_in, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
			if (result < 0)
			{
				if (result == AVERROR(EAGAIN) || result == AVERROR_EOF)
				{
					result = 0;
				}
				break;
			}
			AVFrame * out = av_frame_alloc();
			result = av_buffersink_get_frame(message_api.filter_out, out);
			if (result < 0)
			{
				// fix: the original broke out of the loop without freeing
				// `out`, leaking one frame per EAGAIN/EOF/error
				av_frame_free(&out);
				if (result == AVERROR(EAGAIN) || result == AVERROR_EOF)
				{
					result = 0;
				}
				break;
			}
			SaveYUVFile(out);
			av_frame_free(&out);
		}
		av_frame_free(&frame);
		if (result < 0)
		{
			PrintLog(result);
			break;
		}
	}
}

// Builds a buffer -> drawbox -> buffersink graph (1280x720 @ 25fps,
// pix_fmt=0 i.e. the first AVPixelFormat value; red 200x200 box at
// (100,100)) and, on success, hands the endpoints and the owning graph to
// the caller via message_api. On any failure the partially built graph is
// freed and message_api is left untouched (all fields stay nullptr).
void TestDrawBox(MessageTestApi & message_api)
{
	AVFilterGraph * graph = avfilter_graph_alloc();
	if (!graph)
	{
		return;
	}
	AVFilterContext * buffer_ctx = nullptr;
	int result = avfilter_graph_create_filter(&buffer_ctx, avfilter_get_by_name("buffer"), "in",
		"video_size=1280x720:frame_rate=25/1:time_base=1/25:pix_fmt=0", nullptr, graph);
	if (result < 0)
	{
		PrintLog(result);
		// fix: the original leaked the graph on every failure path
		avfilter_graph_free(&graph);
		return;
	}
	AVFilterContext * drawbox_ctx = nullptr;
	result = avfilter_graph_create_filter(&drawbox_ctx, avfilter_get_by_name("drawbox"), "box",
		"x=100:y=100:w=200:h=200:color=red:thickness=1:replace=1", nullptr, graph);
	if (result < 0)
	{
		// fix: the original returned here without logging or cleanup
		PrintLog(result);
		avfilter_graph_free(&graph);
		return;
	}
	result = avfilter_link(buffer_ctx, 0, drawbox_ctx, 0);
	if (result < 0)
	{
		PrintLog(result);
		avfilter_graph_free(&graph);
		return;
	}
	AVFilterContext * buffersink_ctx = nullptr;
	result = avfilter_graph_create_filter(&buffersink_ctx, avfilter_get_by_name("buffersink"), "out",
		nullptr, nullptr, graph);
	if (result < 0)
	{
		PrintLog(result);
		avfilter_graph_free(&graph);
		return;
	}
	result = avfilter_link(drawbox_ctx, 0, buffersink_ctx, 0);
	if (result < 0)
	{
		PrintLog(result);
		avfilter_graph_free(&graph);
		return;
	}
	// fix: the original ignored the result of avfilter_graph_config
	result = avfilter_graph_config(graph, nullptr);
	if (result < 0)
	{
		PrintLog(result);
		avfilter_graph_free(&graph);
		return;
	}
	char * ptr = avfilter_graph_dump(graph, nullptr);
	if (ptr)
	{
		printf("%s.\n", ptr);
		av_free(ptr);
	}
	// Success: caller takes ownership of the graph via message_api.graph.
	message_api.filter_in  = buffer_ctx;
	message_api.filter_out = buffersink_ctx;
	message_api.graph      = graph;
}


// NOTE(review): despite the name, the original body was a byte-for-byte
// duplicate of TestDrawBox() — it built a buffer -> drawbox -> buffersink
// graph, not a scale/overlay one. Delegate to TestDrawBox() to remove the
// ~60 duplicated lines; behavior is unchanged. If a scale/overlay graph
// was actually intended, this function was never finished — confirm.
void ScaleVerlay(MessageTestApi & message_api)
{
	TestDrawBox(message_api);
}

void NullsrcTest()
{
	const AVFilter * filter = nullptr;
	AVFilterGraph * graph = avfilter_graph_alloc();
	if (graph)
	{
		int result = 0;
		filter = avfilter_get_by_name("nullsrc");
		AVFilterContext * nullsrc_ctx = nullptr;
		result = avfilter_graph_create_filter(&nullsrc_ctx, filter, "in", "s=1280x720:d=60", nullptr, graph);
		if (result >= 0)
		{
			filter = avfilter_get_by_name("buffersink");
			AVFilterContext * buffersink_ctx = nullptr;
			result = avfilter_graph_create_filter(&buffersink_ctx, filter, "out", nullptr, nullptr, graph);
			if (result >= 0)
			{
				result = avfilter_link(nullsrc_ctx, 0, buffersink_ctx, 0);
				if (result >= 0)
				{
					avfilter_graph_config(graph, nullptr);
					char * ptr = avfilter_graph_dump(graph, nullptr);
					if (ptr)
					{
						printf("%s.\n", ptr);
						av_free(ptr);
					}
					// ProducePict(buffersink_ctx);
					MessageTestApi message_api;
					TestDrawBox(message_api);
					if (message_api.filter_in && message_api.filter_out && message_api.graph)
					{
						GetFrame(buffersink_ctx, message_api);
					}
				}
				else
				{
					PrintLog(result);
				}
			}
			else
			{
				PrintLog(result);
			}
		}
		else
		{
			PrintLog(result);
		}
	}
}

// Interleaves packets from the two input streams by accumulated presentation
// duration: on each iteration the stream with the smaller running total
// (time_compare_map[0] vs [1]) is popped, decoded, pushed through its
// buffer-source filter, and the filtered frames are encoded and muxed.
// When one input hits EOF its decoder and filter input are flushed (nullptr
// packet/frame) and processing continues with the other stream; the loop
// ends when the second stream also reports EOF. On exit the encoder is
// flushed and remaining packets are written.
// NOTE(review): exact ownership/flush semantics of the *MessageQueue helpers
// are defined elsewhere — the comments below describe only what is visible here.
static void ProcessVideoData(NullSrcAndOverlay & data, MessageQueueData & message)
{
    // Running per-stream totals of consumed duration, in AV_TIME_BASE units.
    std::map<int, double> time_compare_map;
    time_compare_map[0] = 0;
    time_compare_map[1] = 0;
    for (;true;)
    {
        InputFormatContext * select_format = nullptr;
        FrameMessageQueue * frame_queue = nullptr;
        AVPacket * packet = nullptr;
        int select_stream = 0;
        if (time_compare_map[0] < time_compare_map[1])
        {
            // Stream 0 is behind: consume from it next.
            select_stream = 0;
            int result = message.m_read_packet_queue.PopPacket(&packet);
            if (result == AVERROR_EOF)
            {
                select_format = &data.m_input_ctx;
                frame_queue   = &message.m_decoder_frame_queue;

                // Flush decoder 0 with a nullptr packet.
                DecodePacketDataToMessageQueue(data.m_input_ctx.VideoDecoder(), nullptr, message.m_decoder_frame_queue);
                if (message.m_decoder_frame_queue.GetSize() == 0)
                {
                    // Decoder 0 fully drained: flush its filter input and
                    // fall back to stream 1; if that is also at EOF, stop.
                    FilterProcessFrame(data, data.m_buffer_ctx, nullptr, message.m_filter_frame_queue);
                    result = message.m_read_packet_queue_1.PopPacket(&packet);
                    if (result == AVERROR_EOF)
                    {
                        break;
                    }
                    select_stream = 1;
                    select_format = &data.m_input_ctx_1;
                    frame_queue   = &message.m_decoder_frame_queue_1;
                }
            }
            else
            {
                select_format = &data.m_input_ctx;
                frame_queue   = &message.m_decoder_frame_queue;
            }
        }
        else
        {
            // Mirror image of the branch above for stream 1.
            select_stream = 1;
            int result = message.m_read_packet_queue_1.PopPacket(&packet);
            if (result == AVERROR_EOF)
            {
                select_format = &data.m_input_ctx_1;
                frame_queue   = &message.m_decoder_frame_queue_1;
                DecodePacketDataToMessageQueue(data.m_input_ctx_1.VideoDecoder(), nullptr, message.m_decoder_frame_queue_1);
                if (message.m_decoder_frame_queue_1.GetSize() == 0)
                {
                    FilterProcessFrame(data, data.m_buffer_ctx_1, nullptr, message.m_filter_frame_queue);
                    result = message.m_read_packet_queue.PopPacket(&packet);
                    if (result == AVERROR_EOF)
                    {
                        break;
                    }
                    select_stream = 0;
                    select_format = &data.m_input_ctx;
                    frame_queue   = &message.m_decoder_frame_queue;
                }
            }
            else
            {
                select_format = &data.m_input_ctx_1;
                frame_queue   = &message.m_decoder_frame_queue_1;
            }
        }
        if (packet)
        {
            // Skip non-video packets from the selected input.
            if (packet->stream_index != select_format->VideoMediaStream()->AvStream()->index)
            {
                av_packet_free(&packet);
                continue;
            }
            // Advance this stream's clock by the packet duration, rescaled
            // from the decoder's pkt_timebase into AV_TIME_BASE units.
            time_compare_map[select_stream] += packet->duration * av_q2d(select_format->VideoDecoder()->AvCodecCtx()->pkt_timebase) * AV_TIME_BASE;
        }
        else
        {
            // No packet (EOF flush round): advance by an arbitrary step so
            // the scheduler keeps alternating — presumably a heuristic,
            // TODO confirm 1000.0 is intentional.
            time_compare_map[select_stream] += 1000.0;
        }
        // printf("select stream : %d.\n", select_stream);
        DecodePacketDataToMessageQueue(select_format->VideoDecoder(), packet, *frame_queue);
        ProcessFrameDataFromMessageQueue(*frame_queue,[&data, &message, &select_stream](AVFrame * frame){

            // Lazily build the filter graph from the first decoded frame.
            if (!data.m_buffer_sink_ctx)
            {
                int result = NullSrcAndOverlayInit(data);
                if (result < 0)
                {
                    exit(-1);
                }
            }
            // Route the frame into the buffer source matching its stream.
            if (select_stream == 0)
            {
                FilterProcessFrame(data, data.m_buffer_ctx, frame, message.m_filter_frame_queue);
            }
            else
            {
                FilterProcessFrame(data, data.m_buffer_ctx_1, frame, message.m_filter_frame_queue);
            }

        }, false);
        ProcessFrameDataFromMessageQueue(message.m_filter_frame_queue, [&data, &message](AVFrame * frame){

            // Let the encoder pick the picture type, except the very first
            // frame, which is forced to I so the stream starts decodable.
            frame->pict_type = AV_PICTURE_TYPE_NONE;
            if (!data.m_output_ctx.VideoMediaStream())
            {
                // Lazily open the output from the first filtered frame.
                InitOutputData(data, frame);
                frame->pict_type = AV_PICTURE_TYPE_I;
            }
            {
                // NOTE(review): "%ld" for an int64_t pts is not portable;
                // PRId64 would be — left as-is in this doc pass.
                auto filter_tm = av_buffersink_get_time_base(data.m_buffer_sink_ctx);
                printf("pts : %ld filter time_base : %d/%d.\n", frame->pts, filter_tm.num, filter_tm.den);
            }
            EncodeFrameDataToMessageQueue(data.m_output_ctx.VideoEncoder(), frame, message.m_encoder_packet_queue);
            ProcessPacketDataFromMessageQueue(message.m_encoder_packet_queue,[&data](AVPacket * packet){

                // Rescale timestamps from the filter sink's time base into
                // the output stream's time base before muxing.
                auto filter_tm = av_buffersink_get_time_base(data.m_buffer_sink_ctx);
                av_packet_rescale_ts(packet, filter_tm, data.m_output_ctx.VideoMediaStream()->AvStream()->time_base);
                packet->time_base = data.m_output_ctx.VideoMediaStream()->AvStream()->time_base;
                packet->pos = -1;
                av_interleaved_write_frame(data.m_output_ctx.AvFormatContext(), packet);

            }, false);

        }, false);
        if (packet)
        {
            av_packet_free(&packet);
        }
    }
    // Both inputs are done: flush the encoder and mux the remaining packets.
    EncodeFrameDataToMessageQueue(data.m_output_ctx.VideoEncoder(), nullptr, message.m_encoder_packet_queue);
    ProcessPacketDataFromMessageQueue(message.m_encoder_packet_queue,[&data](AVPacket * packet){

        auto filter_tm = av_buffersink_get_time_base(data.m_buffer_sink_ctx);
        av_packet_rescale_ts(packet, filter_tm, data.m_output_ctx.VideoMediaStream()->AvStream()->time_base);
        packet->time_base = data.m_output_ctx.VideoMediaStream()->AvStream()->time_base;
        packet->pos = -1;
        av_interleaved_write_frame(data.m_output_ctx.AvFormatContext(), packet);

    }, false);
}

// Drives the whole two-input pipeline: opens both inputs, starts one
// reader thread per input that feeds its packet queue, then runs
// ProcessVideoData() on this thread (decode -> filter -> encode -> mux),
// joins the readers, and finalizes the output file.
void FormatTemplate()
{
	NullSrcAndOverlay over{};
	over.m_graph = avfilter_graph_alloc();
	if (!over.m_graph)
	{
		return;
	}
	int result = InitInputStream(over);
	if (result < 0)
	{
		return;
	}
	MessageQueueData message_queue;
	// Bounded queues (1000 entries each) provide back-pressure between
	// the reader threads and the processing loop.
	message_queue.m_decoder_frame_queue.InitMessageQueue(1000);
	message_queue.m_read_packet_queue.InitMessageQueue(1000);
	message_queue.m_filter_frame_queue.InitMessageQueue(1000);
	message_queue.m_encoder_packet_queue.InitMessageQueue(1000);
	message_queue.m_read_packet_queue_1.InitMessageQueue(1000);
	message_queue.m_decoder_frame_queue_1.InitMessageQueue(1000);
	// One reader thread per input; each demuxes packets into its queue.
	// NOTE(review): the AVPacket allocated in each lambda is not freed
	// here — presumably ReadPacketDataToMessageQueue owns or reuses it;
	// confirm against its implementation.
	std::thread reader_0([&message_queue, &over](){

		AVPacket * packet = av_packet_alloc();
		ReadPacketDataToMessageQueue(&over.m_input_ctx, packet, message_queue.m_read_packet_queue);
	});
	std::thread reader_1([&message_queue, &over](){

		AVPacket * packet = av_packet_alloc();
		ReadPacketDataToMessageQueue(&over.m_input_ctx_1, packet, message_queue.m_read_packet_queue_1);
	});
	// (Removed ~100 lines of dead, commented-out single-input pipeline
	// code that ProcessVideoData() superseded.)
	ProcessVideoData(over, message_queue);
	if (reader_0.joinable())
	{
		reader_0.join();
	}
	if (reader_1.joinable())
	{
		reader_1.join();
	}
	// fix: only write the trailer if the output context actually exists —
	// the original called av_write_trailer unconditionally even though the
	// output is opened lazily inside ProcessVideoData and may never have
	// been initialized.
	if (over.m_output_ctx.AvFormatContext())
	{
		av_write_trailer(over.m_output_ctx.AvFormatContext());
	}
}

// Entry point: turn on verbose FFmpeg logging, then run the
// two-input filter/transcode pipeline.
int main()
{
	av_log_set_level(AV_LOG_VERBOSE);
	FormatTemplate();
	return EXIT_SUCCESS;
}