

// C system headers
#include <strings.h>
// C++ standard library
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <thread>
// project headers
#include "base_util.h"
#include "codec_config.h"
#include "input_output_context.h"
#include "message_queue.h"
#include "operate_define.h"
#include "packet_frame_operate.h"

extern "C"
{
	#include <libavfilter/avfilter.h>
	#include <libavutil/mem.h>
	#include <libavutil/opt.h>
	#include <libavfilter/buffersrc.h>
	#include <libavfilter/buffersink.h>
}


// Bundle of the bounded queues that connect the pipeline stages:
// demux -> decode -> filter -> encode. Each stage pushes into the next
// stage's queue; capacities are set via InitMessageQueue() in main().
struct MessageQueue
{
	PacketMessageQueue m_read_packet_queue;         // demuxed AVPackets from the reader thread
	FrameMessageQueue  m_video_decode_frame_queue;  // decoded video AVFrames
	FrameMessageQueue  m_video_filter_frame_queue;  // watermarked AVFrames out of the filter graph
	PacketMessageQueue m_video_encode_packet_queue; // encoded AVPackets awaiting muxing
};


// Text watermark description: the string to draw and its top-left position
// in pixels. NOTE(review): not referenced anywhere in this translation unit —
// presumably intended for a configurable drawtext filter; confirm before removal.
struct WaterMarkConfig
{
	std::string m_content;     // watermark text
	uint16_t    m_x_pos{0};    // x position in pixels
	uint16_t    m_y_pos{0};    // y position in pixels
};

// Owns an FFmpeg filter graph used to draw a text watermark onto frames.
// m_input_filter_ctx / m_output_filter_ctx point INTO m_filter_graph and are
// released together with it by avfilter_graph_free(); never free them on
// their own.
struct WaterMarkFilter
{
	AVFilterContext * m_input_filter_ctx{nullptr};   // "buffer" source (frames pushed in)
	AVFilterContext * m_output_filter_ctx{nullptr};  // "buffersink" (frames pulled out)
	AVFilterGraph   * m_filter_graph{nullptr};       // owning graph, freed in the destructor

	WaterMarkFilter() = default;
	// FIX: the struct owns a raw FFmpeg resource; the implicitly generated
	// copy operations would lead to a double avfilter_graph_free(). Delete
	// them (Rule of Five) — all call sites pass by pointer/reference.
	WaterMarkFilter(const WaterMarkFilter &) = delete;
	WaterMarkFilter & operator=(const WaterMarkFilter &) = delete;

	~WaterMarkFilter()
	{
		if (m_filter_graph)
		{
			// Frees every filter context inside the graph and nulls
			// m_filter_graph itself.
			avfilter_graph_free(&m_filter_graph);
			m_input_filter_ctx  = nullptr;
			m_output_filter_ctx = nullptr;
		}
	}

	// Per-watermark option key/value pairs (currently unused in this TU).
	std::map<std::string, std::string> m_watermark_option_map;
};

// Builds a minimal watermark graph with the explicit filter API:
//     buffer ("in") -> drawtext -> buffersink ("out")
// On success the source/sink contexts are stored in `water_mark`, which owns
// the graph and frees it in its destructor (including on the error paths
// below, since m_filter_graph is assigned first). Returns false on any
// FFmpeg error; the negative error code is passed to PrintLog().
bool TestFilterApi(WaterMarkFilter * water_mark, std::shared_ptr<MediaStream> video_stream)
{
	water_mark->m_filter_graph = avfilter_graph_alloc();
	if (!water_mark->m_filter_graph)
	{
		// FIX: allocation failure was previously dereferenced below.
		return false;
	}
	auto filter_graph = water_mark->m_filter_graph;
	auto stream_codecpar = video_stream->AvStream()->codecpar;

	// Describe the frames the "buffer" source will receive.
	// BUG FIX: frame_rate must be numerator/denominator. The original passed
	// r_frame_rate.den, r_frame_rate.num, which inverted the rate (e.g. a
	// 24/1 stream became 1/24 — the "// 24,1" comment showed the intent).
	char args[1024] = {0};
	snprintf(args, sizeof(args) - 1, "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:frame_rate=%d/%d",
		stream_codecpar->width,
		stream_codecpar->height,
		AV_PIX_FMT_YUV420P,
		video_stream->AvStream()->time_base.num,
		video_stream->AvStream()->time_base.den,
		video_stream->AvStream()->r_frame_rate.num,
		video_stream->AvStream()->r_frame_rate.den
		);

	PrintLog(args);

	const AVFilter * buffer_filter = avfilter_get_by_name("buffer");
	AVFilterContext * buffer_filter_ctx = nullptr;
	int result = avfilter_graph_create_filter(&buffer_filter_ctx, buffer_filter, "in", args, nullptr, filter_graph);
	if (result < 0)
	{
		PrintLog(result);
		return false;
	}

	// Fixed demo watermark text; see WaterMarkConfig for the intended
	// configurable variant.
	snprintf(args, sizeof(args) - 1, "text='Filter API Test':fontsize=60:x=100:y=200:fontcolor=red");
	const AVFilter  * drawtext_filter = avfilter_get_by_name("drawtext");
	AVFilterContext * drawtext_filter_ctx = nullptr;
	result = avfilter_graph_create_filter(&drawtext_filter_ctx, drawtext_filter, "drawtext_index_1", args, nullptr, filter_graph);
	if (result < 0)
	{
		PrintLog(result);
		return false;
	}

	result = avfilter_link(buffer_filter_ctx, 0, drawtext_filter_ctx, 0);
	if (result < 0)
	{
		PrintLog(result);
		return false;
	}

	const AVFilter  * buffersink_filter = avfilter_get_by_name("buffersink");
	AVFilterContext * buffersink_filter_ctx = nullptr;
	result = avfilter_graph_create_filter(&buffersink_filter_ctx, buffersink_filter, "out", nullptr, nullptr, filter_graph);
	if (result < 0)
	{
		PrintLog(result);
		return false;
	}

	// Constrain the sink to YUV420P so downstream encoding sees one format.
	enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};
	result = av_opt_set_int_list(buffersink_filter_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (result < 0)
	{
		// FIX: this call's result was previously ignored.
		PrintLog(result);
		return false;
	}

	result = avfilter_link(drawtext_filter_ctx, 0, buffersink_filter_ctx, 0);
	if (result < 0)
	{
		PrintLog(result);
		return false;
	}
	result = avfilter_graph_config(filter_graph, nullptr);
	if (result < 0)
	{
		PrintLog(result);
		return false;
	}

	water_mark->m_input_filter_ctx = buffer_filter_ctx;
	water_mark->m_output_filter_ctx = buffersink_filter_ctx;

	// Debug: dump the configured graph topology.
	char * ptr = avfilter_graph_dump(filter_graph, nullptr);
	if (ptr)
	{
		printf("%s", ptr);
		av_free(ptr);
	}
	return true;
}
// example code 
// Example: create buffer/buffersink filters on the member graph, then parse a
// drawtext filter-description string into a separate scratch graph with
// avfilter_graph_parse2() and dump the unconnected inputs/outputs it reports.
// Returns false (after PrintLog) on any FFmpeg error.
bool InitWaterMarkFilter(WaterMarkFilter & water_mark_filter)
{
	// FIX: avfilter_graph_parse2() treats these as pure OUTPUT parameters,
	// so pre-allocating them with avfilter_inout_alloc() (as the original
	// did) leaked both allocations the moment parse2 overwrote them.
	AVFilterInOut * input_link  = nullptr;
	AVFilterInOut * output_link = nullptr;

	const AVFilter * buffer_filter = avfilter_get_by_name("buffer");
	const AVFilter * sink_filter   = avfilter_get_by_name("buffersink");

	char args[1024] = {0};

	snprintf(args, sizeof(args) - 1, "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:frame_rate=24/1",
		1920, 1080, AV_PIX_FMT_YUV420P, 1, 90000);

	printf("%s.\n", args);

	water_mark_filter.m_filter_graph = avfilter_graph_alloc();

	int result = avfilter_graph_create_filter(&water_mark_filter.m_input_filter_ctx, buffer_filter, "in",  args, nullptr, water_mark_filter.m_filter_graph);
	if (result < 0)
	{
		PrintLog(result);
		return false;
	}
	result = avfilter_graph_create_filter(&water_mark_filter.m_output_filter_ctx, sink_filter, "out", nullptr, nullptr, water_mark_filter.m_filter_graph);
	if (result < 0)
	{
		PrintLog(result);
		return false;
	}
	enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};
	av_opt_set_int_list(water_mark_filter.m_output_filter_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	bzero(args, sizeof(args));

	snprintf(args, sizeof(args) - 1, "[in]drawtext=text='hello world':fontsize=60[drawtext_out_1];[in]drawtext=text='number_test':fontsize=60:x=100:y=200[out]");

	// Parse into a scratch graph only to inspect what parse2 produces; the
	// real pipeline graph built by TestFilterApi is unaffected.
	AVFilterGraph * parse_graph = avfilter_graph_alloc();
	result = avfilter_graph_parse2(parse_graph, args, &input_link, &output_link);
	if (result < 0)
	{
		PrintLog(result);
		avfilter_graph_free(&parse_graph);  // FIX: was leaked on error
		return false;
	}

	printf("input avfilter names:\n");
	for (auto * iter = input_link; iter; iter = iter->next)
	{
		printf("index : %d\tname : %s\tavfilter name : %s.\n", iter->pad_idx, iter->name, iter->filter_ctx->filter->name);
	}
	printf(".\n\noutput avfilter names:\n");
	for (auto * iter = output_link; iter; iter = iter->next)
	{
		printf("index: %d\tname : %s\tavfilter name : %s.\n", iter->pad_idx, iter->name, iter->filter_ctx->filter->name);
	}
	printf(".\n\n");
	// FIX: nb_filters is unsigned — %u, not %d.
	printf("content : %u.\n", parse_graph->nb_filters);

	for (size_t index = 0; index < parse_graph->nb_filters; index++)
	{
		auto filter_ctx = parse_graph->filters[index];
		if (filter_ctx)
		{
			printf("filter name : %s.\n", filter_ctx->name);
		}
	}

	// FIX: release the inout lists and the scratch graph; the original
	// leaked all three on the success path.
	avfilter_inout_free(&input_link);
	avfilter_inout_free(&output_link);
	avfilter_graph_free(&parse_graph);
	return true;
}


// Reader-thread entry point: demuxes packets from `input_format_ctx` into the
// read queue until ReadPacketDataToMessageQueue() returns (end of stream or
// error — its contract is defined in packet_frame_operate.h).
void ReadPacketThread(InputFormatContext & input_format_ctx, MessageQueue & message_queue)
{
	AVPacket * packet = av_packet_alloc();
	ReadPacketDataToMessageQueue(&input_format_ctx, packet, message_queue.m_read_packet_queue);
	// FIX: the scratch packet was leaked once reading finished.
	av_packet_free(&packet);
}

// Pushes `frame` (or nullptr to flush) into the watermark graph's buffer
// source, then drains every filtered frame out of the buffersink into
// m_video_filter_frame_queue. Ownership of drained frames transfers to the
// queue (PushFrame). Returns false on any FFmpeg error other than the
// normal EAGAIN/EOF drain termination.
bool ProcessFilterFrame(MessageQueue & message_queue, AVFrame * frame, WaterMarkFilter & water_mark)
{
	// KEEP_REF: the graph takes its own reference; the caller still owns `frame`.
	int result = av_buffersrc_add_frame_flags(water_mark.m_input_filter_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
	if (result < 0)
	{
		PrintLog(result);
		return false;
	}
	AVFrame * recv_frame = nullptr;
	while (true)
	{
		recv_frame = av_frame_alloc();
		if (!recv_frame)
		{
			// FIX: allocation failure was previously passed straight to
			// av_buffersink_get_frame.
			return false;
		}
		result = av_buffersink_get_frame(water_mark.m_output_filter_ctx, recv_frame);
		if (result < 0)
		{
			av_frame_free(&recv_frame);
			// EAGAIN: graph needs more input; EOF: graph fully flushed.
			// Both simply end the drain loop.
			if (result == AVERROR(EAGAIN) || result == AVERROR_EOF)
			{
				break;
			}
			PrintLog(result);
			return false;
		}
		// Queue takes ownership of the filtered frame.
		message_queue.m_video_filter_frame_queue.PushFrame(&recv_frame);
		recv_frame = nullptr;
	}
	return true;
}



int main(int argc, const char * argv[])
{
	std::string filename = "/home/hjie/source/open_net/ffmpeg/resource/sample_1280x720.mp4";	
	std::string output_filename = "./save.mp4";
	
	InputFormatContext  input_format_ctx;
	OutputFormatContext output_format_ctx;
	bool result = input_format_ctx.OpenUrlWithFindStreamInfo(filename);

	if (result)
	{
		input_format_ctx.CreateVideoDecoderCtx("", CodecConfig{});
		printf("open input file success.\n");
		result = output_format_ctx.CreateUrl(output_filename);
		if (result)
		{
			if (input_format_ctx.VideoMediaStream())
			{
				result = output_format_ctx.CreateVideoStream(input_format_ctx.VideoMediaStream(), CodecConfig{});
				if (!result)
				{
					printf("create video stream failed.\n");
					return EXIT_FAILURE;
				}
			}
			if (input_format_ctx.AudioMediaStream())
			{
				result = output_format_ctx.CreateAudioStream(input_format_ctx.AudioMediaStream(), CodecConfig{});
				if (!result)
				{
					printf("create audio stream failed.\n");
					return EXIT_FAILURE;
				}
			}
			if (output_format_ctx.AvFormatContext()->nb_streams < 0)
			{
				printf("streams is empty.\n");
				return EXIT_FAILURE;
			}
			MessageQueue message_queue;
			message_queue.m_read_packet_queue.InitMessageQueue(1000);
			message_queue.m_video_decode_frame_queue.InitMessageQueue(500);
			message_queue.m_video_filter_frame_queue.InitMessageQueue(500);
			message_queue.m_video_encode_packet_queue.InitMessageQueue(500);

			printf("frame_rate=%d/%d and time_base=%d/%d.\n", 
				output_format_ctx.VideoEncoder()->AvCodecCtx()->framerate.den, output_format_ctx.VideoEncoder()->AvCodecCtx()->framerate.num,
				output_format_ctx.VideoEncoder()->AvCodecCtx()->time_base.num, output_format_ctx.VideoEncoder()->AvCodecCtx()->time_base.den);

			// return 0;

			WaterMarkFilter water_mark_filter;

			result = TestFilterApi(&water_mark_filter, input_format_ctx.VideoMediaStream());
			if (result)
			{
				output_format_ctx.WriterHeaderTailer();
				std::thread thd = std::thread([&message_queue, &input_format_ctx](){

					ReadPacketThread(input_format_ctx, message_queue);
				});

				ProcessPacketDataFromMessageQueue(message_queue.m_read_packet_queue, [&input_format_ctx, &message_queue, &water_mark_filter, &output_format_ctx](AVPacket * packet){

					bool is_video_packet = packet->stream_index == input_format_ctx.VideoMediaStream()->Index();
					if (is_video_packet)
					{
						// video packet data and
						DecodePacketDataToMessageQueue(input_format_ctx.VideoDecoder(), packet, message_queue.m_video_decode_frame_queue);
						// printf("___________frame queue size : %d_________________.\n", message_queue.m_video_decode_frame_queue.GetSize());
						ProcessFrameDataFromMessageQueue(message_queue.m_video_decode_frame_queue, [&water_mark_filter, &message_queue, &output_format_ctx](AVFrame * frame){

							ProcessFilterFrame(message_queue, frame, water_mark_filter);
							int size = message_queue.m_video_filter_frame_queue.GetSize();
							printf("___________filter frame queue size : %d_________________.\n", size);
							for (;true;)
							{
								AVFrame * frame_data = nullptr;
								message_queue.m_video_filter_frame_queue.PopFrameNonBlocking(&frame_data);
								if (frame_data)
								{
									// printf("%ld and %ld.\n", frame_data->pts, frame_data->pkt_dts);
									EncodeFrameDataToMessageQueue(output_format_ctx.VideoEncoder(), frame_data, message_queue.m_video_encode_packet_queue);
									ProcessPacketDataFromMessageQueue(message_queue.m_video_encode_packet_queue, [&output_format_ctx](AVPacket * pkt){

										// printf("%ld and %ld.\n", pkt->pts, pkt->dts);
										av_interleaved_write_frame(output_format_ctx.AvFormatContext(), pkt);

									}, false);
									av_frame_unref(frame_data);
									av_frame_free(&frame_data);
								}
								else
								{
									break;
								}
							}

						}, false);
					}
					else
					{
						// audio packet data

					}

				}, true);
				// 
				{
					// video packet data and
					DecodePacketDataToMessageQueue(input_format_ctx.VideoDecoder(), nullptr, message_queue.m_video_decode_frame_queue);
					// printf("___________frame queue size : %d_________________.\n", message_queue.m_video_decode_frame_queue.GetSize());
					ProcessFrameDataFromMessageQueue(message_queue.m_video_decode_frame_queue, [&water_mark_filter, &message_queue, &output_format_ctx](AVFrame * frame){

						ProcessFilterFrame(message_queue, frame, water_mark_filter);
						int size = message_queue.m_video_filter_frame_queue.GetSize();
						printf("___________filter frame queue size : %d_________________.\n", size);
						for (;true;)
						{
							AVFrame * frame_data = nullptr;
							message_queue.m_video_filter_frame_queue.PopFrameNonBlocking(&frame_data);
							if (frame_data)
							{
								// printf("%ld and %ld.\n", frame_data->pts, frame_data->pkt_dts);
								EncodeFrameDataToMessageQueue(output_format_ctx.VideoEncoder(), frame_data, message_queue.m_video_encode_packet_queue);
								ProcessPacketDataFromMessageQueue(message_queue.m_video_encode_packet_queue, [&output_format_ctx](AVPacket * pkt){

									printf("%ld and %ld.\n", pkt->pts, pkt->dts);
									av_interleaved_write_frame(output_format_ctx.AvFormatContext(), pkt);

								}, false);
								av_frame_unref(frame_data);
								av_frame_free(&frame_data);
							}
							else
							{
								break;
							}
						}

					}, false);
					ProcessFilterFrame(message_queue, nullptr, water_mark_filter);
					int size = message_queue.m_video_filter_frame_queue.GetSize();
					printf("___________filter frame queue size : %d_________________.\n", size);
					for (;true;)
					{
						AVFrame * frame_data = nullptr;
						message_queue.m_video_filter_frame_queue.PopFrameNonBlocking(&frame_data);
						if (frame_data)
						{
							// printf("%ld and %ld.\n", frame_data->pts, frame_data->pkt_dts);
							EncodeFrameDataToMessageQueue(output_format_ctx.VideoEncoder(), frame_data, message_queue.m_video_encode_packet_queue);
							ProcessPacketDataFromMessageQueue(message_queue.m_video_encode_packet_queue, [&output_format_ctx](AVPacket * pkt){

								printf("%ld and %ld.\n", pkt->pts, pkt->dts);
								av_interleaved_write_frame(output_format_ctx.AvFormatContext(), pkt);

							}, false);
							av_frame_unref(frame_data);
							av_frame_free(&frame_data);
						}
						else
						{
							break;
						}
					}
					EncodeFrameDataToMessageQueue(output_format_ctx.VideoEncoder(), nullptr, message_queue.m_video_encode_packet_queue);
					ProcessPacketDataFromMessageQueue(message_queue.m_video_encode_packet_queue, [&output_format_ctx](AVPacket * pkt){

						printf("%ld and %ld.\n", pkt->pts, pkt->dts);
						av_interleaved_write_frame(output_format_ctx.AvFormatContext(), pkt);

					}, false);
				}
				if (thd.joinable())
				{
					thd.join();
				}
				av_write_trailer(output_format_ctx.AvFormatContext());
			}
			else
			{
				printf("______________fail______________.\n");
			}
		}
	}
	return EXIT_SUCCESS;

}
