
// Created by hjie on 24-1-2.
//
#include "xstack_filter_format.h"
#include <gtest/gtest.h>
#include "unit_test/unit_test.h"
#include <libavcodec/packet.h>
#include <libavformat/avformat.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <vector>
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <iostream>

// Dumps one decoded frame to its own numbered file ("0.yuv", "1.yuv", ...),
// plane by plane (Y, then U, then V), stopping after 100 frames.
// NOTE(review): assumes a 3-plane 4:2:0 layout (chroma at half width/height)
// — verify frame->format is AV_PIX_FMT_YUV420P (or similar) at the call site.
void SaveYUVFile(AVFrame * frame)
{
    static int count = 0;
    if (count > 100)
    {
        return;
    }
    std::string name = std::to_string(count++) + ".yuv";
    FILE * file_handle = fopen(name.c_str(), "wb+");
    if (file_handle == nullptr)
    {
        // Can't open the dump file; skip this frame rather than crash.
        return;
    }
    for (int index = 0; index < 3; index++) {

        int width  = index == 0 ? frame->width  : frame->width/2;
        int height = index == 0 ? frame->height : frame->height/2;

        // Walk the plane with a local cursor: the original advanced
        // frame->data[index] itself, leaving the caller's frame pointing
        // past the end of each plane (use-after-save corruption).
        auto * plane = frame->data[index];
        for (int height_index = 0; height_index < height; height_index++) {

            // linesize may exceed width (row padding), so write `width`
            // bytes per row and step by linesize.
            fwrite(plane, 1, width, file_handle);
            plane += frame->linesize[index];
        }
    }
    fclose(file_handle);
}

// Appends one decoded frame to a single growing file ("30.yuv"), deleting any
// stale file from a previous run on the first call. Plane layout matches
// SaveYUVFile.
// NOTE(review): assumes a 3-plane 4:2:0 layout — verify frame->format.
void SaveYUVFileStream(AVFrame * frame)
{
    static int count = 0;
    std::string name = std::to_string(30) + ".yuv";
    if (count == 0)
    {
        // First call: discard output left over from a previous run.
        remove(name.c_str());
        count++;
    }
    // "ab": binary append. The original "a+" opened in text mode, which
    // corrupts raw YUV bytes on platforms that translate line endings.
    FILE * file_handle = fopen(name.c_str(), "ab");
    if (file_handle == nullptr)
    {
        return;
    }
    for (int index = 0; index < 3; index++) {

        int width  = index == 0 ? frame->width  : frame->width/2;
        int height = index == 0 ? frame->height : frame->height/2;

        // Local cursor instead of mutating frame->data[index]: the original
        // advanced the frame's own plane pointers, corrupting the caller's
        // frame after the save.
        auto * plane = frame->data[index];
        for (int height_index = 0; height_index < height; height_index++) {

            fwrite(plane, 1, width, file_handle);
            plane += frame->linesize[index];
        }
    }
    fclose(file_handle);
}

// Feeds `frame` into the buffersrc of `stream_index` (a nullptr frame flushes
// that input) and drains every frame currently available at the graph's
// buffersink into data.m_filter_frame_queue (queue takes ownership).
//
// Returns 0 on success — AVERROR_EOF / AVERROR(EAGAIN) from the sink simply
// mean "drained" — or a negative AVERROR code on failure.
int ProcessFilterData(XstackFilterDataInfo & data, int stream_index, AVFrame * frame)
{
    // AV_BUFFERSRC_FLAG_KEEP_REF: the graph keeps its own reference, so the
    // caller still owns `frame` and remains responsible for freeing it.
    int result = av_buffersrc_add_frame_flags(data.m_input_stream_map[stream_index]->m_filter_buffer, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (result < 0)
    {
        PrintLog(result);
        // Propagate the failure (the original returned 0 here, silently
        // reporting success on a failed buffersrc push).
        return result;
    }
    while (true)
    {
        AVFrame * filter_frame = av_frame_alloc();
        if (filter_frame == nullptr)
        {
            // Allocation failure: bail out instead of passing nullptr to
            // av_buffersink_get_frame.
            return AVERROR(ENOMEM);
        }
        result = av_buffersink_get_frame(data.m_output_file_ctx->m_filter_sink_ctx, filter_frame);
        if (result < 0)
        {
            if (result == AVERROR_EOF || result == AVERROR(EAGAIN))
            {
                result = 0;
            }
            av_frame_free(&filter_frame);
            break;
        }
        data.m_filter_frame_queue.PushFrame(&filter_frame);
    }
    return result;
}

// Drains every decoded frame queued for `stream_index`, pushes each through
// the xstack filter graph, then encodes and muxes whatever the graph has
// produced so far. Records the stream's latest pts (converted to
// microseconds via pkt_timebase * AV_TIME_BASE) into `input_stream_map`.
// Once the stream's source file hit EOF, flushes that filter input with a
// nullptr frame.
void ProcessFrame(XstackFilterDataInfo & data, std::map<int, double> & input_stream_map, int stream_index)
{
    auto & input_file = data.m_input_stream_map[stream_index]->m_stream_link_input_file_ctx;
    for (;true;)
    {
        // Pop the next decoded frame; non-blocking, so an empty queue ends
        // this pass rather than stalling the round-robin caller.
        AVFrame * frame = nullptr;
        int result = data.m_input_stream_map[stream_index]->m_decode_frame_queue.PopFrameNonBlocking(&frame);
        if (result < 0 || frame == nullptr)
        {
            break;
        }
        // Latest presentation time of this input, in microseconds.
        input_stream_map[stream_index] = (double)frame->pts * av_q2d(input_file->m_input_file_ctx->VideoDecoder()->AvCodecCtx()->pkt_timebase) * AV_TIME_BASE;
        ProcessFilterData(data, stream_index, frame);
        for (;true;)
        {
            // Drain every frame the filter graph emitted for this push.
            AVFrame * filter_frame = nullptr;
            result = data.m_filter_frame_queue.PopFrameNonBlocking(&filter_frame);
            if (result < 0 || filter_frame == nullptr)
            {
                break;
            }
            // Lazy one-time encoder setup, sized from the first filtered frame.
            if (!data.m_output_file_ctx->m_video_init)
            {
                ProcessLibX264(data, filter_frame);
            }
            // Let the encoder pick the picture type itself.
            filter_frame->pict_type = AV_PICTURE_TYPE_NONE;
            EncodeFrameDataToMessageQueue(data.m_output_file_ctx->m_output_file_ctx->VideoEncoder(), filter_frame, data.m_output_file_ctx->m_encoder_packet_queue);
            ProcessPacketDataFromMessageQueue(data.m_output_file_ctx->m_encoder_packet_queue, [&data](AVPacket * pkt){

                // Rescale encoder timestamps to the output stream timebase
                // before handing the packet to the muxing queue.
                auto encode_tm = data.m_output_file_ctx->m_output_file_ctx->VideoEncoder()->AvCodecCtx()->time_base;
                av_packet_rescale_ts(pkt, encode_tm, data.m_output_file_ctx->m_output_file_ctx->VideoMediaStream()->AvStream()->time_base);
                pkt->time_base = data.m_output_file_ctx->m_output_file_ctx->VideoMediaStream()->AvStream()->time_base;
                pkt->stream_index = data.m_output_file_ctx->m_output_file_ctx->VideoMediaStream()->Index();
                PushPacketToQueue(data,pkt);

            }, false);
            av_frame_free(&filter_frame);
        }
        av_frame_free(&frame);
    }
    // Source exhausted: flush this stream's buffersrc so xstack can finish.
    if (data.m_input_stream_map[stream_index]->m_stream_link_input_file_ctx->m_is_eof)
    {
        ProcessFilterData(data, stream_index, nullptr);        
    }
}


// Main pipeline driver: spawns one reader thread per input file, then
// round-robins over the input streams — demuxing, decoding, filtering,
// encoding and muxing — until every input reports EOF. Finally drains the
// filter/encoder queues, flushes the encoder and writes the trailer.
void ProcessThread(XstackFilterDataInfo & data)
{
    std::vector<std::thread> thread_vec;
    thread_vec.reserve(data.m_input_file_map.size());
    for (auto & input_data : data.m_input_file_map)
    {
        auto index = input_data.first;
        input_data.second->m_is_eof = false;
        // Reader thread: demux packets from this file into its packet queue.
        // Captures `index` by value; `data` outlives the thread (joined below).
        thread_vec.emplace_back([&data, index](){

            auto input_data = data.m_input_file_map[index];
            AVPacket * packet = av_packet_alloc();
            ReadPacketDataToMessageQueue(input_data->m_input_file_ctx, packet, input_data->m_read_packet_queue);
        });
    }
    // Give the readers a head start so the first pops find data.
    usleep(1000 * 10);
    // Per-stream "latest pts seen" map, seeded with a sentinel value.
    std::map<int, double> input_stream_map;
    for (auto & stream_data : data.m_input_stream_map)
    {
        input_stream_map[stream_data.first] = -10.0;
    }
    int index_by_mode = 0;
    while (true)
    {
        // Round-robin over input streams.
        int index = int(index_by_mode%data.m_input_stream_map.size());
        index_by_mode++;
        auto & input_file = data.m_input_stream_map[index]->m_stream_link_input_file_ctx;
        AVPacket * packet = nullptr;
        int result = data.m_input_stream_map[index]->m_stream_link_input_file_ctx->m_read_packet_queue.PopPacketNonBlocking(&packet);
        if (result < 0)
        {
            // NOTE(review): this flushes the decoder only when the pop error
            // is NOT EAGAIN — presumably EAGAIN means "queue temporarily
            // empty" and anything else means the reader finished; confirm
            // against PopPacketNonBlocking's contract.
            if (result != AVERROR(EAGAIN) && !data.m_input_stream_map[index]->m_stream_link_input_file_ctx->m_is_eof)
            {
                DecodePacketDataToMessageQueue(input_file->m_input_file_ctx->VideoDecoder(), nullptr, data.m_input_stream_map[index]->m_decode_frame_queue);
                data.m_input_stream_map[index]->m_stream_link_input_file_ctx->m_is_eof = true;
                ProcessFrame(data, input_stream_map, index);
            }
            // Leave the loop only once every input file has hit EOF.
            bool need_break = true;
            for (auto & check_data : data.m_input_file_map)
            {
                if (!check_data.second->m_is_eof)
                {
                    need_break = false;
                    break;
                }
            }
            if (need_break)
            {
                break;
            }
            continue;
        }
        // Non-video packet from the file selected as the audio source
        // (m_index match, or -1 = take audio from any file): remux it
        // directly without re-encoding.
        if (packet->stream_index != input_file->m_input_file_ctx->VideoMediaStream()->Index() && (data.m_output_file_ctx->m_index == input_file->m_index || data.m_output_file_ctx->m_index == -1))
        {
            if (!data.m_output_file_ctx->m_audio_init)
            {
                InitAudioStreamCopyModol(data);
            }
            auto src_tm = data.m_output_file_ctx->m_output_file_ctx->AudioEncoder()->AvCodecCtx()->time_base; 
            auto dst_tm = data.m_output_file_ctx->m_output_file_ctx->AudioMediaStream()->AvStream()->time_base;
            packet->stream_index = data.m_output_file_ctx->m_output_file_ctx->AudioMediaStream()->Index();
            av_packet_rescale_ts(packet, src_tm, dst_tm); 
            PushPacketToQueue(data, packet);
            continue;
        }
        // Video packet: decode it, then run the filter/encode/mux pass.
        if (packet && packet->stream_index == input_file->m_input_file_ctx->VideoMediaStream()->Index())
        {
            //PrintPacketInfo(input_file->m_input_file_ctx->AvFormatContext(), packet);
            DecodePacketDataToMessageQueue(input_file->m_input_file_ctx->VideoDecoder(), packet, data.m_input_stream_map[index]->m_decode_frame_queue);
            ProcessFrame(data, input_stream_map, index);
        }
        av_packet_unref(packet);
        av_packet_free(&packet);
    }
    // Ask the graph to push out any frames still buffered internally.
    auto frame_size = avfilter_graph_request_oldest(data.m_filter_graph);
    printf("frame size : %d encode packet queue : %d and filter queue : %d.\n", 
            frame_size, 
            data.m_output_file_ctx->m_encoder_packet_queue.GetSize(),
            data.m_filter_frame_queue.GetSize());
    if (true)
    {
        // Drain remaining filtered frames; here packets are written straight
        // to the muxer instead of going through PushPacketToQueue.
        for (;true;)
        {
            AVFrame * frame = nullptr;
            int ret = data.m_filter_frame_queue.PopFrameNonBlocking(&frame);
            if (ret < 0)
            {
                break;
            }
            printf("function : %s\tline : %d.\n",__func__, __LINE__);
            frame->pict_type = AV_PICTURE_TYPE_NONE;
            EncodeFrameDataToMessageQueue(data.m_output_file_ctx->m_output_file_ctx->VideoEncoder(), frame, data.m_output_file_ctx->m_encoder_packet_queue);
            ProcessPacketDataFromMessageQueue(data.m_output_file_ctx->m_encoder_packet_queue, [&data](AVPacket * pkt){

                //auto encode_tm = av_buffersink_get_time_base(data.m_output_file_ctx->m_filter_sink_ctx);
                auto encode_tm = data.m_output_file_ctx->m_output_file_ctx->VideoEncoder()->AvCodecCtx()->time_base;
                //printf("pts : %ld and dts : %ld  encode timebase : %d/%d.\n", pkt->pts, pkt->dts, encode_tm.num, encode_tm.den);
                av_packet_rescale_ts(pkt, encode_tm, data.m_output_file_ctx->m_output_file_ctx->VideoMediaStream()->AvStream()->time_base);
                pkt->time_base = data.m_output_file_ctx->m_output_file_ctx->VideoMediaStream()->AvStream()->time_base;
                //pkt->pos = -1;
                pkt->stream_index = data.m_output_file_ctx->m_output_file_ctx->VideoMediaStream()->Index();
                //pkt->duration = 0;
                av_interleaved_write_frame(data.m_output_file_ctx->m_output_file_ctx->AvFormatContext(), pkt);

            }, false);
            av_frame_free(&frame);
        }
        {
            // Flush the encoder (nullptr frame) and write its delayed packets.
            EncodeFrameDataToMessageQueue(data.m_output_file_ctx->m_output_file_ctx->VideoEncoder(), nullptr, data.m_output_file_ctx->m_encoder_packet_queue);
            ProcessPacketDataFromMessageQueue(data.m_output_file_ctx->m_encoder_packet_queue, [&data](AVPacket * pkt){

                //auto encode_tm = av_buffersink_get_time_base(data.m_output_file_ctx->m_filter_sink_ctx);
                auto encode_tm = data.m_output_file_ctx->m_output_file_ctx->VideoEncoder()->AvCodecCtx()->time_base;
                //printf("pts : %ld and dts : %ld  encode timebase : %d/%d.\n", pkt->pts, pkt->dts, encode_tm.num, encode_tm.den);
                av_packet_rescale_ts(pkt, encode_tm, data.m_output_file_ctx->m_output_file_ctx->VideoMediaStream()->AvStream()->time_base);
                pkt->time_base = data.m_output_file_ctx->m_output_file_ctx->VideoMediaStream()->AvStream()->time_base;
                //pkt->pos = -1;
                pkt->stream_index = data.m_output_file_ctx->m_output_file_ctx->VideoMediaStream()->Index();
                //pkt->duration = 0;
                av_interleaved_write_frame(data.m_output_file_ctx->m_output_file_ctx->AvFormatContext(), pkt);

            }, false);
         
        }
    }
    av_write_trailer(data.m_output_file_ctx->m_output_file_ctx->AvFormatContext());
    for (auto & thd : thread_vec)
    {
        thd.join();
    }
}

// Test driver: builds a 4-input xstack pipeline from local sample files and
// runs the full demux/decode/filter/encode/mux pass.
int main(int argc, const char * argv[])
{
    testing::InitGoogleTest();

    // Four copies of the same 640x360 clip feed the xstack grid.
    std::vector<std::string> input_file_vec = {
        "/home/hjie/data/fake/resource/test1_640x360.mp4",
        "/home/hjie/data/fake/resource/test1_640x360.mp4",
        "/home/hjie/data/fake/resource/test1_640x360.mp4",
        "/home/hjie/data/fake/resource/test1_640x360.mp4",
    };

    // Alternative two-input (960x540) configuration; kept for manual testing
    // but not wired up below.
    std::vector<std::string> two_file_vec = {
        "/home/hjie/data/fake/resource/test1_960x540.mp4",
        "/home/hjie/data/fake/resource/test2_960x540.mp4",
    };

    XstackFilterDataInfo xstack;
    InitInputFile(input_file_vec, xstack);
    //InitInputFile(two_file_vec, xstack);
    InitXstackFilter(xstack);
    ProcessThread(xstack);

    delete xstack.m_output_file_ctx->m_output_file_ctx;
    return 0;
}
