#include <iostream>
#include <opencv2/opencv.hpp>
#include <sys/time.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
using namespace cv;
using namespace std;
#define ENABLE 1
/**
 * Print a human-readable description of an FFmpeg status code to stderr.
 *
 * @param ret an FFmpeg return value (negative on failure, >= 0 on success)
 * @return the code unchanged, so calls can be chained inline:
 *         ret = averror( avformat_find_stream_info( ... ) );
 */
int averror( int ret ) {
    if ( ret >= 0 ) {
        return ret;
    }
    char msg[512];
    av_strerror( ret, msg, sizeof( msg ) - 1 );
    fprintf( stderr, "%s\n", msg );
    return ret;
}

void imagehandle( cv::Mat& img );

int main( int argc, char* argv[] ) {

    char* inUrl = "rtsp://admin:ad53937301@192.168.1.64:554/h264/ch1/main/av_stream";

    // nginx-rtmp live server url
    char* outUrl = "rtmp://192.168.1.167/live/stream";

    avcodec_register_all();
    // register all MUX
    av_register_all();
    // register all net protocal
    avformat_network_init();

    VideoCapture cam;
    Mat frame;
    // Frame fromat converting context
    SwsContext* vsc = NULL;

    // Output data structure--YUV
    AVFrame* yuv = NULL;

    // Encoder context
    AVCodecContext* vc = NULL;

    // rtmp flv MUX
    AVFormatContext* ic = NULL;

    try {
        /////////////////////////////////////////////////////////////////////////////////////
        /// 1. Open Cam
        cam.open( inUrl );
        if ( !cam.isOpened() ) {
            printf( "cam open failed" );
        }
        cout << "cam open sucess" << endl;
        /// 2. Init SwsContext

        int inWidth = cam.get( CAP_PROP_FRAME_WIDTH );
        int inHeight = cam.get( CAP_PROP_FRAME_HEIGHT );
        int fps = cam.get( CAP_PROP_FPS );
        if ( fps == 0 ) {
            fps = 25;
        }
        cout << fps << endl;

        vsc = sws_getCachedContext( vsc, inWidth, inHeight, AV_PIX_FMT_BGR24, inWidth, inHeight, AV_PIX_FMT_YUV420P,
                                    SWS_BICUBIC, 0, 0, 0 );
        if ( !vsc ) {
            printf( "sws_getCachedContext failed" );
        }

        /// 3. Init Output data structure--YUV
        yuv = av_frame_alloc();
        yuv->format = AV_PIX_FMT_YUV420P;
        yuv->width = inWidth;
        yuv->height = inHeight;
        yuv->pts = 0;
        // allocate the space for the data of YUV
        int ret = av_frame_get_buffer( yuv, 32 );
        if ( ret != 0 ) {
            char buf[1024] = {0};
            av_strerror( ret, buf, sizeof( buf ) - 1 );
            printf( buf );
        }

//-==========================================================================================================================
/// 4. Init Encode context
// a. Find Encoder
#if ( !ENABLE )
        std::cout << "====>manul codec<====" << std::endl;

        AVCodec* codec = avcodec_find_encoder( AV_CODEC_ID_H264 );
        if ( !codec ) {
            printf( "Can't find H.264 encoder" );
        }
        // b. Create encoder context
        vc = avcodec_alloc_context3( codec );
        if ( !vc ) {
            printf( "avcodec_alloc_context3 failed" );
        }

        // c. Config Encoder
        vc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        vc->codec_id = codec->id;
        vc->thread_count = 8;
        vc->bit_rate = 50 * 1024 * 8; // video size(bits) per second: 50kByte
        vc->width = inWidth;
        vc->height = inHeight;

        vc->time_base = {1, fps}; // used to calculate pts: pts*time_base = second

        vc->framerate = {fps, 1};
        vc->gop_size = 50;    // for how many frames there is a I frame
        vc->max_b_frames = 0; // if these is no B frames, the orders of both decoding and presentation will be the
        vc->pix_fmt = AV_PIX_FMT_YUV420P;
#endif
//-==========================================================================================================================
#if ( ENABLE )
        std::cout << "====>copy codec<====" << std::endl;
        AVFormatContext* ifmt_ctx = nullptr;
        ret = avformat_open_input( &ifmt_ctx, inUrl, nullptr, nullptr );
        averror( ret );
        ret = avformat_find_stream_info( ifmt_ctx, NULL );
        averror( ret );

        int videostream_index = av_find_best_stream( ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0 );
        averror( ret );
        AVStream* instream = ifmt_ctx->streams[videostream_index];
        //查找解码器
        AVCodec* codec = avcodec_find_encoder( instream->codecpar->codec_id );
        if ( codec == NULL ) {
            averror( 0 );
        }
        AVCodecParameters* codecpar = avcodec_parameters_alloc();
        ret = avcodec_parameters_copy( codecpar, instream->codecpar ); //赋值配置信息
        averror( ret );
        codecpar->codec_tag = 0;
        vc = avcodec_alloc_context3( codec );
        ret = avcodec_parameters_to_context( vc, codecpar );
        vc->time_base = {1, fps}; // used to calculate pts: pts*time_base = second
        averror( ret );
        avformat_close_input( &ifmt_ctx );
#endif
        //-==========================================================================================================================

        // d. Open encodder context
        ret = avcodec_open2( vc, 0, 0 );
        if ( ret != 0 ) {
            char buf[1024] = {0};
            av_strerror( ret, buf, sizeof( buf ) - 1 );
            printf( buf );
        }
        cout << "avcodec_open2 successed!" << endl;

        /// 5. Config MUX and stream for output
        // a. Create context for MUX
        ret = avformat_alloc_output_context2( &ic, 0, "flv", outUrl );
        if ( ret != 0 ) {
            char buf[1024] = {0};
            av_strerror( ret, buf, sizeof( buf ) - 1 );
            printf( buf );
        }
        // b. Add video stream
        AVStream* vs = avformat_new_stream( ic, NULL );
        if ( !vs ) {
            printf( "avformat_new_stream failed" );
        }
        // vs->codecpar->codec_tag = 0;
        avcodec_parameters_copy( vs->codecpar, codecpar );

        // copy  parameter from Encoder to MUX
        avcodec_parameters_from_context( vs->codecpar, vc );
        av_dump_format( ic, 0, outUrl, 1 );

        /// 6. Open rtmp output IO
        ret = avio_open( &ic->pb, outUrl, AVIO_FLAG_WRITE );
        if ( ret != 0 ) {
            char buf[1024] = {0};
            av_strerror( ret, buf, sizeof( buf ) - 1 );
            printf( buf );
        }
        // write mux header
        ret = avformat_write_header(
            ic, NULL ); // after this operation the stream's time_base will also be changed, not vc->time_base anymore
        if ( ret != 0 ) {
            char buf[1024] = {0};
            av_strerror( ret, buf, sizeof( buf ) - 1 );
            printf( buf );
        }

        AVPacket pack;
        memset( &pack, 0, sizeof( pack ) );
        int vpts = 0;
        for ( ;; ) {
            /// read rtsp video frame & decode the frame to YUV
            if ( !cam.grab() ) {
                continue;
            }
            /// convert YUV to RGB
            if ( !cam.retrieve( frame ) ) {
                continue;
            }
            imagehandle( frame );
            /// convert RGB to YUV
            // Input data structure--RGB
            uint8_t* indata[AV_NUM_DATA_POINTERS] = {0}; // srcStride
            indata[0] = frame.data;
            int inlinesize[AV_NUM_DATA_POINTERS] = {0}; // srcSlice
            //
            // inlinesize[0] = inWidth * 3;
            inlinesize[0] = frame.cols * frame.elemSize();

            int h = sws_scale( vsc, indata, inlinesize, 0, frame.rows, yuv->data, yuv->linesize );
            if ( h <= 0 ) {
                continue;
            }

            // cout << h << " " << flush;

            /// Mux YUV to flv h.264
            yuv->pts = vpts;
            vpts++;
            ret = avcodec_send_frame( vc, yuv );
            if ( ret != 0 ) {
                continue;
            }
            ret = avcodec_receive_packet( vc, &pack );
            if ( ret != 0 || pack.size > 0 ) {
                // cout << '*' <<pack.size<< flush;
            } else {
                continue;
            }

            /// Push stream
            pack.pts = av_rescale_q( pack.pts, vc->time_base, vs->time_base );
            pack.dts = av_rescale_q( pack.dts, vc->time_base, vs->time_base );
            ret = av_interleaved_write_frame( ic, &pack );
            if ( ret == 0 ) {
                cout << '#' << flush;
            }
        }
    } catch ( exception& ex ) {

        if ( cam.isOpened() )
            cam.release();
        if ( vsc ) {
            sws_freeContext( vsc );
            vsc = NULL;
        }
        if ( vc ) {
            avio_closep( &ic->pb );
            avcodec_free_context( &vc );
        }
        cerr << ex.what() << endl;
    }
    getchar();

    return 0;
}

/**
 * @description: Fuse a visible-light (BGR) image with an infrared image:
 *   warp a hand-picked region of the visible frame onto the IR frame, then
 *   replace the IR luma channel with the warped visible luma (YCrCb space).
 * @param bgr: visible-light image  ir: infrared image
 * @return: 0 on success, -1 on any error
 * @author: xudongxu
 */
int run( cv::Mat bgr, cv::Mat ir ) {
    try {
        const int irW = ir.cols;
        const int irH = ir.rows;

        // Hand-picked ROI inside the visible image that matches the IR view.
        const cv::Point roiTL( 425, 160 );
        const cv::Point roiBR( bgr.cols - 354, bgr.rows - 72 );

        // ROI corners (clockwise from top-left) mapped onto the full IR frame.
        std::vector<Point2f> srcCorners = {
            Point2f( roiTL.x, roiTL.y ),
            Point2f( roiBR.x, roiTL.y ),
            Point2f( roiBR.x, roiBR.y ),
            Point2f( roiTL.x, roiBR.y ),
        };
        std::vector<Point2f> dstCorners = {
            Point2f( 0, 0 ),
            Point2f( irW - 1, 0 ),
            Point2f( irW - 1, irH - 1 ),
            Point2f( 0, irH - 1 ),
        };

        cv::Mat homography = cv::findHomography( srcCorners, dstCorners, cv::RANSAC, 10 );

        // Warp the visible image into the IR frame and extract its luma.
        cv::Mat warped;
        cv::warpPerspective( bgr, warped, homography, cv::Size( irW, irH ) );
        cv::Mat luma;
        cv::cvtColor( warped, luma, cv::COLOR_BGR2GRAY );

        // Split the IR image in YCrCb space and swap in the visible luma.
        cv::Mat irYCrCb;
        cv::cvtColor( ir, irYCrCb, cv::COLOR_BGR2YCrCb );
        cv::resize( luma, luma, cv::Size( irYCrCb.cols, irYCrCb.rows ) );

        std::vector<Mat> irPlanes;
        cv::split( irYCrCb, irPlanes );
        Mat fusedPlanes[3] = {luma, irPlanes[1], irPlanes[2]};
        Mat merged;
        cv::merge( fusedPlanes, 3, merged );

        // Back to BGR; the result is currently discarded (imwrite disabled).
        cv::Mat fusion_bgr;
        cv::cvtColor( merged, fusion_bgr, cv::COLOR_YCrCb2BGR );
        // cv::imwrite( name, fusion_bgr );
        return 0;
    } catch ( const cv::Exception& e ) {
        std::cerr << e.what() << '\n';
        return -1;
    } catch ( const std::exception& e ) {
        std::cerr << e.what() << '\n';
        return -1;
    } catch ( ... ) {
        return -1;
    }
}

/**
 * @description: Image-processing callback applied to each captured frame
 *   before encoding: draws a fixed 500x500 green rectangle.
 * @param img: frame to annotate (modified in place)
 * @return:
 * @author: xudongxu
 */
void imagehandle( cv::Mat& img ) {
    const cv::Point topLeft( 100, 100 );
    const cv::Point bottomRight( 600, 600 );
    const Scalar green( 0, 255, 0 );
    cv::rectangle( img, topLeft, bottomRight, green, 2, 8, 0 );
}