//#ifdef __cplusplus
extern "C" {
//#endif  /* __cplusplus */
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavutil/time.h>
#include <libswscale/swscale.h>
//#ifdef __cplusplus
}
//#endif

#include <pthread.h>
#include <sys/time.h>
#include <unistd.h>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <iostream>

int main(int argc, char *argv[]) {
    assert(argc >= 2);

    int i;
    int videoindex = -1;

    AVFormatContext *fmtctx;
    AVFormatContext *ofmt_ctx;
    AVCodecContext *decoder_ctx;
    AVCodecContext *encoder_ctx;
    AVCodec *decoder;
    AVCodec *encoder;
    AVPacket *pkt;
    AVFrame *frame;
    AVDictionary *opt;

    int ret;
    AVPacket *encoded_pkt;
#ifdef IMG_CONVERT
    struct SwsContext *img_convert_ctx;
#endif
    const char *filename;

    filename = argv[1];

    // Init
    av_register_all();
    avdevice_register_all();
    avformat_network_init();

    // Input and Decode
    opt = NULL;
    fmtctx = NULL;
    av_dict_set(&opt, "protocol_whitelist", "file,udp,rtp", 0);
    if (avformat_open_input(&fmtctx, filename, NULL, &opt)) {
        fprintf(stderr, "%s\n", "Failed to call av_format_open_input");
        return -1;
    }
    av_dict_free(&opt);

    if (avformat_find_stream_info(fmtctx, NULL) < 0) {
        printf("Couldn't find stream information.\n");
        return -1;
    }

    for (i = 0; i < (int)fmtctx->nb_streams; i++) {
        if (fmtctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    }

    if (videoindex == -1) {
        printf("Couldn't find a video stream.\n");
        return -1;
    }

    AVCodecParameters *decoder_param;
    decoder_param = fmtctx->streams[videoindex]->codecpar;
    decoder = avcodec_find_decoder(decoder_param->codec_id);
    if (!decoder) {
        printf("Couldn't find a decoder.\n");
        return -1;
    }

    decoder_ctx = avcodec_alloc_context3(decoder);
    if (!decoder_ctx) {
        printf("Couldn't alloc a decoder ctx.\n");
        return -1;
    }

    avcodec_parameters_to_context(decoder_ctx, decoder_param);
    if (avcodec_open2(decoder_ctx, decoder, NULL) != 0) {
        printf("Could not open codec.\n");
        return -1;
    }

    pkt = av_packet_alloc();
    av_init_packet(pkt);
    frame = av_frame_alloc();

    // Encode and output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "sdl2", NULL);
    encoder = avcodec_find_encoder_by_name("rawvideo");
    encoder_ctx = avcodec_alloc_context3(encoder);
    if (NULL == encoder_ctx) {
        fprintf(stderr, "Failed to call %s\n", __func__);
    }
    encoder_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
    encoder_ctx->width = decoder_ctx->width;
    encoder_ctx->height = decoder_ctx->height;
    encoder_ctx->time_base.num = 1;
    encoder_ctx->time_base.den = 25;

    if (avcodec_open2(encoder_ctx, encoder, NULL) != 0) {
        printf("Could not open encoder.\n");
        return -1;
    }

    // Add a new stream to output,should be called by the user before
    // avformat_write_header() for muxing
    AVStream *video_st;
    video_st = avformat_new_stream(ofmt_ctx, encoder);
    if (video_st == NULL) {
        return -1;
    }

    avcodec_parameters_from_context(video_st->codecpar, encoder_ctx);

    // Show some Information
    av_dump_format(fmtctx, 0, NULL, 0);
    av_dump_format(ofmt_ctx, 0, NULL, 1);

    // Write File Header
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret != 0) {
        printf("Failed to write header.\n");
        return -1;
    }

#ifdef IMG_CONVERT
    img_convert_ctx = sws_getContext(decoder_ctx->width, decoder_ctx->height,
                                     decoder_ctx->pix_fmt, decoder_ctx->width,
                                     decoder_ctx->height, AV_PIX_FMT_YUV420P,
                                     SWS_BICUBIC, NULL, NULL, NULL);
    AVFrame *pFrameYUV;
    pFrameYUV = av_frame_alloc();
    pFrameYUV->width = decoder_ctx->width;
    pFrameYUV->height = decoder_ctx->height;
    pFrameYUV->format = AV_PIX_FMT_YUV420P;
    av_frame_get_buffer(pFrameYUV, 1);
#endif

    encoded_pkt = av_packet_alloc();
    uint64_t framecnt = 0;
    int64_t start_time = av_gettime();
    while (av_read_frame(fmtctx, pkt) >= 0) {
        ret = avcodec_send_packet(decoder_ctx, pkt);
        if (ret != 0) {
            printf("Feed decoder error.\n");
            break;
        }

        ret = avcodec_receive_frame(decoder_ctx, frame);
        if (ret != 0) {
            if (ret == AVERROR(EAGAIN)) {
                continue;
            } else {
                char error_buf[1024];
                av_strerror(ret, error_buf, sizeof(error_buf));
                printf("Receive_frame_error: %s.\n", error_buf);
            }
        }

#ifdef IMG_CONVERT
        sws_scale(img_convert_ctx, (const unsigned char *const *)frame->data,
                  frame->linesize, 0, decoder_ctx->height, pFrameYUV->data,
                  pFrameYUV->linesize);
        avcodec_send_frame(encoder_ctx, pFrameYUV);
#endif
        avcodec_send_frame(encoder_ctx, frame);
        ret = avcodec_receive_packet(encoder_ctx, encoded_pkt);
        if (ret != 0) {
            char error_buf[1024];
            av_strerror(ret, error_buf, sizeof(error_buf));
            printf("Receive_packet_error: %s.\n", error_buf);
        }

        av_frame_unref(frame);
        av_packet_unref(pkt);

        encoded_pkt->stream_index = video_st->index;

        // Write PTS
        AVRational time_base = ofmt_ctx->streams[videoindex]->time_base;      //
        AVRational r_framerate1 = fmtctx->streams[videoindex]->r_frame_rate;  //
        AVRational time_base_q = {1, AV_TIME_BASE};
        // Duration between 2 frames (us)
        int64_t calc_duration =
            (double)(AV_TIME_BASE) * (1 / av_q2d(r_framerate1));  //内部时间戳
        // Parameters
        // encoded_pkt->pts =
        // (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) /
        // (double)(av_q2d(time_base));
        encoded_pkt->pts =
            av_rescale_q(framecnt++ * calc_duration, time_base_q, time_base);
        encoded_pkt->dts = encoded_pkt->pts;
        encoded_pkt->duration = av_rescale_q(
            calc_duration, time_base_q,
            time_base);  //(double)(calc_duration)*(double)(av_q2d(time_base_q))
                         /// (double)(av_q2d(time_base));
        encoded_pkt->pos = -1;

        // Delay
        int64_t pts_time =
            av_rescale_q(encoded_pkt->dts, time_base, time_base_q);
        int64_t now_time = av_gettime() - start_time;
        if (pts_time > now_time) av_usleep(pts_time - now_time);

        // ret = av_interleaved_write_frame(ofmt_ctx, encoded_pkt);
        video_st->cur_dts = AV_NOPTS_VALUE;
        ret = av_write_frame(ofmt_ctx, encoded_pkt);

        av_packet_unref(encoded_pkt);
    }

    // av_packet_free(&pkt);
    // av_frame_free(&frame);
    av_write_trailer(ofmt_ctx);
    avformat_close_input(&fmtctx);

    return 0;
}
