#include <chrono>
#include <cstring>
#include <iostream>
#include <string>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}

// Attach the current wall-clock time (milliseconds since the Unix epoch,
// rendered as an ASCII decimal string) to `frame` as "user data unregistered"
// SEI side data.  Payload layout follows ITU-T H.264 D.1.7: a 16-byte UUID
// followed by the free-form payload bytes.  libx264 picks up
// AV_FRAME_DATA_SEI_UNREGISTERED side data and emits it as an SEI NAL.
//
// Silently does nothing on a null frame or on allocation failure (dropping
// the SEI is preferable to aborting the push).
void add_sei_to_frame(AVFrame* frame) {
    if (!frame) {
        return;
    }

    // Current system time in milliseconds since the epoch, as ASCII digits.
    const auto now_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now().time_since_epoch());
    const std::string time_str = std::to_string(now_ms.count());

    // Fixed 16-byte UUID identifying this producer's unregistered SEI payloads.
    static const uint8_t uuid[16] = {
        0x12, 0x34, 0x56, 0x78, 0x90, 0xAB, 0xCD, 0xEF,
        0x12, 0x34, 0x56, 0x78, 0x90, 0xAB, 0xCD, 0xEF
    };

    // Total payload: UUID prefix + timestamp string (no NUL terminator).
    const size_t sei_size = sizeof(uuid) + time_str.size();

    // Write directly into the buffer av_frame_new_side_data allocates; the
    // previous av_malloc/memcpy/av_free round trip was redundant and added an
    // extra allocation-failure path.
    AVFrameSideData* side_data =
        av_frame_new_side_data(frame, AV_FRAME_DATA_SEI_UNREGISTERED, sei_size);
    if (!side_data) {
        return;  // out of memory — skip the SEI for this frame
    }
    memcpy(side_data->data, uuid, sizeof(uuid));
    memcpy(side_data->data + sizeof(uuid), time_str.data(), time_str.size());
}

// Program entry point: loops an input media file forever, pushing it to an
// RTMP URL as FLV.  Video is (intended to be) decoded, scaled, stamped with a
// wall-clock SEI via add_sei_to_frame(), and re-encoded to H.264; audio
// packets are remuxed without transcoding.
//
//   argv[1] = input file path
//   argv[2] = output RTMP URL
//
// Returns -1 on setup failure; the streaming loop itself never exits, so the
// trailing cleanup is unreachable (see NOTE(review) at the bottom).
//
// NOTE(review): this function contains several concrete defects, flagged
// inline below.  The code is annotated only — a proper fix needs a separate
// decoder context, one-time stream/header setup outside the loop, and
// cross-iteration timestamp offsets.
int main(int argc, char* argv[]) {
    if (argc != 3) {
        std::cerr << "Usage: " << argv[0] << " <input_file> <output_rtmp_url>" << std::endl;
        return -1;
    }

    const char* input_file = argv[1];
    const char* output_url = argv[2];

    avformat_network_init();

    // --- Output side: allocate an FLV muxer context targeting the RTMP URL.
    AVFormatContext* output_ctx = nullptr;
    avformat_alloc_output_context2(&output_ctx, nullptr, "flv", output_url);
    if (!output_ctx) {
        std::cerr << "Could not create output context" << std::endl;
        return -1;
    }

    // Input-side state, declared ahead of the retry loop below.
    AVFormatContext* input_ctx = nullptr;
    int video_stream_index = -1;
    int audio_stream_index = -1;

    // --- Video encoder: H.264.
    const AVCodec* video_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!video_codec) {
        std::cerr << "H.264 encoder not found" << std::endl;
        avformat_free_context(output_ctx);
        return -1;
    }
    AVCodecContext* video_codec_ctx = avcodec_alloc_context3(video_codec);
    if (!video_codec_ctx) {
        std::cerr << "Could not allocate video codec context" << std::endl;
        avformat_free_context(output_ctx);
        return -1;
    }

    // --- Audio encoder: AAC.
    // NOTE(review): this AAC encoder context is configured and opened below
    // but never actually fed any frames — audio packets are passed through
    // untranscoded (see the audio branch in the read loop).
    const AVCodec* audio_codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!audio_codec) {
        std::cerr << "AAC encoder not found" << std::endl;
        avcodec_free_context(&video_codec_ctx);
        avformat_free_context(output_ctx);
        return -1;
    }
    AVCodecContext* audio_codec_ctx = avcodec_alloc_context3(audio_codec);
    if (!audio_codec_ctx) {
        std::cerr << "Could not allocate audio codec context" << std::endl;
        avcodec_free_context(&video_codec_ctx);
        avformat_free_context(output_ctx);
        return -1;
    }

    // Open the output URL unless the muxer manages its own I/O.
    if (!(output_ctx->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&output_ctx->pb, output_url, AVIO_FLAG_WRITE) < 0) {
            std::cerr << "Could not open output URL" << std::endl;
            avcodec_free_context(&audio_codec_ctx);
            avcodec_free_context(&video_codec_ctx);
            avformat_free_context(output_ctx);
            return -1;
        }
    }

    // --- Main push loop: reopen and restream the input file forever.
    // NOTE(review): every `continue` in this loop retries immediately with no
    // back-off, so a persistent failure (e.g. missing input file) busy-spins.
    while (true) {
        // Open the input file for this pass.
        if (avformat_open_input(&input_ctx, input_file, nullptr, nullptr) != 0) {
            std::cerr << "Could not open input file" << std::endl;
            continue;
        }
        if (avformat_find_stream_info(input_ctx, nullptr) < 0) {
            std::cerr << "Could not find stream information" << std::endl;
            avformat_close_input(&input_ctx);
            continue;
        }

        // Locate the (last) video and audio streams in the input.
        video_stream_index = -1;
        audio_stream_index = -1;
        for (unsigned int i = 0; i < input_ctx->nb_streams; i++) {
            if (input_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
                video_stream_index = static_cast<int>(i);
            } else if (input_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
                audio_stream_index = static_cast<int>(i);
            }
        }

        if (video_stream_index == -1) {
            std::cerr << "Could not find video stream" << std::endl;
            avformat_close_input(&input_ctx);
            continue;
        }

        // Seed the H.264 encoder context from the input stream's parameters
        // (width/height/etc.), then override codec id/type and pixel format.
        // NOTE(review): the return value of avcodec_parameters_to_context is
        // unchecked here.
        AVCodecParameters* input_video_par = input_ctx->streams[video_stream_index]->codecpar;
        avcodec_parameters_to_context(video_codec_ctx, input_video_par);
        video_codec_ctx->codec_id = AV_CODEC_ID_H264;
        video_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
        video_codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
        // Encoder time base: prefer the input stream's time base if valid.
        if (input_ctx->streams[video_stream_index]->time_base.num > 0 && input_ctx->streams[video_stream_index]->time_base.den > 0) {
            video_codec_ctx->time_base = input_ctx->streams[video_stream_index]->time_base;
        } else {
            // Input time base invalid — derive it from the guessed frame rate.
            AVRational framerate = av_guess_frame_rate(input_ctx, input_ctx->streams[video_stream_index], nullptr);
            if (framerate.num > 0 && framerate.den > 0) {
                video_codec_ctx->time_base = AVRational{framerate.den, framerate.num};
            } else {
                // Last-resort default: 25 fps.
                video_codec_ctx->time_base = AVRational{1, 25};
            }
        }
        // Low-latency x264 settings: fast preset, no B-frames, zerolatency.
        av_opt_set(video_codec_ctx->priv_data, "preset", "fast", 0);
        video_codec_ctx->max_b_frames = 0;
        video_codec_ctx->has_b_frames = 0;
        av_opt_set(video_codec_ctx->priv_data, "tune", "zerolatency", 0);
        // NOTE(review): on the second and later loop iterations this context
        // is already open; calling avcodec_open2 on an opened context is
        // invalid.  The context should be freed/reallocated (or opened once
        // outside the loop).
        if (avcodec_open2(video_codec_ctx, video_codec, nullptr) < 0) {
            std::cerr << "Could not open video codec" << std::endl;
            avformat_close_input(&input_ctx);
            continue;
        }
        // NOTE(review): a new output stream is appended to output_ctx on
        // EVERY loop iteration, so the muxer accumulates duplicate streams
        // across passes.  Streams should be created once before the loop.
        AVStream* video_stream = avformat_new_stream(output_ctx, video_codec);
        if (!video_stream) {
            std::cerr << "Could not create video stream" << std::endl;
            // NOTE(review): freeing video_codec_ctx here nulls the pointer,
            // but the next loop iteration dereferences it — crash on retry.
            avcodec_free_context(&video_codec_ctx);
            avformat_close_input(&input_ctx);
            continue;
        }
        avcodec_parameters_from_context(video_stream->codecpar, video_codec_ctx);

        if (audio_stream_index != -1) {
            // Configure the AAC encoder context from the input audio stream.
            AVCodecParameters* input_audio_par = input_ctx->streams[audio_stream_index]->codecpar;
            avcodec_parameters_to_context(audio_codec_ctx, input_audio_par);
            audio_codec_ctx->codec_id = AV_CODEC_ID_AAC;
            audio_codec_ctx->codec_type = AVMEDIA_TYPE_AUDIO;
            audio_codec_ctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
            audio_codec_ctx->bit_rate = 128000;
            if (input_audio_par->sample_rate > 0) {
                audio_codec_ctx->sample_rate = input_audio_par->sample_rate;
            } else {
                audio_codec_ctx->sample_rate = 44100;
            }
            // Use the modern ch_layout API (FFmpeg >= 5.1) instead of the
            // deprecated channels/channel_layout fields; default to stereo.
            if (input_audio_par->ch_layout.nb_channels > 0) {
                av_channel_layout_copy(&audio_codec_ctx->ch_layout, &input_audio_par->ch_layout);
            } else {
                av_channel_layout_default(&audio_codec_ctx->ch_layout, 2);
            }

            // NOTE(review): same re-open problem as the video context above.
            if (avcodec_open2(audio_codec_ctx, audio_codec, nullptr) < 0) {
                std::cerr << "Could not open audio codec" << std::endl;
                avformat_close_input(&input_ctx);
                continue;
            }
            AVStream* audio_stream = avformat_new_stream(output_ctx, audio_codec);
            if (!audio_stream) {
                std::cerr << "Could not create audio stream" << std::endl;
                avcodec_free_context(&audio_codec_ctx);
                avformat_close_input(&input_ctx);
                continue;
            }
            // NOTE(review): these parameters come from an encoder that never
            // encodes anything, so extradata (AudioSpecificConfig) is likely
            // missing — many FLV/RTMP consumers will reject the stream.
            avcodec_parameters_from_context(audio_stream->codecpar, audio_codec_ctx);
        }

        // NOTE(review): avformat_write_header is called once per loop
        // iteration, but a muxer header may be written only once per
        // context — the second pass is invalid.
        if (avformat_write_header(output_ctx, nullptr) < 0) {
            std::cerr << "Error writing header" << std::endl;
            avformat_close_input(&input_ctx);
            continue;
        }

        AVPacket* packet = av_packet_alloc();
        AVFrame* decoded_frame = av_frame_alloc();
        AVFrame* frame = av_frame_alloc();
        SwsContext* sws_ctx = nullptr;

        // NOTE(review): these counters reset to 0 on every pass over the
        // file, so timestamps restart each loop — the muxer will see
        // non-monotonic DTS across iterations.  A running offset is needed.
        int64_t video_pts = 0;  // video PTS counter
        int64_t audio_pts = 0;  // audio PTS counter

        while (av_read_frame(input_ctx, packet) >= 0) {
            if (packet->stream_index == video_stream_index) {
                // NOTE(review): BUG — video_codec_ctx is the H.264 *encoder*
                // context.  Decoding the input packet requires a separate
                // decoder context (avcodec_find_decoder on the input stream's
                // codec id).  avcodec_send_packet on an encoder context fails,
                // so as written no video frames are ever produced.
                if (avcodec_send_packet(video_codec_ctx, packet) < 0) {
                    std::cerr << "Error sending packet to decoder" << std::endl;
                    // NOTE(review): this `continue` skips av_packet_unref at
                    // the bottom of the read loop — packet buffer leak.
                    continue;
                }

                // Drain decoded frames from the decoder.
                while (avcodec_receive_frame(video_codec_ctx, decoded_frame) == 0) {
                    // Lazily create the scaler on the first decoded frame.
                    if (!sws_ctx) {
                        sws_ctx = sws_getContext(decoded_frame->width, decoded_frame->height,
                                                 static_cast<AVPixelFormat>(decoded_frame->format),
                                                 video_codec_ctx->width, video_codec_ctx->height,
                                                 video_codec_ctx->pix_fmt,
                                                 SWS_BILINEAR, nullptr, nullptr, nullptr);
                    }

                    // Prepare the output frame buffer in the encoder's format.
                    av_frame_unref(frame);
                    frame->format = video_codec_ctx->pix_fmt;
                    frame->width = video_codec_ctx->width;
                    frame->height = video_codec_ctx->height;
                    if (av_frame_get_buffer(frame, 0) < 0) {
                        std::cerr << "Error allocating frame buffer" << std::endl;
                        continue;
                    }

                    // Convert/scale the decoded frame into the output frame.
                    sws_scale(sws_ctx, decoded_frame->data, decoded_frame->linesize, 0,
                              decoded_frame->height, frame->data, frame->linesize);

                    // Stamp the frame with a wall-clock SEI payload.
                    add_sei_to_frame(frame);

                    // Derive the frame PTS from the frame counter.
                    // NOTE(review): framerate is never set on this context;
                    // if framerate.num is 0 this builds AVRational{1, 0} and
                    // av_rescale_q divides by zero.
                    frame->pts = av_rescale_q(video_pts, 
                                            AVRational{1, video_codec_ctx->framerate.num},
                                            video_codec_ctx->time_base);
                    video_pts++;

                    // Hand the frame to the H.264 encoder.
                    if (avcodec_send_frame(video_codec_ctx, frame) < 0) {
                        std::cerr << "Error sending frame to encoder" << std::endl;
                        continue;
                    }

                    // Drain encoded packets and mux them out.
                    AVPacket* encoded_packet = av_packet_alloc();
                    while (avcodec_receive_packet(video_codec_ctx, encoded_packet) == 0) {
                        encoded_packet->stream_index = video_stream->index;
                        av_packet_rescale_ts(encoded_packet, video_codec_ctx->time_base, video_stream->time_base);
                        av_interleaved_write_frame(output_ctx, encoded_packet);
                    }
                    av_packet_free(&encoded_packet);
                }
            } else if (packet->stream_index == audio_stream_index && audio_stream_index != -1) {
                // Audio passthrough: rewrite timestamps, then remux as-is.
                // NOTE(review): output_ctx->streams is indexed with the
                // *input* stream index here and below; output stream indices
                // differ (video is stream 0 in the output, and streams
                // accumulate across loop passes) — wrong stream or
                // out-of-bounds access.
                if (packet->pts == AV_NOPTS_VALUE) {
                    // Synthesize timestamps starting from 0.
                    packet->pts = (audio_pts == 0) ? 0 : audio_pts;
                    packet->dts = packet->pts;
                    
                    // One-sample duration rescaled into the output time base.
                    const int64_t duration = av_rescale_q(1, AVRational{1, audio_codec_ctx->sample_rate},
                                                        output_ctx->streams[audio_stream_index]->time_base);
                    
                    // Guard against a non-positive step (would stall PTS).
                    if (duration <= 0) {
                        std::cerr << "Invalid audio duration calculation" << std::endl;
                        // NOTE(review): skips av_packet_unref — packet leak.
                        continue;
                    }
                    audio_pts += duration;
                }

                packet->stream_index = output_ctx->streams[audio_stream_index]->index;
                // Rescale the packet's duration into the output time base.
                const int64_t std_duration = av_rescale_q(packet->duration, 
                                                         input_ctx->streams[audio_stream_index]->time_base,
                                                         output_ctx->streams[audio_stream_index]->time_base);
                
                // NOTE(review): this overwrites any valid input timestamps
                // with the synthetic counter unconditionally.
                packet->pts = audio_pts;
                packet->dts = audio_pts;
                
                // Reject packets whose rescaled duration is non-positive.
                if (std_duration <= 0) {
                    std::cerr << "Invalid packet duration: " << std_duration << std::endl;
                    av_packet_unref(packet);
                    continue;
                }
                audio_pts += std_duration;
                av_interleaved_write_frame(output_ctx, packet);
            }
            av_packet_unref(packet);
        }

        // Release per-pass resources and loop back to reopen the input.
        av_packet_free(&packet);
        av_frame_free(&decoded_frame);
        av_frame_free(&frame);
        if (sws_ctx) {
            sws_freeContext(sws_ctx);
        }
        avformat_close_input(&input_ctx);
    }

    // Unreachable: the loop above never exits.  Kept for completeness; a
    // signal handler setting a stop flag would make this reachable.
    av_write_trailer(output_ctx);
    if (!(output_ctx->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&output_ctx->pb);
    }
    avcodec_free_context(&audio_codec_ctx);
    avcodec_free_context(&video_codec_ctx);
    avformat_free_context(output_ctx);

    return 0;
}