extern "C" {
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>          // av_get_pix_fmt_name
#include <libswresample/swresample.h>   // SwrContext / swr_alloc_set_opts (used below; was missing)
#include <libswscale/swscale.h>
}
#include <iostream>
#include <thread>
#include <chrono>
#include <signal.h>
#include <functional>

// Set asynchronously by the signal handler and polled by the capture loop,
// so the process can shut down cleanly (flush the encoder, write the trailer).
volatile sig_atomic_t stop_flag = 0;

// Minimal async-signal-safe handler: only flips the stop flag.
void signal_handler(int signum) {
    stop_flag = 1;
}

int main(int argc, char* argv[]) {
    //实现ctrl+c 信号捕获安全退出， av_write_trailer() 始终被调用，防止 moov atom not found 错误
    signal(SIGINT, signal_handler);
    signal(SIGTERM, signal_handler);
    avdevice_register_all();
    avformat_network_init();
    av_log_set_level(AV_LOG_DEBUG);

    const int OUT_WIDTH = 1280;
    const int OUT_HEIGHT = 720;
    const int FPS = 30;

    // ================== 初始化输出文件上下文 ==================
    AVFormatContext* formatCtx = nullptr;
    avformat_alloc_output_context2(&formatCtx, NULL, "mp4", "output.mp4");
    if (!formatCtx) {
        std::cerr << "Could not create output context" << std::endl;
        return -1;
    }

    const AVOutputFormat* outputFmt = formatCtx->oformat;
    const AVCodec* encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!encoder) {
        std::cerr << "Unsupported codec!" << std::endl;
        return -1;
    }

    AVStream* videoStm = avformat_new_stream(formatCtx, encoder);
    if (!videoStm) {
        std::cerr << "Failed to allocate stream" << std::endl;
        return -1;
    }

    AVCodecContext* codecCtx = avcodec_alloc_context3(encoder);
    if (!codecCtx) {
        std::cerr << "Failed to allocate codec context" << std::endl;
        return -1;
    }

    codecCtx->codec_id = AV_CODEC_ID_H264;
    codecCtx->bit_rate = 4000000;
    codecCtx->width = OUT_WIDTH;
    codecCtx->height = OUT_HEIGHT;
    codecCtx->time_base = (AVRational){1, FPS};
    codecCtx->framerate = (AVRational){FPS, 1};
    codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    codecCtx->gop_size = 100;
    codecCtx->max_b_frames = 0;
    codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    AVDictionary* opts = nullptr;
    av_dict_set(&opts, "preset", "ultrafast", 0);
    av_dict_set(&opts, "tune", "film", 0);
    av_dict_set(&opts, "crf", "23", 0);

    if (avcodec_open2(codecCtx, encoder, &opts) < 0) {
        std::cerr << "Could not open codec" << std::endl;
        return -1;
    }

    if (avcodec_parameters_from_context(videoStm->codecpar, codecCtx) < 0) {
        std::cerr << "Failed to copy codec parameters to stream" << std::endl;
        return -1;
    }

    if ((outputFmt->flags & AVFMT_NOFILE) == 0) {
        if (avio_open(&formatCtx->pb, "output.mp4", AVIO_FLAG_WRITE) < 0) {
            std::cerr << "Could not open output file" << std::endl;
            return -1;
        }
    }

    if (avformat_write_header(formatCtx, nullptr) < 0) {
        std::cerr << "Error writing header" << std::endl;
        return -1;
    }

    // ================== 初始化音频流 ==================
    const int SAMPLE_RATE = 48000;
    const enum AVSampleFormat SAMPLE_FMT = AV_SAMPLE_FMT_FLTP;
    const int CHANNELS = 2;
    const int64_t CHANNEL_LAYOUT = AV_CH_LAYOUT_STEREO;

    AVStream* audioStm = avformat_new_stream(formatCtx, nullptr);
    if (!audioStm) {
        std::cerr << "Failed to create audio stream" << std::endl;
        return -1;
    }

    const AVCodec* aacEncoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
    AVCodecContext* audioCodecCtx = avcodec_alloc_context3(aacEncoder);
    if (!audioCodecCtx) {
        std::cerr << "Could not allocate audio codec context" << std::endl;
        return -1;
    }

    audioCodecCtx->sample_rate = SAMPLE_RATE;
    audioCodecCtx->channel_layout = CHANNEL_LAYOUT;
    audioCodecCtx->channels = CHANNELS;
    audioCodecCtx->sample_fmt = aacEncoder->sample_fmts[0]; // 推荐格式
    audioCodecCtx->bit_rate = 128000;
    audioCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    // 设置音频参数选项
    AVDictionary* aacOptions = nullptr;
    av_dict_set(&aacOptions, "profile", "aac_he", 0); // 可选编码 profile
    av_dict_set(&aacOptions, "preset", "fast", 0);

    if (avcodec_open2(audioCodecCtx, aacEncoder, &aacOptions) < 0) {
        std::cerr << "Could not open audio encoder" << std::endl;
        return -1;
    }
    
    if (avcodec_parameters_from_context(audioStm->codecpar, audioCodecCtx) < 0) {
        std::cerr << "Failed to copy audio codec parameters" << std::endl;
        return -1;
    }
   
    // ================== 打开麦克风输入 ==================
    AVFormatContext* audioInputFmtCtx = nullptr;
    const AVInputFormat* audioInputFmt = av_find_input_format("avfoundation");
    AVDictionary* audioOptions = nullptr;
    av_dict_set(&audioOptions, "sample_rate", "48000", 0);
    av_dict_set(&audioOptions, "channels", "2", 0);

    if (avformat_open_input(&audioInputFmtCtx, ":none", audioInputFmt, &audioOptions) != 0) {
        std::cerr << "Cannot open microphone input" << std::endl;
        return -1;
    }

    if (avformat_find_stream_info(audioInputFmtCtx, nullptr) < 0) {
        std::cerr << "Cannot find audio stream info" << std::endl;
        return -1;
    }

    int audioStreamIdx = -1;
    for (int i = 0; i < audioInputFmtCtx->nb_streams; ++i) {
        if (audioInputFmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioStreamIdx = i;
            break;
        }
    }

    if (audioStreamIdx == -1) {
        std::cerr << "Could not find audio stream" << std::endl;
        return -1;
    }

    const AVCodec* audioDecoder = avcodec_find_decoder(audioInputFmtCtx->streams[audioStreamIdx]->codecpar->codec_id);
    AVCodecContext* audioDecoderCtx = avcodec_alloc_context3(audioDecoder);
    avcodec_parameters_to_context(audioDecoderCtx, audioInputFmtCtx->streams[audioStreamIdx]->codecpar);

    if (avcodec_open2(audioDecoderCtx, audioDecoder, NULL) < 0) {
        std::cerr << "Could not open audio decoder" << std::endl;
        return -1;
    }
    // ================== 音频重采样上下文 ==================
    SwrContext* swrCtx = swr_alloc_set_opts(nullptr,
        CHANNEL_LAYOUT, SAMPLE_FMT, SAMPLE_RATE,
        audioDecoderCtx->channel_layout,
        audioDecoderCtx->sample_fmt,
        audioDecoderCtx->sample_rate,
        0, nullptr);
    swr_init(swrCtx);


    // ================== 设置输入设备 ==================
    AVFormatContext* inputFmtCtx = nullptr;
    const AVInputFormat* inputFmt = av_find_input_format("avfoundation");

    AVDictionary* options = nullptr;
    av_dict_set(&options, "video_size", "1280x720", 0);  // 根据你的屏幕调整
    av_dict_set(&options, "framerate", "30", 0);
    av_dict_set(&options, "pixel_format", "bgr0", 0);
    //av_dict_set(&options, "capture_cursor", "1", 0);     //鼠标指针显示
    av_dict_set(&options, "mouse_size", "32x32", 0); // 强制指定鼠标大小
    //窗口录制：avformat_open_input(..., "window:窗口ID", ...);

    if (avformat_open_input(&inputFmtCtx, "1", inputFmt, &options) != 0) {
        std::cerr << "Cannot open screen input" << std::endl;
        return -1;
    }

    inputFmtCtx->probesize = 10 * 1024 * 1024;
    inputFmtCtx->max_analyze_duration = 5 * AV_TIME_BASE;

    if (avformat_find_stream_info(inputFmtCtx, nullptr) < 0) {
        std::cerr << "Cannot find input stream information" << std::endl;
        return -1;
    }

    int videoStreamIdx = -1;
    for (int i = 0; i < inputFmtCtx->nb_streams; ++i) {
        if (inputFmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStreamIdx = i;
            break;
        }
    }

    if (videoStreamIdx == -1) {
        std::cerr << "Could not find video stream" << std::endl;
        return -1;
    } else {
        AVCodecParameters* codecPar = inputFmtCtx->streams[videoStreamIdx]->codecpar;
        const AVCodec* decoder = avcodec_find_decoder(codecPar->codec_id);
    
        std::cout << "Input Video Codec: " << decoder->name << " (" << decoder->long_name << ")" << std::endl;
        std::cout << "Codec ID: " << codecPar->codec_id << std::endl;
        std::cout << "Pixel Format: " << av_get_pix_fmt_name(static_cast<AVPixelFormat>(codecPar->format)) << std::endl;
        std::cout << "Width x Height: " << codecPar->width << "x" << codecPar->height << std::endl;    
    }

    const AVCodec* decoder = avcodec_find_decoder(inputFmtCtx->streams[videoStreamIdx]->codecpar->codec_id);
    AVCodecContext* decoderCtx = avcodec_alloc_context3(decoder);
    avcodec_parameters_to_context(decoderCtx, inputFmtCtx->streams[videoStreamIdx]->codecpar);

    if (avcodec_open2(decoderCtx, decoder, NULL) < 0) {
        std::cerr << "Could not open decoder" << std::endl;
        return -1;
    }

    // ================== 分配帧与缩放上下文 ==================
    AVFrame* srcFrame = av_frame_alloc();
    AVFrame* dstFrame = av_frame_alloc();
    AVPacket* pkt = av_packet_alloc();

    dstFrame->format = AV_PIX_FMT_YUV420P;
    dstFrame->width = OUT_WIDTH;
    dstFrame->height = OUT_HEIGHT;
    av_frame_get_buffer(dstFrame, 32);

    SwsContext* swsCtx = sws_getContext(
        decoderCtx->width, decoderCtx->height, decoderCtx->pix_fmt,
        OUT_WIDTH, OUT_HEIGHT, AV_PIX_FMT_YUV420P,
        SWS_BILINEAR, NULL, NULL, NULL);

    // ================== 主循环：读取 → 解码 → 转换 → 编码 → 写入 ==================
    int64_t pts = 0;
    bool running = true;
    //signal(SIGINT, [&](int) { running = false; }); // Ctrl+C 终止录制   mac objective-c 不支持

    int frames_received = 0;
    int frames_writed = 0;

    while (running && !stop_flag) {
        AVPacket* pkt = av_packet_alloc();
        // 从输入流中读取一帧数据并存储到packet中,参数inputFmtCtx是输入流的格式上下文，用于描述输入流的信息
        // 参数pkt是一个AVPacket结构体，用于存储读取到的帧数据
        // 返回值是0表示成功，负值表示读取过程中出现错误
        int res = av_read_frame(inputFmtCtx, pkt);
        if (res < 0) { 
            if (res == AVERROR(EAGAIN)) {
                //std::cerr << "No frame available yet, retrying..." << std::endl;
                std::this_thread::sleep_for(std::chrono::milliseconds(5)); // 减少 CPU 占用
                av_packet_unref(pkt);
                continue; // 等待下一帧
            } else{
                char errbuf[1024];
                av_strerror(res, errbuf, sizeof(errbuf));
                std::cerr << "av_read_frame failed: " << errbuf << std::endl;
                av_packet_unref(pkt);
                break;
            }
        }
        std::cout << "Received pkt size " << pkt->size << std::endl;
        if (pkt->stream_index == videoStreamIdx) {
            //向解码器发送数据包以进行解码。用于将编码的视频数据包发送到解码器进行处理,数据包包含的是编码后的视频数据
            int ret = avcodec_send_packet(decoderCtx, pkt);
            while (ret >= 0) {
                //接收解码器输出的视频帧,decoderCtx 是解码器的上下文，包含了解码器的配置和状态信息
                // srcFrame 是一个用于存储解码后视频帧的缓冲区
                ret = avcodec_receive_frame(decoderCtx, srcFrame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
                else if (ret < 0) {
                    std::cerr << "Error during decoding" << std::endl;
                    return -1;
                }
                // 打印帧类型
                switch (srcFrame->pict_type) {
                    case AV_PICTURE_TYPE_I:
                        std::cout << "Decoded frame is an I frame" << std::endl;
                        break;
                    case AV_PICTURE_TYPE_P:
                        std::cout << "Decoded frame is a P frame" << std::endl;
                        break;
                    case AV_PICTURE_TYPE_B:
                        std::cout << "Decoded frame is a B frame" << std::endl;
                        break;
                    default:
                        std::cout << "Unknown frame type: " << srcFrame->pict_type << std::endl;
                        break;
                }
                frames_received++;
                // 转换像素格式
                sws_scale(swsCtx, srcFrame->data, srcFrame->linesize,
                            0, decoderCtx->height,
                            dstFrame->data, dstFrame->linesize);

                std::cout<<"Received frame: pts="<<srcFrame->pts<<std::endl;
                // 或者使用 pts 自增
                dstFrame->pts = pts++;

                // 或者基于帧率计算（也可）
                //dstFrame->pts = static_cast<int64_t>(pts++ * (1.0 / FPS) / av_q2d(codecCtx->time_base));
                

                //---------------------------- 编码-------------------------
                /** 向编码器发送一帧视频数据进行编码。
                * @param codecCtx 编码器上下文，包含了编码器的配置信息和内部状态。
                * @param dstFrame 待编码的视频帧数据。
                * @return 0 表示成功，负值表示编码过程中出现错误。
                **/
                if (avcodec_send_frame(codecCtx, dstFrame) < 0) {
                    std::cerr << "Error sending frame to encoder" << std::endl;
                    return -1;
                }

                while (avcodec_receive_packet(codecCtx, pkt) >= 0) {
                    av_packet_rescale_ts(pkt, codecCtx->time_base, videoStm->time_base);
                    pkt->stream_index = videoStm->index;
                        // 判断是否为关键帧
                    if (pkt->flags & AV_PKT_FLAG_KEY) {
                        std::cout << "Writing keyframe (I frame)" << std::endl;
                    }
                    //该函数负责将准备好的数据包(pkt)交错写入到文件中。交错写入是为了保证媒体文件的播放流畅性
                    av_interleaved_write_frame(formatCtx, pkt);
                    av_packet_unref(pkt);
                    frames_writed++;
                }
                // 编码完成后，获取编码器输出的帧以查看 pict_type
                std::cout << "Received frame: pts=" << dstFrame->pts
                    << std::endl;
            }
        }
        av_packet_unref(pkt);
    }

    // ================== Flush 编码器 ==================
    avcodec_send_frame(codecCtx, NULL);
    while (avcodec_receive_packet(codecCtx, pkt) >= 0) {
        av_packet_rescale_ts(pkt, codecCtx->time_base, videoStm->time_base);
        pkt->stream_index = videoStm->index;
        av_interleaved_write_frame(formatCtx, pkt);
        av_packet_unref(pkt);
        frames_writed++;
    }

    // ================== 清理资源 ==================
    av_write_trailer(formatCtx);
    avformat_close_input(&inputFmtCtx);
    avformat_free_context(formatCtx);
    avcodec_free_context(&codecCtx);
    avcodec_free_context(&decoderCtx);
    av_frame_free(&srcFrame);
    av_frame_free(&dstFrame);
    av_packet_free(&pkt);
    sws_freeContext(swsCtx);

    std::cout << "Recording finished: output.mp4" << std::endl;
    std::cout << "Frames received: " << frames_received << std::endl;
    std::cout << "Packets written: " << frames_writed << std::endl;
    return 0;
}