#include <chrono>
#include <cmath>
#include <cstring>
#include <iostream>
#include <thread>

#include <signal.h>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavutil/audio_fifo.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>
#include <libavutil/fifo.h>
}

// Set by the signal handler, polled by the capture loop so the program can
// flush the encoder and write the MP4 trailer before exiting.
volatile sig_atomic_t stop_flag = 0;

// Async-signal-safe handler: only touches the sig_atomic_t flag.
// Parameter renamed from `signal` to avoid shadowing ::signal().
void signal_handler(int signum) {
    (void)signum; // one handler serves both SIGINT and SIGTERM
    stop_flag = 1;
}

int main() {
    signal(SIGINT, signal_handler);
    signal(SIGTERM, signal_handler);

    avdevice_register_all();
    avformat_network_init();
    av_log_set_level(AV_LOG_DEBUG);

    // ================== 初始化输出文件上下文 ==================
    AVFormatContext* formatCtx = nullptr;
    avformat_alloc_output_context2(&formatCtx, NULL, "mp4", "audio.mp4");
    if (!formatCtx) {
        std::cerr << "Could not create output context" << std::endl;
        return -1;
    }

    const int SAMPLE_RATE = 48000;
    const enum AVSampleFormat SAMPLE_FMT = AV_SAMPLE_FMT_FLTP;
    const int CHANNELS = 2;
    const int64_t CHANNEL_LAYOUT = AV_CH_LAYOUT_STEREO;

    // ================== 创建音频流 ==================
    AVStream* audioStm = avformat_new_stream(formatCtx, nullptr);
    audioStm->time_base = (AVRational){1, SAMPLE_RATE};

    const AVCodec* aacEncoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
    AVCodecContext* audioCodecCtx = avcodec_alloc_context3(aacEncoder);
    if (!audioCodecCtx) {
        std::cerr << "Could not allocate audio codec context" << std::endl;
        return -1;
    }

    audioCodecCtx->sample_rate = SAMPLE_RATE;
    audioCodecCtx->channel_layout = CHANNEL_LAYOUT;
    audioCodecCtx->channels = CHANNELS;
    audioCodecCtx->sample_fmt = aacEncoder->sample_fmts[0];
    audioCodecCtx->bit_rate = 128000;
    audioCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    audioCodecCtx->time_base = (AVRational){1, SAMPLE_RATE};

    AVDictionary* aacOptions = nullptr;
    av_dict_set(&aacOptions, "profile", "aac_low", 0); // 可选 profile
    av_dict_set(&aacOptions, "preset", "fast", 0);

    if (avcodec_open2(audioCodecCtx, aacEncoder, &aacOptions) < 0) {
        std::cerr << "Could not open audio encoder" << std::endl;
        return -1;
    }

    if (avcodec_parameters_from_context(audioStm->codecpar, audioCodecCtx) < 0) {
        std::cerr << "Failed to copy codec parameters to stream" << std::endl;
        return -1;
    }

    if ((formatCtx->oformat->flags & AVFMT_NOFILE) == 0) {
        if (avio_open(&formatCtx->pb, "audio.mp4", AVIO_FLAG_WRITE) < 0) {
            std::cerr << "Could not open output file" << std::endl;
            return -1;
        }
    }

    if (avformat_write_header(formatCtx, nullptr) < 0) {
        std::cerr << "Error writing header" << std::endl;
        return -1;
    }

    // ================== 打开麦克风输入 ==================
    AVFormatContext* audioInputFmtCtx = nullptr;
    const AVInputFormat* audioInputFmt = av_find_input_format("avfoundation");
    AVDictionary* audioOptions = nullptr;
    av_dict_set(&audioOptions, "sample_rate", "48000", 0);   // 采样率 48k
    av_dict_set(&audioOptions, "channels", "2", 0);          // 声道数 2

    if (avformat_open_input(&audioInputFmtCtx, ":0", audioInputFmt, &audioOptions) != 0) {
        std::cerr << "Cannot open microphone input" << std::endl;
        return -1;
    }

    if (avformat_find_stream_info(audioInputFmtCtx, nullptr) < 0) {
        std::cerr << "Cannot find audio stream info" << std::endl;
        return -1;
    }

    int audioStreamIdx = -1;
    for (int i = 0; i < audioInputFmtCtx->nb_streams; ++i) {
        if (audioInputFmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioStreamIdx = i;
            break;
        }
    }

    if (audioStreamIdx == -1) {
        std::cerr << "Could not find audio stream" << std::endl;
        return -1;
    }

    const AVCodec* audioDecoder = avcodec_find_decoder(
        audioInputFmtCtx->streams[audioStreamIdx]->codecpar->codec_id);
    AVCodecContext* audioDecoderCtx = avcodec_alloc_context3(audioDecoder);
    avcodec_parameters_to_context(audioDecoderCtx,
                                  audioInputFmtCtx->streams[audioStreamIdx]->codecpar);

    if (avcodec_open2(audioDecoderCtx, audioDecoder, NULL) < 0) {
        std::cerr << "Could not open audio decoder" << std::endl;
        return -1;
    }

    // ================== 音频重采样上下文 ==================
    SwrContext* swrCtx = swr_alloc_set_opts(nullptr,
                                            CHANNEL_LAYOUT, SAMPLE_FMT, SAMPLE_RATE,
                                            audioDecoderCtx->channel_layout,
                                            audioDecoderCtx->sample_fmt,
                                            audioDecoderCtx->sample_rate,
                                            0, nullptr);
    swr_init(swrCtx);

    // ================== 分配音频帧和包 ==================
    AVFrame* frame = av_frame_alloc();
    AVPacket* packet = av_packet_alloc();
    AVPacket* pkt = av_packet_alloc();

    AVFifoBuffer* audio_fifo = av_fifo_alloc(4096 * sizeof(float));
    int frame_size = audioCodecCtx->frame_size;
    int64_t audio_pts = 0;

    // ================== 主循环 ==================
    while (!stop_flag) {
        int ret = av_read_frame(audioInputFmtCtx, packet);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN)) {
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
                av_packet_unref(packet);
                continue;
            } else {
                std::cerr << "Error reading audio frame" << std::endl;
                break;
            }
        }

        if (packet->stream_index == audioStreamIdx) {
            ret = avcodec_send_packet(audioDecoderCtx, packet);
            while (ret >= 0) {
                ret = avcodec_receive_frame(audioDecoderCtx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
                if (ret < 0) {
                    std::cerr << "Error decoding audio frame" << std::endl;
                    break;
                }

                
                 // ========== 重采样 ==========
                AVFrame* swrOutFrame = av_frame_alloc();
                swrOutFrame->sample_rate = SAMPLE_RATE;
                swrOutFrame->channel_layout = CHANNEL_LAYOUT;
                swrOutFrame->format = SAMPLE_FMT;
                swrOutFrame->nb_samples = frame->nb_samples;

                if (av_frame_get_buffer(swrOutFrame, 0) < 0) {
                    std::cerr << "Could not allocate buffer for resampled frame" << std::endl;
                    av_frame_free(&swrOutFrame);
                    continue;
                }

                if ((ret = swr_convert_frame(swrCtx, swrOutFrame, frame)) < 0) {
                    char err[AV_ERROR_MAX_STRING_SIZE];
                    std::cerr << "Error during resampling: "
                            << av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, ret)
                            << std::endl;
                    av_frame_free(&swrOutFrame);
                    continue;
                }

                if (swrOutFrame->format != AV_SAMPLE_FMT_FLTP) {
                    std::cerr << "Unexpected sample format: " << swrOutFrame->format << std::endl;
                    continue;
                }
                // 写入 FIFO
                //av_fifo_generic_write(audio_fifo, frame->data[0], frame->nb_samples * sizeof(float), NULL);

                           // 写入 FIFO
                av_fifo_generic_write(audio_fifo, swrOutFrame->data[0],
                    swrOutFrame->nb_samples * sizeof(float), NULL);

                av_frame_free(&swrOutFrame);
                // 处理 FIFO 中的数据
                while (av_fifo_size(audio_fifo) >= frame_size * sizeof(float)) {
                    AVFrame* swrFrame = av_frame_alloc();
                    swrFrame->sample_rate = SAMPLE_RATE;
                    swrFrame->channel_layout = CHANNEL_LAYOUT;
                    swrFrame->format = SAMPLE_FMT;
                    swrFrame->nb_samples = frame_size;

                    if (av_frame_get_buffer(swrFrame, 0) < 0) {
                        std::cerr << "Could not allocate frame buffer" << std::endl;
                        av_frame_free(&swrFrame);
                        continue;
                    }

                    av_fifo_generic_read(audio_fifo, swrFrame->data[0], frame_size * sizeof(float), NULL);

                    // 检查 NaN / Inf
                    float* samples = reinterpret_cast<float*>(swrFrame->data[0]);
                    bool has_nan = false;
                    for (int i = 0; i < frame_size; ++i) {
                        if (std::isnan(samples[i]) || std::isinf(samples[i])) {
                            has_nan = true;
                            break;
                        }
                    }
                    if (has_nan) {
                        std::cerr << "Found NaN or Inf in audio data" << std::endl;
                        av_frame_free(&swrFrame);
                        continue;
                    }

                    // 设置 pts
                    swrFrame->pts = av_rescale_q(audio_pts++, 
                                                (AVRational){1, SAMPLE_RATE}, 
                                                audioCodecCtx->time_base);

                    if (avcodec_send_frame(audioCodecCtx, swrFrame) < 0) {
                        std::cerr << "Error sending audio frame to encoder" << std::endl;
                    }

                    while (avcodec_receive_packet(audioCodecCtx, pkt) >= 0) {
                        av_packet_rescale_ts(pkt, audioCodecCtx->time_base, audioStm->time_base);
                        pkt->stream_index = audioStm->index;
                        av_interleaved_write_frame(formatCtx, pkt);
                        av_packet_unref(pkt);
                    }

                    av_frame_free(&swrFrame);
                }
            }
        }

        av_packet_unref(packet);
    }

    // ================== Flush FIFO and Encoder ==================
    AVFrame* finalFrame = av_frame_alloc();
    finalFrame->sample_rate = SAMPLE_RATE;
    finalFrame->channel_layout = CHANNEL_LAYOUT;
    finalFrame->format = SAMPLE_FMT;
    finalFrame->nb_samples = frame_size;

    if (av_frame_get_buffer(finalFrame, 0) >= 0) {
        int remaining = av_fifo_size(audio_fifo) / sizeof(float);
        if (remaining > 0) {
            av_fifo_generic_read(audio_fifo, finalFrame->data[0], remaining * sizeof(float), NULL);
            memset((char*)finalFrame->data[0] + remaining * sizeof(float),
                   0, (frame_size - remaining) * sizeof(float));

            finalFrame->pts = audio_pts++;
            avcodec_send_frame(audioCodecCtx, finalFrame);
            while (avcodec_receive_packet(audioCodecCtx, pkt) >= 0) {
                av_packet_rescale_ts(pkt, audioCodecCtx->time_base, audioStm->time_base);
                pkt->stream_index = audioStm->index;
                av_interleaved_write_frame(formatCtx, pkt);
                av_packet_unref(pkt);
            }
        }
    }

    // Flush 编码器剩余帧
    avcodec_send_frame(audioCodecCtx, NULL);
    while (avcodec_receive_packet(audioCodecCtx, pkt) >= 0) {
        av_packet_rescale_ts(pkt, audioCodecCtx->time_base, audioStm->time_base);
        pkt->stream_index = audioStm->index;
        av_interleaved_write_frame(formatCtx, pkt);
        av_packet_unref(pkt);
    }

    // ================== 清理资源 ==================
    av_write_trailer(formatCtx);
    if ((formatCtx->oformat->flags & AVFMT_NOFILE) == 0) {
        avio_closep(&formatCtx->pb);
    }

    avformat_free_context(formatCtx);
    avcodec_free_context(&audioCodecCtx);
    avcodec_free_context(&audioDecoderCtx);
    av_frame_free(&frame);
    av_frame_free(&finalFrame);
    av_packet_free(&packet);
    av_packet_free(&pkt);
    swr_free(&swrCtx);
    av_fifo_freep(&audio_fifo);

    std::cout << "Audio capture finished. Output saved to audio.mp4" << std::endl;
    return 0;
}