#include "spk_camera.h"
#include "config.h"
#include "audio.h"

#define LOG_TAG "audio"
#define LOG_LVL LOG_LVL_INFO
#include "spk_ulog.h"

#define AUDIO_DEVICE "hw:2,0"
#define OUTPUT_CHANNELS 1
#define OUTPUT_SAMPLE_RATE 8000
#define OUTPUT_BIT_RATE (8000 * 8)

// Build the whole capture pipeline up front: open the ALSA device, then the
// encoder, the resampler and finally the sample FIFO — in dependency order.
cls_audio::cls_audio(cls_spkcam *p_app) : app(p_app)
{
    open_audio_device();
    init_codec();
    init_resampler();
    init_fifo();
}

// Tear the pipeline down: the worker thread is stopped FIRST so nothing is
// still reading/writing the FFmpeg objects while they are freed below.
cls_audio::~cls_audio()
{
    audio_loop_stop();
    
    app = nullptr;

    // Release in reverse order of construction.
    free_fifo();
    free_resampler();
    free_codec();
    close_audio_device();
}

// Worker-thread body: fill the FIFO with converted capture samples until one
// output frame's worth (AUDIO_SIZE) is buffered, then encode and queue it.
// Runs until audio_loop_stop() clears audio_running.
void cls_audio::audio_main_loop()
{
    while (audio_running)
    {
        /* Fill phase.  Also re-check audio_running and the helper's return
         * value: without these checks a failing capture device would spin
         * here forever and audio_loop_stop() could never join the thread. */
        while (audio_running && av_audio_fifo_size(fifo) < AUDIO_SIZE)
        {
            if (read_decode_convert_and_store() < 0)
                break;
        }
        /* Drain phase: encode one frame at a time while enough is buffered. */
        while (audio_running && av_audio_fifo_size(fifo) >= AUDIO_SIZE)
        {
            if (load_encode_and_add_queue() < 0)
                break;
        }
    }
}

// Launch the capture/encode loop on a dedicated thread.
void cls_audio::audio_loop_start()
{
    audio_running = true;

    // Bind the non-static member function directly instead of wrapping it
    // in a lambda; std::thread handles the (this) binding itself.
    main_loop_thread =
        std::make_unique<std::thread>(&cls_audio::audio_main_loop, this);
}

// Ask the worker loop to exit, then wait for the thread to finish.
// Safe to call when the loop was never started.
void cls_audio::audio_loop_stop()
{
    audio_running = false;

    if (!main_loop_thread)
        return;

    if (main_loop_thread->joinable())
        main_loop_thread->join();
}

void cls_audio::open_audio_device()
{
    // we init "input_format_context" and "input_codec_context" here

    int error;

    avdevice_register_all();
    AVInputFormat *_ipt_fmt = av_find_input_format("alsa");
    AVDictionary *_fmt_opts = NULL;
    av_dict_set_int(&_fmt_opts, "audio_buffer_size", 20, 0);
    av_dict_set(&_fmt_opts, "channels", "1", 0);
    /* Open the input file to read from it. */
    if ((error = avformat_open_input(&input_format_context, AUDIO_DEVICE, _ipt_fmt,
                                     &_fmt_opts)) < 0)
    {
        fprintf(stderr, "Could not open input file %s\n", AUDIO_DEVICE);
        input_format_context = NULL;
        return;
    }

    /* Get information on the input file (number of streams etc.). */
    if ((error = avformat_find_stream_info(input_format_context, NULL)) < 0)
    {
        fprintf(stderr, "Could not open find stream info (error)\n");
        avformat_close_input(&input_format_context);
        return;
    }

    /* Make sure that there is only one stream in the input file. */
    if (input_format_context->nb_streams != 1)
    {
        fprintf(stderr, "Expected one audio input stream, but found %d\n",
                (input_format_context)->nb_streams);
        avformat_close_input(&input_format_context);
        // return AVERROR_EXIT;
        return;
    }

    AVCodec *input_codec = NULL;
    /* Find a decoder for the audio stream. */
    if (!(input_codec = (AVCodec *)avcodec_find_decoder((input_format_context)->streams[0]->codecpar->codec_id)))
    {
        fprintf(stderr, "Could not find input codec\n");
        avformat_close_input(&input_format_context);
        // return AVERROR_EXIT;
        return;
    }

    input_codec_context = avcodec_alloc_context3(input_codec);

    if (!input_codec_context)
    {
        fprintf(stderr, "Could not allocate a decoding context\n");
        avformat_close_input(&input_format_context);
        // return AVERROR(ENOMEM);
        return;
    }

    /* Initialize the stream parameters with demuxer information. */
    error = avcodec_parameters_to_context(input_codec_context, input_format_context->streams[0]->codecpar);
    if (error < 0)
    {
        avformat_close_input(&input_format_context);
        avcodec_free_context(&input_codec_context);
        return;
    }

    /* Open the decoder for the audio stream to use it later. */
    if ((error = avcodec_open2(input_codec_context, input_codec, NULL)) < 0)
    {
        fprintf(stderr, "Could not open input codec\n");
        avcodec_free_context(&input_codec_context);
        avformat_close_input(&input_format_context);
        return;
    }
}

// Release the capture device and its decoder.
// Both FFmpeg helpers accept a pointer holding NULL and reset the pointer
// afterwards, so no explicit guards are needed here.
void cls_audio::close_audio_device()
{
    avcodec_free_context(&input_codec_context);
    avformat_close_input(&input_format_context);
}

void cls_audio::init_codec()
{
    AVCodec *output_codec = NULL;
    int error;

    /* Find the encoder to be used by its name. */
    // AV_CODEC_ID_PCM_ALAW
    // AV_CODEC_ID_PCM_U8
    // AV_CODEC_ID_AAC
    if (!(output_codec = (AVCodec *)avcodec_find_encoder(AV_CODEC_ID_PCM_ALAW)))
    {
        fprintf(stderr, "Could not find an g711A encoder.\n");
        return;
    }

    output_codec_context = avcodec_alloc_context3(output_codec);

    if (!output_codec_context)
    {
        fprintf(stderr, "Could not allocate an encoding context\n");
        error = AVERROR(ENOMEM);
        return;
    }

    /* Set the basic encoder parameters. */
    output_codec_context->channels = OUTPUT_CHANNELS;
    output_codec_context->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
    output_codec_context->sample_rate = OUTPUT_SAMPLE_RATE;
    output_codec_context->sample_fmt = output_codec->sample_fmts[0];
    output_codec_context->bit_rate = OUTPUT_BIT_RATE;

    /* Open the encoder for the audio stream to use it later. */
    if ((error = avcodec_open2(output_codec_context, output_codec, NULL)) < 0)
    {
        fprintf(stderr, "Could not open output codec \n");
        return;
    }
}

// Destroy the encoder context, if any.
// avcodec_free_context() tolerates NULL and resets the pointer itself,
// so it can be handed the member unconditionally.
void cls_audio::free_codec()
{
    avcodec_free_context(&output_codec_context);
}

// Create and open "resample_context", converting from the capture format
// (input_codec_context) to the encoder format (output_codec_context).
void cls_audio::init_resampler()
{
    int error;

    /* BUGFIX: both codec contexts are dereferenced below; if an earlier init
     * stage failed they are NULL and this would crash.  Bail out instead. */
    if (!input_codec_context || !output_codec_context)
    {
        fprintf(stderr, "Cannot init resampler: missing codec context\n");
        return;
    }

    /*
     * Create a resampler context for the conversion.
     * Set the conversion parameters.
     * Default channel layouts based on the number of channels
     * are assumed for simplicity (they are sometimes not detected
     * properly by the demuxer and/or decoder).
     */
    resample_context = swr_alloc_set_opts(NULL,
                                          av_get_default_channel_layout(output_codec_context->channels),
                                          output_codec_context->sample_fmt,
                                          output_codec_context->sample_rate,
                                          av_get_default_channel_layout(input_codec_context->channels),
                                          input_codec_context->sample_fmt,
                                          input_codec_context->sample_rate,
                                          0, NULL);
    if (!resample_context)
    {
        fprintf(stderr, "Could not allocate resample context\n");
        return;
    }
    /*
     * Perform a sanity check so that the number of converted samples is
     * not greater than the number of samples to be converted.
     * If the sample rates differ, this case has to be handled differently
     */
    av_assert0(output_codec_context->sample_rate <= input_codec_context->sample_rate);

    /* Open the resampler with the specified parameters. */
    if ((error = swr_init(resample_context)) < 0)
    {
        fprintf(stderr, "Could not open resample context\n");
        swr_free(&resample_context);
        return;
    }
}

// Destroy the resampler, if any.  swr_free() accepts a pointer holding NULL
// and nulls the member afterwards.
void cls_audio::free_resampler()
{
    swr_free(&resample_context);
}

// Create the sample FIFO that buffers converted samples between the capture
// side and the encoder, sized/typed from the encoder's output format.
void cls_audio::init_fifo()
{
    /* BUGFIX: guard against a NULL encoder context (init_codec() may have
     * failed); dereferencing it here would crash. */
    if (!output_codec_context)
    {
        fprintf(stderr, "Cannot allocate FIFO: missing encoder context\n");
        fifo = NULL;
        return;
    }

    /* Create the FIFO buffer based on the specified output sample format.
     * Initial capacity of 1 sample; it is grown on demand by
     * add_samples_to_fifo(). */
    if (!(fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
                                     output_codec_context->channels, 1)))
    {
        fprintf(stderr, "Could not allocate FIFO\n");
        return;
    }
}

void cls_audio::free_fifo()
{
    if (fifo)
    {
        av_audio_fifo_free(fifo);
    }
}

// Read one packet from the capture device, decode it, resample it to the
// encoder format and append the result to the FIFO.
// Returns 0 on success (including "decoder produced no data yet"),
// a negative AVERROR code on failure.
int cls_audio::read_decode_convert_and_store()
{
    /* Temporary storage of the input samples of the frame read from the device. */
    AVFrame *input_frame = NULL;
    /* Temporary storage for the converted input samples (per-channel planes). */
    uint8_t **converted_input_samples = NULL;
    int data_present = 0;
    int ret = AVERROR_EXIT;

    /* Initialize temporary storage for one input frame. */
    if (init_input_frame(&input_frame))
        goto cleanup;
    /* Decode one frame worth of audio samples. */
    if (decode_audio_frame(input_frame, input_format_context,
                           input_codec_context, &data_present))
        goto cleanup;

    /* If there is decoded data, convert and store it. */
    if (data_present)
    {
        /* Initialize the temporary storage for the converted input samples. */
        if (init_converted_samples(&converted_input_samples, output_codec_context,
                                   input_frame->nb_samples))
            goto cleanup;

        /* Convert the input samples to the desired output sample format.
         * This requires a temporary storage provided by converted_input_samples. */
        int dst_frame_size;
        if ((dst_frame_size = convert_samples((const uint8_t **)input_frame->extended_data, converted_input_samples,
                                              input_frame->nb_samples, resample_context)) < 0)
            goto cleanup;

        if (dst_frame_size > 0)
        {
            if (add_samples_to_fifo(fifo, converted_input_samples,
                                    dst_frame_size))
                goto cleanup;
        }
    }
    /* "No data this round" (decoder EAGAIN) is deliberately not an error;
     * a single ret = 0 covers both the data and no-data paths (the previous
     * duplicate assignment inside the if-block was redundant). */
    ret = 0;

cleanup:
    /* Free the sample block and the channel-pointer array (calloc'd). */
    if (converted_input_samples)
    {
        av_freep(&converted_input_samples[0]);
        free(converted_input_samples);
    }
    av_frame_free(&input_frame);

    return ret;
}

// Drain the FIFO into one frame, encode it, and push the packet onto the
// application's audio queue.  Returns 0 on success, AVERROR_EXIT on failure.
int cls_audio::load_encode_and_add_queue()
{
    /* Temporary storage of the output samples of the frame to be encoded. */
    AVFrame *output_frame;
    /* NOTE: output_codec_context->frame_size == 0 means "variable frame
     * size" (as for PCM codecs), so the whole FIFO content is drained in
     * a single frame rather than a fixed-size chunk. */
    const int frame_size = av_audio_fifo_size(fifo);
    int data_written;

    /* Nothing buffered: nothing to do.  Guards against building a
     * zero-sample frame if called with an empty FIFO. */
    if (frame_size <= 0)
        return 0;

    /* Initialize temporary storage for one output frame. */
    if (init_output_frame(&output_frame, output_codec_context, frame_size))
        return AVERROR_EXIT;

    /* Read as many samples from the FIFO buffer as required to fill the frame.
     * The samples are stored in the frame temporarily. */
    if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size)
    {
        fprintf(stderr, "Could not read data from FIFO\n");
        av_frame_free(&output_frame);
        return AVERROR_EXIT;
    }

    /* Encode one frame worth of audio samples; the encoded packet is sent
     * to the audio queue inside encode_audio_frame(). */
    if (encode_audio_frame(output_frame, output_format_context,
                           output_codec_context, &data_written))
    {
        av_frame_free(&output_frame);
        return AVERROR_EXIT;
    }
    av_frame_free(&output_frame);
    return 0;
}

// Reset a stack-allocated packet to a known-empty state before use.
// NOTE(review): av_init_packet() is deprecated in recent FFmpeg releases in
// favour of heap packets from av_packet_alloc() — confirm the target FFmpeg
// version before migrating, since that changes every call site's ownership.
void cls_audio::init_packet(AVPacket *packet)
{
    av_init_packet(packet);
    /* Set the packet data and size so that it is recognized as being empty. */
    packet->data = NULL;
    packet->size = 0;
}

// Allocate one frame to receive decoded input samples.
// Returns 0 on success or AVERROR(ENOMEM) on allocation failure.
int cls_audio::init_input_frame(AVFrame **frame)
{
    *frame = av_frame_alloc();
    if (*frame == NULL)
    {
        fprintf(stderr, "Could not allocate input frame\n");
        return AVERROR(ENOMEM);
    }
    return 0;
}

// Read one packet from the capture device and decode it into "frame".
// *data_present is set to 1 only when a decoded frame is available; a
// decoder EAGAIN ("needs more input") or EOF is reported as success with no
// data.  Returns 0 on success or a negative AVERROR code on failure.
int cls_audio::decode_audio_frame(AVFrame *frame, AVFormatContext *input_format_context, AVCodecContext *input_codec_context, int *data_present)
{
    /* Packet used for temporary storage. */
    AVPacket input_packet;
    int error;
    init_packet(&input_packet);

    /* Read one audio frame from the input device into a temporary packet. */
    if ((error = av_read_frame(input_format_context, &input_packet)) < 0)
    {
        /* If we are at the end of the stream, flush the decoder below. */
        if (error == AVERROR_EOF)
            error = 0;
        else
        {
            fprintf(stderr, "Could not read frame \n");
            goto cleanup;
        }
    }

    /* Send the audio frame stored in the temporary packet to the decoder.
     * The input audio stream decoder is used to do this. */
    if ((error = avcodec_send_packet(input_codec_context, &input_packet)) < 0)
    {
        fprintf(stderr, "Could not send packet for decoding \n");
        /* BUGFIX: the packet may hold captured data at this point; the
         * previous early return leaked it.  Fall through to cleanup so
         * av_packet_unref() always runs. */
        goto cleanup;
    }

    /* Receive one frame from the decoder. */
    error = avcodec_receive_frame(input_codec_context, frame);
    if (error == AVERROR(EAGAIN))
    {
        /* The decoder asks for more data — no frame yet, not an error. */
        error = 0;
    }
    else if (error == AVERROR_EOF)
    {
        /* The end of the input stream was reached — stop decoding. */
        error = 0;
    }
    else if (error < 0)
    {
        fprintf(stderr, "Could not decode frame \n");
    }
    else
    {
        /* Default case: decoded data is available. */
        *data_present = 1;
    }

cleanup:
    av_packet_unref(&input_packet);
    return error;
}

// Allocate per-channel sample buffers large enough for frame_size converted
// samples in the encoder's sample format.  On success the caller owns the
// buffers (free the sample block with av_freep, the pointer array with
// free); on failure *converted_input_samples is left NULL.
// Returns 0 on success or a negative AVERROR code.
int cls_audio::init_converted_samples(uint8_t ***converted_input_samples, AVCodecContext *output_codec_context, int frame_size)
{
    int error;

    /* Allocate as many pointers as there are audio channels.
     * Each pointer will later point to the audio samples of the corresponding
     * channels (although it may be NULL for interleaved formats).
     */
    if (!(*converted_input_samples = (uint8_t **)calloc(output_codec_context->channels,
                                                        sizeof(**converted_input_samples))))
    {
        fprintf(stderr, "Could not allocate converted input sample pointers\n");
        return AVERROR(ENOMEM);
    }

    /* Allocate memory for the samples of all channels in one consecutive
     * block for convenience. */
    if ((error = av_samples_alloc(*converted_input_samples, NULL,
                                  output_codec_context->channels,
                                  frame_size,
                                  output_codec_context->sample_fmt, 0)) < 0)
    {
        fprintf(stderr, "Could not allocate converted input samples \n");
        av_freep(&(*converted_input_samples)[0]);
        free(*converted_input_samples);
        /* BUGFIX: reset the caller's pointer after freeing it — otherwise the
         * caller's cleanup path sees a non-NULL pointer and frees it again
         * (double free in read_decode_convert_and_store). */
        *converted_input_samples = NULL;
        return error;
    }
    return 0;
}

// Convert frame_size input samples into the output sample format using the
// given resampler.  Returns the number of samples produced per channel
// (>= 0), or a negative AVERROR code on failure.
int cls_audio::convert_samples(const uint8_t **input_data, uint8_t **converted_data, const int frame_size, SwrContext *resample_context)
{
    const int converted = swr_convert(resample_context,
                                      converted_data, frame_size,
                                      input_data, frame_size);
    if (converted < 0)
        fprintf(stderr, "Could not convert input samples \n");

    return converted;
}

// Append frame_size converted samples to the FIFO, growing it as needed.
// Returns 0 on success, a negative AVERROR code or AVERROR_EXIT on failure.
int cls_audio::add_samples_to_fifo(AVAudioFifo *fifo, uint8_t **converted_input_samples, const int frame_size)
{
    /* Grow the FIFO so it can hold both the old and the new samples. */
    const int grow = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size);
    if (grow < 0)
    {
        fprintf(stderr, "Could not reallocate FIFO\n");
        return grow;
    }

    /* Append the new samples; a short write is treated as fatal. */
    const int written = av_audio_fifo_write(fifo, (void **)converted_input_samples,
                                            frame_size);
    if (written < frame_size)
    {
        fprintf(stderr, "Could not write data to FIFO\n");
        return AVERROR_EXIT;
    }
    return 0;
}

// Allocate a frame sized for frame_size samples in the encoder's format and
// hand it to the caller via *frame (NULL on failure).
// Returns 0 on success, AVERROR_EXIT or a negative AVERROR code on failure.
int cls_audio::init_output_frame(AVFrame **frame, AVCodecContext *output_codec_context, int frame_size)
{
    AVFrame *out = av_frame_alloc();
    if (!out)
    {
        fprintf(stderr, "Could not allocate output frame\n");
        *frame = NULL;
        return AVERROR_EXIT;
    }

    /* av_frame_get_buffer() sizes the sample buffers from these fields.
     * Default channel layouts based on the number of channels are assumed
     * for simplicity. */
    out->nb_samples = frame_size;
    out->channel_layout = output_codec_context->channel_layout;
    out->format = output_codec_context->sample_fmt;
    out->sample_rate = output_codec_context->sample_rate;

    /* Allocate the samples of the created frame. This call will make
     * sure that the audio frame can hold as many samples as specified. */
    const int error = av_frame_get_buffer(out, 0);
    if (error < 0)
    {
        fprintf(stderr, "Could not allocate output frame samples \n");
        av_frame_free(&out);
        *frame = NULL;
        return error;
    }

    *frame = out;
    return 0;
}

// Encode one frame of audio samples and push the resulting packet onto the
// application's audio queue (frame == NULL flushes the encoder).
// *data_present is set to 1 only when a packet was produced; an encoder
// EAGAIN/EOF is reported as success with no data.
// Returns 0 on success or a negative AVERROR code.
int cls_audio::encode_audio_frame(AVFrame *frame, AVFormatContext *output_format_context, AVCodecContext *output_codec_context, int *data_present)
{
    /* Packet used for temporary storage. */
    AVPacket output_packet;
    int error;
    init_packet(&output_packet);

    /* Stamp the frame with a running, sample-count based timestamp. */
    if (frame)
    {
        frame->pts = pts;
        pts += frame->nb_samples;
    }

    /* Send the audio frame stored in the temporary packet to the encoder.
     * The output audio stream encoder is used to do this. */
    error = avcodec_send_frame(output_codec_context, frame);
    /* The encoder signals that it has nothing more to encode. */
    if (error == AVERROR_EOF)
    {
        error = 0;
        goto cleanup;
    }
    else if (error < 0)
    {
        fprintf(stderr, "Could not send packet for encoding \n");
        /* Route through cleanup so the unref path stays uniform (matches
         * the decode path; the early return here skipped it). */
        goto cleanup;
    }

    /* Receive one encoded frame from the encoder. */
    error = avcodec_receive_packet(output_codec_context, &output_packet);
    /* If the encoder asks for more data to be able to provide an
     * encoded frame, return indicating that no data is present. */
    if (error == AVERROR(EAGAIN))
    {
        error = 0;
        goto cleanup;
        /* If the last frame has been encoded, stop encoding. */
    }
    else if (error == AVERROR_EOF)
    {
        error = 0;
        goto cleanup;
    }
    else if (error < 0)
    {
        fprintf(stderr, "Could not encode frame \n");
        goto cleanup;
        /* Default case: Return encoded data. */
    }
    else
    {
        *data_present = 1;

        /* Clamp the payload to AUDIO_SIZE so it fits the queue's slot size;
         * any excess bytes of the encoded packet are dropped. */
        int snd_size = (output_packet.size > AUDIO_SIZE) ? AUDIO_SIZE : output_packet.size;
        send_msg_queue(app->audio_queue, (char *)output_packet.data, snd_size, 0);
        LOG_D("audio size = %d", snd_size);
    }

cleanup:
    av_packet_unref(&output_packet);
    return error;
}
