//
// Created by hyh on 4/29/20.
//

#include <AudioRecord.h>

/* check that a given sample format is supported by the encoder */
/* Return 1 if the encoder supports sample format `sample_fmt`, 0 otherwise.
 * Walks the codec's AV_SAMPLE_FMT_NONE-terminated format list. */
static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    for (const enum AVSampleFormat *fmt = codec->sample_fmts;
         *fmt != AV_SAMPLE_FMT_NONE; ++fmt) {
        if (*fmt == sample_fmt)
            return 1;
    }
    return 0;
}

/* just pick the highest supported samplerate */
/* Pick the supported sample rate closest to 44100 Hz.
 * Returns 44100 when the codec does not restrict sample rates. */
static int select_sample_rate(const AVCodec *codec)
{
    if (!codec->supported_samplerates)
        return 44100;

    int best = 0;
    /* Zero-terminated list; keep the rate nearest to 44100. */
    for (const int *rate = codec->supported_samplerates; *rate; ++rate) {
        if (!best || abs(44100 - *rate) < abs(44100 - best))
            best = *rate;
    }
    return best;
}

/* select layout with the highest channel count */
/* Pick the supported channel layout with the highest channel count.
 * Returns AV_CH_LAYOUT_STEREO when the codec does not restrict layouts.
 * NOTE(review): the int return narrows the uint64_t layout mask; fine for
 * the common layouts but an exotic layout above INT_MAX would truncate. */
static int select_channel_layout(const AVCodec *codec)
{
    if (!codec->channel_layouts)
        return AV_CH_LAYOUT_STEREO;

    uint64_t best_layout  = 0;
    int best_channels     = 0;
    /* Zero-terminated list of layout bitmasks. */
    for (const uint64_t *layout = codec->channel_layouts; *layout; ++layout) {
        int channels = av_get_channel_layout_nb_channels(*layout);
        if (channels > best_channels) {
            best_layout   = *layout;
            best_channels = channels;
        }
    }
    return best_layout;
}

/* Submit one frame (or NULL to flush) to the encoder and append every
 * packet it produces to `output`. Exits the process on encoder errors. */
static void encode_(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *output)
{
    /* Queue the frame; a NULL frame switches the encoder to draining mode. */
    int ret = avcodec_send_frame(ctx, frame);
    if (ret < 0) {
        LOGE("Error sending the frame to the encoder\n");
        exit(1);
    }

    /* Drain every packet the encoder currently has ready. */
    for (;;) {
        ret = avcodec_receive_packet(ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;  /* needs more input, or fully flushed */
        if (ret < 0) {
            LOGE("Error encoding audio frame\n");
            exit(1);
        }
        fwrite(pkt->data, 1, pkt->size, output);
        av_packet_unref(pkt);  /* packet buffer is reused next iteration */
    }
}


#include <AudioRecord.h>
/*
 * Set up the AAC encoding pipeline and open the output file.
 *
 * Creates: the AAC encoder context (16 kHz mono, 64 kbit/s, FLTP),
 * the output FILE*, the reusable packet/frame, and the S16->FLTP
 * resampler context (asc). Exits the process on any failure, matching
 * the error style of the rest of this file.
 *
 * @param filePath path of the raw AAC output file (opened "wb").
 */
void AudioRecord::open(const char *filePath) {

    /* Required on FFmpeg < 4.0; a deprecated no-op on newer versions. */
    avcodec_register_all();

    audioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!audioCodec) {
        LOGE("Codec not found\n");
        exit(1);
    }

    audioCodecCtx = avcodec_alloc_context3(audioCodec);
    if (!audioCodecCtx) {
        LOGE("Could not allocate audio codec context\n");
        exit(1);
    }

    /* Capture side delivers interleaved S16; the AAC encoder wants planar
     * float, hence the resampler set up below. */
    AVSampleFormat inSampleFmt = AV_SAMPLE_FMT_S16;
    AVSampleFormat outSampleFmt = AV_SAMPLE_FMT_FLTP;
    const int sampleRate = 16000;
    const int channels = 1;

    audioCodecCtx->bit_rate = 64000;
    audioCodecCtx->sample_fmt = outSampleFmt;
    if (!check_sample_fmt(audioCodec, audioCodecCtx->sample_fmt)) {
        LOGE("Encoder does not support sample format %s",
                av_get_sample_fmt_name(audioCodecCtx->sample_fmt));
        exit(1);
    }
    audioCodecCtx->sample_rate = sampleRate;
    audioCodecCtx->channel_layout = AV_CH_LAYOUT_MONO;
    audioCodecCtx->channels = av_get_channel_layout_nb_channels(audioCodecCtx->channel_layout);

    if (avcodec_open2(audioCodecCtx, audioCodec, NULL) < 0) {
        LOGE("Could not open codec\n");
        exit(1);
    }

    f = fopen(filePath, "wb");
    if (!f) {
        LOGE("Could not open %s\n", filePath);
        exit(1);
    }

    audioPkt = av_packet_alloc();
    if (!audioPkt) {
        LOGE("could not allocate the packet\n");
        exit(1);
    }

    /* Frame that will carry the resampled raw audio into the encoder.
     * frame_size is only valid after avcodec_open2(). */
    audioFrame = av_frame_alloc();
    if (!audioFrame) {
        LOGE("Could not allocate audio frame\n");
        exit(1);
    }
    audioFrame->nb_samples = audioCodecCtx->frame_size;
    audioFrame->format = audioCodecCtx->sample_fmt;
    audioFrame->channel_layout = audioCodecCtx->channel_layout;

    /* Resampler: interleaved S16 -> planar float, same rate and channels.
     * BUG FIX: the allocation and swr_init() results were ignored; a NULL
     * context would have crashed inside swr_init(). Also pass NULL (not 0)
     * for the trailing log-context pointer argument. */
    asc = swr_alloc_set_opts(asc,
                             av_get_default_channel_layout(channels),
                             outSampleFmt,
                             sampleRate,       /* output format */
                             av_get_default_channel_layout(channels),
                             inSampleFmt,
                             sampleRate,       /* input format */
                             0, NULL);
    if (!asc) {
        LOGE("Could not allocate resampler context\n");
        exit(1);
    }
    if (swr_init(asc) < 0) {
        LOGE("Could not initialize resampler context\n");
        exit(1);
    }

    int ret = av_frame_get_buffer(audioFrame, 0);
    if (ret < 0) {
        LOGE("Could not allocate audio data buffers\n");
        exit(1);
    }
}

static int count =0;
void AudioRecord::encode(uint8_t *data, int len) {
    int ret = av_frame_make_writable(audioFrame);
    if (ret < 0){
        LOGE("av_frame_make_writable error")
        exit(1);
    }

    //重采样源数据
    const uint8_t *indata[AV_NUM_DATA_POINTERS] = {0};
    indata[0] = data;

    ret = swr_convert(asc, audioFrame->data, audioFrame->nb_samples, //输出参数，输出存储地址和样本数量
                      indata, audioFrame->nb_samples);
    LOGE("33333");
    if (ret < 0) {
        LOGE("swr_convert error");
        return;
    }

//    AVRational av;
//    av.num = 1;
//    av.den = audioFrame->nb_samples;
//
//    int dst_nb_samples = av_rescale_rnd(swr_get_delay(asc, audioCodecCtx->sample_rate) + audioFrame->nb_samples,
//                                        audioCodecCtx->sample_rate, audioCodecCtx->sample_rate, AV_ROUND_UP);
//
//    audioFrame->pts = av_rescale_q(count, (AVRational){1, audioCodecCtx->sample_rate}, audioCodecCtx->time_base);
//    count += dst_nb_samples;

//    apts += av_rescale_q(audioFrame->nb_samples, av, audioCodecCtx->time_base);

    encode_(audioCodecCtx, audioFrame,audioPkt,f);

}

void AudioRecord::stop() {
    encode_(audioCodecCtx,NULL,audioPkt,f);
    fclose(f);
    av_frame_free(&audioFrame);
    av_packet_free(&audioPkt);
    avcodec_free_context(&audioCodecCtx);
}