//
// Created by xiongqimin on 2022/3/24.
//

#include <memory>
#include <string>
#include <iostream>

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"

#include "common_audio/wav_file.h"
#include "modules/audio_processing/ns/noise_suppressor.h"
#include "test/common/FilePlayingSource.h"

// Command-line options for the noise-suppression test driver.
ABSL_FLAG(std::string, input_file_1, "", "First input. Default none.");

ABSL_FLAG(std::string, output_file, "mixed_file.wav",
          "File in which to store the mixed result.");

ABSL_FLAG(int, sampling_rate, 8000,
          "Rate at which to mix (all input streams must have this rate)");

ABSL_FLAG(bool, stereo, false,
          "Enable stereo (interleaved). Inputs need not be as this parameter.");

// Reads audio from --input_file_1, runs the WebRTC noise suppressor on every
// frame, and writes the denoised result to --output_file as a WAV file.
// Returns 0 on success, 1 when no input file was supplied.
int main(int argc, char *argv[]) {
    absl::ParseCommandLine(argc, argv);
    const int num_channels = absl::GetFlag(FLAGS_stereo) ? 2 : 1;
    const int sampleRate = absl::GetFlag(FLAGS_sampling_rate);

    // Validate the input path before constructing any processing objects.
    const auto& input_file = absl::GetFlag(FLAGS_input_file_1);
    if (input_file.empty()) {
        std::cout << "input file is null!\n";
        return 1;
    }

    webrtc::NsConfig cfg;  // Default suppression level; see NsConfig::SuppressionLevel.
//    cfg.target_level = webrtc::NsConfig::SuppressionLevel::k21dB;
    webrtc::StreamConfig stream_config(sampleRate, num_channels);
    webrtc::NoiseSuppressor ns(cfg, sampleRate, num_channels);

    // Input, process, and output configurations all share the same rate and
    // channel count, so no resampling happens inside the buffer.
    webrtc::AudioBuffer audio(sampleRate, num_channels, sampleRate, num_channels,
                              sampleRate, num_channels);

    webrtc::WavWriter wav_writer(absl::GetFlag(FLAGS_output_file), sampleRate,
                                 num_channels);
    std::cout << "---------------" << absl::GetFlag(FLAGS_output_file) << std::endl;

    webrtc::test::FilePlayingSource source(input_file);

    // The WebRTC noise suppressor operates on split frequency bands for rates
    // above 16 kHz; without the split/merge pair the output is wrong for
    // 32/48 kHz input. This mirrors the band handling in the reference
    // nsProc() kept (commented out) at the bottom of this file.
    const bool split_bands = sampleRate > 16000;

    bool streams_finished = false;
    webrtc::AudioFrame frame;
    webrtc::AudioFrame write_frame;
    while (!streams_finished) {
        auto ret = source.GetAudioFrameWithInfo(sampleRate, &frame);
        streams_finished = source.FileHasEnded();
        if (streams_finished) {
            continue;  // File exhausted; the last read produced no usable frame.
        }
        std::cout << "**********************************************" << (int)ret << std::endl;
        // Run noise suppression on this frame.
        audio.CopyFrom(frame.data(), stream_config);
        if (split_bands) {
            audio.SplitIntoFrequencyBands();
        }
        ns.Analyze(audio);
        ns.Process(&audio);
        if (split_bands) {
            audio.MergeFrequencyBands();
        }
        audio.CopyTo(stream_config, write_frame.mutable_data());

        wav_writer.WriteSamples(write_frame.data(),
                                num_channels * frame.samples_per_channel_);
    }

    std::cout << "Done!\n" << std::endl;
    return 0;
}

//
//// Write a WAV file
//void wavWrite_s16(char *filename, int16_t *buffer, size_t sampleRate, size_t totalSampleCount, unsigned int channels) {
//    drwav_data_format format;
//    format.container = drwav_container_riff;     // <-- drwav_container_riff = normal WAV files, drwav_container_w64 = Sony Wave64.
//    format.channels = channels;
//    format.sampleRate = (drwav_uint32) sampleRate;
//    format.bitsPerSample = sizeof(*buffer) * 8;
//    format.format = DR_WAVE_FORMAT_PCM;
//    drwav wav;
//    drwav_init_file_write(&wav, filename, &format, NULL);
//    drwav_uint64 samplesWritten = drwav_write_pcm_frames(&wav, totalSampleCount, buffer);
//    drwav_uninit(&wav);
//    if (samplesWritten != totalSampleCount) {
//        fprintf(stderr, "ERROR\n");
//        exit(1);
//
//    }
//}
//
//// Read a WAV file
//short *wavRead_s16(char *filename, uint32_t *sampleRate, uint64_t *totalSampleCount, unsigned int *channels) {
//    short *buffer = drwav_open_file_and_read_pcm_frames_s16(filename, channels, sampleRate, totalSampleCount, NULL);
//    if (buffer == NULL) {
//        printf("ERROR.");
//    }
//    return buffer;
//}
//
//// Split a path into drive/dir/name/ext components
//void splitpath(const char *path, char *drv, char *dir, char *name, char *ext) {
//    const char *end;
//    const char *p;
//    const char *s;
//    if (path[0] && path[1] == ':') {
//        if (drv) {
//            *drv++ = *path++;
//            *drv++ = *path++;
//            *drv = '\0';
//        }
//    } else if (drv)
//        *drv = '\0';
//    for (end = path; *end && *end != ':';)
//        end++;
//    for (p = end; p > path && *--p != '\\' && *p != '/';)
//        if (*p == '.') {
//            end = p;
//            break;
//        }
//    if (ext)
//        for (s = end; (*ext = *s++);)
//            ext++;
//    for (p = end; p > path;)
//        if (*--p == '\\' || *p == '/') {
//            p++;
//            break;
//        }
//    if (name) {
//        for (s = p; s < end;)
//            *name++ = *s++;
//        *name = '\0';
//    }
//    if (dir) {
//        for (s = path; s < p;)
//            *dir++ = *s++;
//        *dir = '\0';
//    }
//}
//
//using namespace webrtc;
//
//int nsProc(short *input, size_t SampleCount, size_t sampleRate, int num_channels) {
//    AudioBuffer audio(sampleRate, num_channels, sampleRate, num_channels, sampleRate,
//                      num_channels);
//    StreamConfig stream_config(sampleRate, num_channels);
//    NsConfig cfg;
//    /*
//     * NsConfig::SuppressionLevel::k6dB
//     * NsConfig::SuppressionLevel::k12dB
//     * NsConfig::SuppressionLevel::k18dB
//     * NsConfig::SuppressionLevel::k21dB
//     */
////    cfg.target_level = NsConfig::SuppressionLevel::k21dB;
//    NoiseSuppressor ns(cfg, sampleRate, num_channels);
//    short *buffer = input;
//    bool split_bands = sampleRate > 16000;
//    uint64_t frames = (SampleCount / stream_config.num_frames());
//    for (size_t frame_index = 0; frame_index < frames; ++frame_index) {
//        audio.CopyFrom(buffer, stream_config);
//        if (split_bands) {
//            audio.SplitIntoFrequencyBands();
//        }
//        ns.Analyze(audio);
//        ns.Process(&audio);
//        if (split_bands) {
//            audio.MergeFrequencyBands();
//        }
//        audio.CopyTo(stream_config, buffer);
//        buffer += stream_config.num_frames();
//    }
//    return 0;
//}
//
//void WebRtc_DeNoise(char *in_file, char *out_file) {
//    uint32_t sampleRate = 0;
//    uint64_t nSampleCount = 0;
//    uint32_t channels = 1;
//    short *data_in = wavRead_s16(in_file, &sampleRate, &nSampleCount, &channels);
//    if (data_in != NULL) {
//        double startTime = now();
//        short *data_out = (short *) calloc(nSampleCount, sizeof(short));
//        if (data_out != NULL) {
//            nsProc(data_in, nSampleCount, sampleRate, channels);
//            double time_interval = calcElapsed(startTime, now());
//            printf("time interval: %d ms\n ", (int) (time_interval * 1000));
//            wavWrite_s16(out_file, data_in, sampleRate, (uint32_t) nSampleCount, channels);
//            free(data_out);
//        }
//        free(data_in);
//    }
//}
//
//
//int main(int argc, char *argv[]) {
//    printf("webrtc noise suppressor\n");
//    printf("blog:http://cpuimage.cnblogs.com/\n");
//    printf("email:gaozhihan@vip.qq.com\n");
//    if (argc < 2) {
//        printf("usage:\n");
//        printf("./webrtc_ns input.wav\n");
//        printf("or\n");
//        printf("./webrtc_ns input.wav output.wav\n");
//        return -1;
//    }
//    char *in_file = argv[1];
//
//    if (argc > 2) {
//        char *out_file = argv[2];
//        WebRtc_DeNoise(in_file, out_file);
//    } else {
//        char drive[3];
//        char dir[256];
//        char fname[256];
//        char ext[256];
//        char out_file[1024];
//        splitpath(in_file, drive, dir, fname, ext);
//        sprintf(out_file, "%s%s%s_out%s", drive, dir, fname, ext);
//        WebRtc_DeNoise(in_file, out_file);
//    }
//    printf("press any key to exit.\n");
//    getchar();
//    return 0;
//}

