#include <windows.h>

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <memory>

#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include <modules/audio_device/include/audio_device_data_observer.h>
#include <api/task_queue/default_task_queue_factory.h>
#include <media/engine/adm_helpers.h>
#include <rtc_base/platform_thread_types.h>

#define ENABLE_WRITE_WAV (1)
#define ENABLE_RENDE_WAV (1)
#if (defined(ENABLE_WRITE_WAV) && (ENABLE_WRITE_WAV != 0)) || (defined(ENABLE_RENDE_WAV) && (ENABLE_RENDE_WAV != 0))
#include <common_audio/wav_file.h>
#endif

#define ENABLE_WRITE_PCM (1)
#define ENABLE_RENDE_PCM (1)
#if defined(ENABLE_WRITE_PCM) && (ENABLE_WRITE_PCM != 0)
#endif
#if defined (ENABLE_RENDE_PCM) && (ENABLE_RENDE_PCM != 0)
#endif

static std::array<int16_t, 480 * 2 * 2> g_audio_buffer;
class MY_AudioTransport : public webrtc::AudioTransport {
public: 
    virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
                                          size_t nSamples,
                                          size_t nBytesPerSample,
                                          size_t nChannels,
                                          uint32_t samplesPerSec,
                                          uint32_t totalDelayMS,
                                          int32_t clockDrift,
                                          uint32_t currentMicLevel,
                                          bool keyPressed,
                                          uint32_t& newMicLevel) {
        // get mic data.
        memcpy(g_audio_buffer.data(), audioSamples, g_audio_buffer.size());
        return 0;
   }
  // Implementation has to setup safe values for all specified out parameters.
    virtual int32_t NeedMorePlayData(size_t nSamples,
                                   size_t nBytesPerSample,
                                   size_t nChannels,
                                   uint32_t samplesPerSec,
                                   void* audioSamples,
                                   size_t& nSamplesOut,  // NOLINT
                                   int64_t* elapsed_time_ms,
                                   int64_t* ntp_time_ms) {
        nSamplesOut = 960;
        *elapsed_time_ms = 10;
        *ntp_time_ms = -1;
        memcpy(audioSamples, g_audio_buffer.data(), g_audio_buffer.size());
        return 0;
    }

  // Method to pull mixed render audio data from all active VoE channels.
  // The data will not be passed as reference for audio processing internally.
    virtual void PullRenderData(int bits_per_sample,
                              int sample_rate,
                              size_t number_of_channels,
                              size_t number_of_frames,
                              void* audio_data,
                              int64_t* elapsed_time_ms,
                              int64_t* ntp_time_ms) {
        RTC_DCHECK_NOTREACHED();
    }
};

class MY_AudioModuleObserver : public webrtc::AudioDeviceDataObserver {
public:
    MY_AudioModuleObserver() = default;
    virtual ~MY_AudioModuleObserver() = default;

    virtual void OnCaptureData(const void* audio_samples,
                             size_t num_samples,
                             size_t bytes_per_sample,
                             size_t num_channels,
                             uint32_t samples_per_sec) {
        {
            static int log_cnt = 0;
            if (log_cnt++ % 10000 == 0) {
                std::cout << __func__ << " called. num_samples: " << num_samples << ", "
                    "bytes_per_sample: " << bytes_per_sample << ", "
                    "num_channesl: " << num_channels << ", "
                    "samples_per_sec: " << samples_per_sec << ", "
                    "thread id: " << rtc::CurrentThreadId() << 
                    std::endl;
            }
        }
#if defined(ENABLE_WRITE_WAV) && (ENABLE_WRITE_WAV != 0)
        static webrtc::WavWriter w { "capture.wav", int(samples_per_sec), num_channels, webrtc::WavFile::SampleFormat::kInt16};
        w.WriteSamples((int16_t*)audio_samples, num_samples * num_channels);
#endif

#if defined(ENABLE_WRITE_PCM) && (ENABLE_WRITE_PCM != 0)
        static FILE *fp = fopen("capture.pcm", "wb");
        fwrite(audio_samples, bytes_per_sample, num_samples, fp);
#endif
    }

    virtual void OnRenderData(const void* audio_samples,
                            size_t num_samples,
                            size_t bytes_per_sample,
                            size_t num_channels,
                            uint32_t samples_per_sec) {
        {
            static int log_cnt = 0;
            if (log_cnt++ % 10000 == 0) {
                std::cout << __func__ << " called. num_samples: " << num_samples << ", "
                    "bytes_per_sample: " << bytes_per_sample << ", "
                    "num_channesl: " << num_channels << ", "
                    "samples_per_sec: " << samples_per_sec << 
                    std::endl;                                
            }
        }
#if defined(ENABLE_RENDE_WAV) && (ENABLE_RENDE_WAV != 0)
        static webrtc::WavWriter w { "rende.wav", int (samples_per_sec), num_channels, webrtc::WavFile::SampleFormat::kInt16};
        w.WriteSamples((int16_t*)audio_samples, num_samples * num_channels);
#endif

#if defined(ENABLE_RENDE_PCM) && (ENABLE_RENDE_PCM != 0)
        static FILE *fp = fopen("rende.pcm", "wb");
        fwrite(audio_samples, bytes_per_sample, num_samples, fp);
#endif
    }
};

// Demo entry point: creates a Windows Core Audio ADM with a data observer,
// starts playout + recording (mic is looped back to the speaker via
// MY_AudioTransport), and runs until 'q' is typed on stdin.
int main(int argc, char* argv[]) {
    // Any command-line argument pauses here so a debugger can attach.
    if (argc > 1) {
        getchar();
    }
    SetConsoleOutputCP(CP_UTF8);  // UTF-8 console output on Windows

    auto transport = std::make_unique<MY_AudioTransport>();
    const auto layer = webrtc::AudioDeviceModule::AudioLayer::kWindowsCoreAudio;
    auto task_factory = webrtc::CreateDefaultTaskQueueFactory();
    auto observer = std::make_unique<MY_AudioModuleObserver>();
    auto adm = webrtc::CreateAudioDeviceWithDataObserver(layer, task_factory.get(), std::move(observer));
    if (!adm) {
        std::cout << __func__ << " call CreateAudioDeviceWithDataObserver failed." << std::endl;
        return 1;
    }

    // Log any failing ADM call; the original discarded every return code.
    const auto check = [](int32_t result, const char* what) {
        if (result != 0) {
            std::cout << what << " failed, result: " << result << std::endl;
        }
    };

    check(adm->RegisterAudioCallback(transport.get()), "RegisterAudioCallback");
    const auto recid = webrtc::AudioDeviceModule::WindowsDeviceType::kDefaultDevice;
    const auto playid = webrtc::AudioDeviceModule::WindowsDeviceType::kDefaultDevice;

    check(adm->Init(), "Init");

    // Playout path: select device, init playout + speaker, stereo, start.
    check(adm->SetPlayoutDevice(playid), "SetPlayoutDevice");
    check(adm->InitPlayout(), "InitPlayout");
    check(adm->InitSpeaker(), "InitSpeaker");
    check(adm->SetStereoPlayout(true), "SetStereoPlayout");
    check(adm->StartPlayout(), "StartPlayout");

    // Recording path: select device, init recording + mic, stereo, start.
    check(adm->SetRecordingDevice(recid), "SetRecordingDevice");
    check(adm->InitRecording(), "InitRecording");
    check(adm->InitMicrophone(), "InitMicrophone");
    check(adm->SetStereoRecording(true), "SetStereoRecording");
    check(adm->StartRecording(), "StartRecording");

    // Run until the user types 'q'.
    while (getchar() != 'q') {
    }

    adm->StopPlayout();
    adm->StopRecording();
    adm->Terminate();
    return 0;
}