﻿#include <QEventLoop>
#include <QThread>
#include <QDebug>

#include <mmdeviceapi.h>
#include <avrt.h>
#include "common/AVAudioConvert.h"
#include "common/LoopBuffer.h"

#include "WinMicRecord.h"

extern "C" {
#include <libavutil/time.h>
}

/*
 * Release a COM interface pointer and null it out afterwards.
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside an unbraced if/else (the previous bare-`if` form
 * broke `if (cond) SAFE_RELEASE(p); else ...` with a dangling-else error).
 */
#define SAFE_RELEASE(punk) \
    do { \
        if (NULL != (punk)) { \
            (punk)->Release(); \
            (punk) = NULL; \
        } \
    } while (0)

// Constructor does no device work; all COM/WASAPI setup is deferred to init(),
// which run() calls on the capture thread.
WinMicRecord::WinMicRecord(QObject *parent) : WinMediaDevice(parent)
{
}

// Release the capture endpoint device and balance the CoInitializeEx()
// performed in init().
// NOTE(review): the outer null check is redundant — SAFE_RELEASE already
// tests for NULL before calling Release().
// NOTE(review): _AudioClient, _CaptureClient, micFormat and
// _AudioSamplesReadyEvent acquired in init() are not freed here — confirm
// they are released elsewhere, otherwise each instance leaks them.
WinMicRecord::~WinMicRecord()
{
    if (this->pDevice)
        SAFE_RELEASE(this->pDevice);
    CoUninitialize();
}

bool WinMicRecord::init()
{
    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    if (hr == S_FALSE) {
        qDebug() << "mic com init already";
    }

    pDevice = getDevice();

    hr = pDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, reinterpret_cast<void **>(&_AudioClient));
    if (FAILED(hr))
    {
        qDebug("Unable to activate audio client: %x.\n", hr);
        return false;
    }

    //获取音频引擎内如共享模式程序的音频流格式
    hr = _AudioClient->GetMixFormat(&micFormat);
    if (FAILED(hr))
    {
        qDebug("Unable to get mix format on audio client: %x.\n", hr);
        return false;
    }

    switch (micFormat->wFormatTag) {
    case WAVE_FORMAT_IEEE_FLOAT:
        qDebug() << "float";
        break;

    case WAVE_FORMAT_EXTENSIBLE:
    {
        PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(micFormat);
        if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT,pEx->SubFormat)) {
            pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
            pEx->Samples.wValidBitsPerSample = 16;

            micFormat->wBitsPerSample = 16;
            micFormat->nBlockAlign = micFormat->nChannels * micFormat->wBitsPerSample / 8;
            micFormat->nAvgBytesPerSec = micFormat->nBlockAlign * micFormat->nSamplesPerSec;
        } else {
            printf("Don't know how to coerce mix format to int-16\n");
            CoTaskMemFree(micFormat);
            _AudioClient->Release();
            return false;
        }
    }
        break;
    default:
        break;
    }

    _FrameSize = (micFormat->wBitsPerSample / 8) * micFormat->nChannels;

    /*
    *AUDCLNT_STREAMFLAGS_EVENTCALLBACK允许设置事件通知回调 SetEventHandle才会有效果
    */
    hr = _AudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, 5 * 10000000, 0, micFormat, NULL);
    if (FAILED(hr))
    {
        qDebug("Error: Unable to initialize audio client: %x.\n", hr);
        return false;
    }
    _AudioSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (_AudioSamplesReadyEvent == NULL)
    {
        qDebug("Error: Unable to create samples ready event: %d.\n", GetLastError());
        return false;
    }
    //设置事件通知对象
    hr = _AudioClient->SetEventHandle(_AudioSamplesReadyEvent);
    if (FAILED(hr))
    {
        qDebug("Error: Unable to set ready event: %x.\n", hr);
        return false;
    }
    //生成采集服务
    hr = _AudioClient->GetService(__uuidof(IAudioCaptureClient), (void**)&_CaptureClient);
    if (FAILED(hr))
    {
        qDebug("Error: Unable to get new capture client: %x.\n", hr);
        return false;
    }
    //开始采集
    hr = _AudioClient->Start();
    if (FAILED(hr))
    {
        qDebug("Error: Unable to get new capture client: %x.\n", hr);
        return false;
    }

    qDebug() << "mic param: " << micFormat->nChannels << micFormat->nSamplesPerSec << micFormat->wBitsPerSample;

    if (micFormat->wFormatTag != 65534) {
        qDebug() << "mic error format error";
        exit(0);
    }

    m_param.channel = micFormat->nChannels;
    m_param.sample = micFormat->nSamplesPerSec;
    m_param.format = AV_SAMPLE_FMT_S16;

    return true;
}

/*
 * Resolve the capture endpoint device.
 *
 * Uses the default communications capture endpoint when m_deviceId is empty,
 * otherwise looks the endpoint up by its id string.
 *
 * Returns an IMMDevice the caller must Release(), or NULL on failure
 * (failures are logged; previously the endpoint-lookup HRESULT was ignored).
 */
IMMDevice *WinMicRecord::getDevice()
{
    IMMDevice *pDevice = NULL;
    IMMDeviceEnumerator *pMMDeviceEnumerator = NULL;
    HRESULT hr = CoCreateInstance(
                __uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
                __uuidof(IMMDeviceEnumerator),
                (void**)&pMMDeviceEnumerator);
    if(FAILED(hr)) {
        qDebug() << "error getDevice";
        return NULL;
    }

    if (m_deviceId.isEmpty())
        hr = pMMDeviceEnumerator->GetDefaultAudioEndpoint(eCapture, eCommunications, &pDevice);
    else
        hr = pMMDeviceEnumerator->GetDevice(m_deviceId.toStdWString().c_str(),&pDevice);

    pMMDeviceEnumerator->Release();

    if (FAILED(hr)) {
        qDebug("Unable to retrieve capture endpoint: %x.\n", hr);
        return NULL;
    }

    return pDevice;
}

/*
 * Capture-thread entry point: performs init() on this thread (COM is
 * initialized per-thread), signals startup completion, then runs the
 * blocking capture loop.
 *
 * Fix: the result of init() was previously ignored, so capture() ran even
 * when the WASAPI objects were never created.
 */
void WinMicRecord::run()
{
    /* Initialization must happen on this (capture) thread. */
    const bool ok = init();

    // Release the semaphore even on failure so a thread waiting for
    // startup does not deadlock.
    m_sem.release();

    if (!ok) {
        qDebug() << "mic init failed, capture thread exiting";
        return;
    }

    qDebug() << "mic record thread" << QThread::currentThreadId();
    capture();
    qDebug() << "mic thread run end";
}

/*
 * Blocking capture loop (runs on the thread started by run()).
 *
 * Waits for the WASAPI "samples ready" event, pulls interleaved S16 data from
 * the capture client into a ring buffer, and once a full 1024-sample frame is
 * accumulated, resamples it to 44100 Hz mono S16 and emits it via the
 * audioData() signal. Never returns under normal operation.
 *
 * NOTE(review): the loop has no stop condition — `while (TRUE)` runs until
 * the thread is killed. NOTE(review): `waitResult` is never inspected, and
 * ReleaseBuffer() is called even when GetBuffer() failed — confirm intended.
 */
bool WinMicRecord::capture()
{
    HRESULT hr;
    QEventLoop loop;
    //    FILE *srcFile = fopen("mic.pcm", "wb");

    // One outgoing frame = 1024 samples of interleaved S16 at mic channel count.
    int nbSamples = 1024;
    int frameSize = nbSamples * micFormat->nChannels * 2;
    // Converts mic-native rate/channels to fixed 44100 Hz mono S16.
    AVAudioConvert convert(micFormat->nSamplesPerSec,micFormat->nChannels,AV_SAMPLE_FMT_S16,
                      44100,1,AV_SAMPLE_FMT_S16);

    int retBytes = 0;
    int retNbSample = 0;

    // Ring buffer smooths the mismatch between WASAPI packet sizes and the
    // fixed 1024-sample frames we emit.
    LoopBuffer buffer(1024 * 6);
    buffer.open(QIODevice::ReadWrite);

    while (TRUE) {
        DWORD waitResult = WaitForSingleObject(_AudioSamplesReadyEvent, INFINITE);

        BYTE *pData = NULL;
        UINT32 framesAvailable = 0;
        DWORD  flags = 0;
        hr = _CaptureClient->GetBuffer(&pData, &framesAvailable, &flags, NULL, NULL);
        if (SUCCEEDED(hr)) {
            if (framesAvailable!=0) {
                if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
                    // Engine reports silence: treat the packet as empty.
                    qDebug() << "no data";
                    pData = NULL;
                } else {
                    int nDataLen = framesAvailable * _FrameSize;/* payload length in bytes */

                    buffer.writeData((const char *)pData,nDataLen);

                    // Drain complete frames; leftovers stay in the ring buffer.
                    if (buffer.usedSize() >= frameSize) {
                        QByteArray tempBuf(frameSize,0);
                        if (buffer.read(tempBuf.data(),frameSize) == frameSize) {
                            // Source frame wraps tempBuf's storage (not owned
                            // by the AVFrame — no av_frame_get_buffer here).
                            AVFrame * srcFrame = av_frame_alloc();
                            srcFrame->sample_rate = micFormat->nSamplesPerSec;
                            srcFrame->channels = micFormat->nChannels;
                            srcFrame->nb_samples = nbSamples;
                            srcFrame->format = AV_SAMPLE_FMT_S16;
                            srcFrame->data[0] = (uint8_t*)tempBuf.data();

                            // NOTE(review): fixed 4096-byte output assumes the
                            // resampled mono frame always fits — confirm.
                            QByteArray resampleBuf(4096,0);
                            convert.audio_rescale(srcFrame,(uint8_t*)resampleBuf.data(),&retBytes,&retNbSample);

                            // Destination frame likewise wraps resampleBuf.
                            AVFrame *dstFrame = av_frame_alloc();
                            dstFrame->sample_rate = 44100;
                            dstFrame->channels = 1;
                            dstFrame->nb_samples = retNbSample;
                            dstFrame->format = AV_SAMPLE_FMT_S16;
                            dstFrame->data[0] = (uint8_t*)resampleBuf.data();

                            //                            fwrite(resampleBuf.data(),1,retBytes,srcFile);

                            AVFrame *emitFrame = av_frame_clone(dstFrame);/* freed by the receiving thread */
                            emitFrame->pts = av_gettime();
                            emit audioData(emitFrame);

                            // Wrapper frames only; the QByteArray buffers they
                            // pointed at are released when they go out of scope.
                            av_frame_unref(srcFrame);
                            av_frame_free(&srcFrame);

                            av_frame_unref(dstFrame);
                            av_frame_free(&dstFrame);

                        } else {
                            qDebug() << "read error";
                        }
                    }

                    // Let queued slots/events on this thread run between packets.
                    loop.processEvents(QEventLoop::AllEvents);
                }
            }
        }
        _CaptureClient->ReleaseBuffer(framesAvailable);
    }

    return true;
}
