﻿#include <QDebug>
#include <QThread>
#include <QEventLoop>
#include <QTime>

#include <mmdeviceapi.h>
#include <Audioclient.h>
#include <avrt.h>
#include <Functiondiscoverykeys_devpkey.h>

#include "common/AVAudioConvert.h"
#include "common/LoopBuffer.h"
#include "render/WinAudioRender.h"

extern "C" {
#include <libavutil/time.h>
}

#include "WinSpeakerRecord.h"

/*
 * Each thread needs its own COM initialization step.
 * After a few seconds of silence, the endpoint stops delivering data callbacks.
*/

// Constructor: initializes COM on the thread that creates this object.
// NOTE(review): CoInitialize(NULL) selects the default (STA) apartment here,
// while init() — executed on the worker thread — uses COINIT_MULTITHREADED.
// COM initialization is per-thread (see the file header note), so the two do
// not conflict, but confirm the mixed apartment models are intentional.
WinSpeakerRecord::WinSpeakerRecord(QObject *parent) : WinMediaDevice(parent)
{
    qDebug() << "com init";
    CoInitialize(NULL);
}

// Destructor: balances the CoInitialize() performed in the constructor.
// Per-thread COM teardown; assumes destruction happens on the constructing
// thread (Qt parent-child deletion normally guarantees this).
WinSpeakerRecord::~WinSpeakerRecord()
{
    CoUninitialize();
    qDebug() << "com destroy"; // fixed typo: was "destory"
}

/*
 * Prepare WASAPI loopback capture on the worker thread:
 * create the stop/started events, open the render endpoint, force the shared
 * mix format down to 16-bit PCM, start a periodic waitable timer at half the
 * device period, and start the audio client.
 *
 * Returns true on success. On failure intermediate resources are left for
 * capture()'s tail cleanup / process teardown (original behavior preserved).
 */
bool WinSpeakerRecord::init()
{
    // Per-thread COM init; S_FALSE just means COM was already initialized
    // on this thread.
    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    if (hr == S_FALSE) {
        qDebug() << "spk com init already";
    }

    // Manual-reset events: stop request and "capture started" notification.
    m_hEventStop = CreateEvent(NULL, TRUE, FALSE, NULL);
    if(m_hEventStop == NULL) {
        qDebug() << "error m_hEventStop";
        return false;
    }

    m_hEventStarted = CreateEvent(NULL, TRUE, FALSE, NULL);
    if(m_hEventStarted == NULL){
        qDebug() << "error m_hEventStarted";
        return false;
    }

    this->pDevice = getDevice();
    if (pDevice == NULL) return false; // was dereferenced unchecked; getDevice() logs the failure

    hr = pDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**)&pAudioClient);
    if(FAILED(hr)) return false;

    // Device period in 100-ns units; drives the waitable-timer cadence below.
    hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, NULL);
    if(FAILED(hr)) return false;

    /* The shared-mode mix format on this machine defaults to 32-bit samples
     * (per original note); AdjustFormatTo16Bits rewrites it to 16-bit PCM. */
    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) return false;

    if(!AdjustFormatTo16Bits(pwfx)) return false;

    hTimerWakeUp = CreateWaitableTimer(NULL, FALSE, NULL);
    if(hTimerWakeUp == NULL) return false;

    SetEvent(m_hEventStarted);

    // Loopback mode captures what the render endpoint (speaker) is playing.
    hr = pAudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, pwfx, 0);
    if(FAILED(hr)) return false;

    hr = pAudioClient->GetService(__uuidof(IAudioCaptureClient), (void**)&pAudioCaptureClient);
    if(FAILED(hr)) return false;

    // Register with MMCSS so the capture thread gets pro-audio scheduling.
    hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
    if (NULL == hTask) return false;

    LARGE_INTEGER liFirstFire;
    liFirstFire.QuadPart = -hnsDefaultDevicePeriod / 2; // negative means relative time
    // Half the device period converted from 100-ns units to milliseconds.
    // Fixed precedence: the old code cast the 64-bit period to LONG *before*
    // dividing; divide first, then narrow.
    LONG lTimeBetweenFires = (LONG)(hnsDefaultDevicePeriod / 2 / (10 * 1000));

    BOOL bOK = SetWaitableTimer(hTimerWakeUp,&liFirstFire,lTimeBetweenFires,NULL, NULL, FALSE);
    if(!bOK) return false;

    hr = pAudioClient->Start();
    if(FAILED(hr)) return false;

    bStarted = TRUE;

    qDebug() << "spk param: " << pwfx->nChannels << pwfx->nSamplesPerSec << pwfx->wBitsPerSample;

    // AdjustFormatTo16Bits is expected to keep the WAVE_FORMAT_EXTENSIBLE
    // (0xFFFE, was the magic 65534) wrapper; anything else is unexpected.
    if (pwfx->wFormatTag != WAVE_FORMAT_EXTENSIBLE) {
        qDebug() << "spk error format error";
        // TODO(review): exiting the whole process — with a success code — on a
        // format mismatch is harsh; prefer returning false. Kept as-is.
        exit(0);
    }

    m_param.channel = pwfx->nChannels;
    m_param.sample = pwfx->nSamplesPerSec;
    m_param.format = AV_SAMPLE_FMT_S16;

    return SUCCEEDED(hr);
}

/*
 * Resolve the render endpoint to capture from.
 *
 * If m_deviceId is empty the default console render device is used, otherwise
 * the endpoint with that id is opened. Returns NULL on failure (callers must
 * check — the original init() did not).
 */
IMMDevice *WinSpeakerRecord::getDevice()
{
    IMMDevice* pDevice = NULL;
    IMMDeviceEnumerator *pMMDeviceEnumerator = NULL;
    HRESULT hr = CoCreateInstance(
                __uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
                __uuidof(IMMDeviceEnumerator),
                (void**)&pMMDeviceEnumerator);
    if(FAILED(hr)) {
        qDebug() << "error getDevice";
        return NULL;
    }

    if (m_deviceId.isEmpty())
        hr = pMMDeviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDevice);
    else
        // Open a specific endpoint by id. Fixed: the HRESULT was previously
        // discarded, so a failed lookup returned NULL with no diagnostics.
        hr = pMMDeviceEnumerator->GetDevice(m_deviceId.toStdWString().c_str(),&pDevice);

    if (FAILED(hr))
        qDebug() << "error getDevice endpoint" << m_deviceId;

    pMMDeviceEnumerator->Release();

    return pDevice;
}

/*
 * QThread entry point. init() must run here because WASAPI/COM state is
 * per-thread (see file header note).
 *
 * Fixed: init()'s result was ignored, so a failed init fell through into
 * capture() and dereferenced null COM pointers. The semaphore is still
 * released unconditionally so a caller blocked on m_sem never deadlocks.
 * (On init failure, capture()'s tail cleanup is skipped; partially acquired
 * resources are reclaimed at process teardown — same as the pre-existing
 * early-return paths in init().)
 */
void WinSpeakerRecord::run()
{
    const bool ok = init();
    m_sem.release();

    qDebug() << "spk record thread" << QThread::currentThreadId();
    if (ok)
        capture(); // blocks until m_hEventStop is signaled
    else
        qDebug() << "spk init failed, capture skipped";
    qDebug() << "spk thread run end";
}

/*
 * Loopback capture pump. Runs until m_hEventStop is signaled.
 *
 * Each hTimerWakeUp tick drains the WASAPI capture buffer into a ring buffer
 * of raw interleaved S16 PCM. Whenever a full 1024-sample frame (at the mix
 * rate/channel count) is available, it is resampled to 44100 Hz mono S16 and
 * emitted as an AVFrame via audioData(); the emitted clone is freed by the
 * receiving thread.
 *
 * NOTE(review): declared bool but always returns 0 (false) — run() ignores
 * the result, so this is harmless today.
 */
bool WinSpeakerRecord::capture()
{
    // Index 0 = stop request, index 1 = periodic capture timer.
    HANDLE waitArray[2] = { m_hEventStop, hTimerWakeUp };
    HRESULT hr = 0;
    UINT32 nNextPacketSize = 0;
    BYTE *pData = NULL;
    UINT32 nNumFramesToRead = 0;
    DWORD dwFlags = 0;

    QEventLoop loop;

    // One emitted frame = 1024 samples of interleaved S16 at the mix format
    // (2 bytes per sample per channel).
    int nbSamples = 1024;
    int frameSize = nbSamples * pwfx->nChannels * 2;
    // Resampler: mix format (rate/channels, S16) -> 44100 Hz mono S16.
    AVAudioConvert convert(pwfx->nSamplesPerSec,pwfx->nChannels,AV_SAMPLE_FMT_S16,
                      44100,1,AV_SAMPLE_FMT_S16);

    int retBytes = 0;
    int retNbSample = 0;

    // Ring buffer accumulating captured PCM until a full frame is available.
    LoopBuffer buffer(1024 * 6);
    buffer.open(QIODevice::ReadWrite);

    //    FILE *srcFile = fopen("speaker.pcm", "wb");

    while(TRUE) {
        DWORD dwWaitResult = WaitForMultipleObjects(sizeof(waitArray)/sizeof(waitArray[0]), waitArray, FALSE, INFINITE);
        // m_hEventStop signaled: leave the loop and release everything below.
        if(WAIT_OBJECT_0 == dwWaitResult) break;

        if (WAIT_OBJECT_0 + 1 != dwWaitResult) {
            qDebug() << "spk capture error";
            break;
        }

        hr = pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize);
        if(FAILED(hr)) {
            qDebug() << "spk capture error";
            break;
        }

        // Nothing queued this tick; wait for the next timer fire.
        if (nNextPacketSize == 0) continue;

        hr = pAudioCaptureClient->GetBuffer(&pData,&nNumFramesToRead,&dwFlags,NULL,NULL);

        // NOTE(review): dwFlags is inspected before the FAILED(hr) check —
        // on failure it still holds its previous/zero-initialized value, so
        // this is benign, but the two checks are in the wrong order.
        if (dwFlags & AUDCLNT_BUFFERFLAGS_SILENT) {
            // Per original author's note: this flag is not effective for
            // speaker (loopback) capture.
            qDebug() << QTime::currentTime() << "spk no data" << nNumFramesToRead;
        }

        if(FAILED(hr)) {
            qDebug() << "spk capture error";
            break;
        }

        if (0 != nNumFramesToRead) {
            // nBlockAlign = bytes per frame across all channels.
            int nDataLen = pwfx->nBlockAlign * nNumFramesToRead;

            buffer.writeData((const char *)pData,nDataLen);

            // Drain exactly one fixed-size frame per buffer fill; any surplus
            // stays in the ring buffer for the next iteration.
            if (buffer.usedSize() >= frameSize) {
                QByteArray tempBuf(frameSize,0);
                if (buffer.read(tempBuf.data(),frameSize) == frameSize) {
                    // Wrap the raw PCM in an AVFrame for the resampler.
                    // data[0] points into tempBuf (stack-owned); the frame is
                    // not refcounted, so it must not outlive this scope.
                    AVFrame * srcFrame = av_frame_alloc();
                    srcFrame->sample_rate = pwfx->nSamplesPerSec;
                    srcFrame->channels = pwfx->nChannels;
                    srcFrame->nb_samples = nbSamples;
                    srcFrame->format = AV_SAMPLE_FMT_S16;
                    srcFrame->data[0] = (uint8_t*)tempBuf.data();

                    QByteArray resampleBuf(4096,0);
                    convert.audio_rescale(srcFrame,(uint8_t*)resampleBuf.data(),&retBytes,&retNbSample);

                    AVFrame *dstFrame = av_frame_alloc();
                    dstFrame->sample_rate = 44100;
                    dstFrame->channels = 1;
                    dstFrame->nb_samples = retNbSample;
                    dstFrame->format = AV_SAMPLE_FMT_S16;
                    dstFrame->data[0] = (uint8_t*)resampleBuf.data();

                    //                    fwrite(resampleBuf.data(),1,retBytes,srcFile);

                    // dstFrame is not refcounted, so av_frame_clone deep-copies
                    // the data (per FFmpeg av_frame_ref semantics) — the clone
                    // does not dangle when resampleBuf dies.
                    AVFrame *emitFrame = av_frame_clone(dstFrame);/* freed by the receiving thread */
                    emitFrame->pts = av_gettime();
                    emit audioData(emitFrame);

                    // unref before free is redundant (av_frame_free unrefs),
                    // but harmless.
                    av_frame_unref(srcFrame);
                    av_frame_free(&srcFrame);

                    av_frame_unref(dstFrame);
                    av_frame_free(&dstFrame);

                } else {
                    qDebug() << "read error";
                }
            }
            // Service queued signal/slot deliveries on this thread.
            loop.processEvents(QEventLoop::AllEvents);
        }

        pAudioCaptureClient->ReleaseBuffer(nNumFramesToRead);
    }

    // ---- teardown: release everything init() acquired ----

    if(hTask != NULL)
    {
        AvRevertMmThreadCharacteristics(hTask);
        hTask = NULL;
    }

    if(pAudioCaptureClient != NULL)
    {
        pAudioCaptureClient->Release();
        pAudioCaptureClient = NULL;
    }

    if(pwfx != NULL)
    {
        CoTaskMemFree(pwfx);
        pwfx = NULL;
    }

    if(hTimerWakeUp != NULL)
    {
        CancelWaitableTimer(hTimerWakeUp);
        CloseHandle(hTimerWakeUp);
        hTimerWakeUp = NULL;
    }

    if(pAudioClient != NULL)
    {
        if(bStarted)
        {
            pAudioClient->Stop();
            // NOTE(review): this logs "error" on the normal stop path —
            // message looks misleading; verify intent.
            qDebug() << "spk release error";
        }

        pAudioClient->Release();
        pAudioClient = NULL;
    }

    return 0;
}
