#include "wasutil.hpp"

// MMCSS task index shared by all audio callback threads; 0 asks
// AvSetMmThreadCharacteristics to allocate a new index on first use.
DWORD highPriorityTaskIndex = 0;
// COM class/interface IDs resolved once at load time for the WASAPI
// objects used below (device enumeration, render and capture clients).
const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioRenderClient = __uuidof(IAudioRenderClient);
const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);


// Callbacks=========================================================================
// Callbacks=========================================================================

/* Render-thread entry point: each time the audio client signals the play
 * event, fill the free part of the render buffer from the track mixer and
 * convert the 16-bit samples from network to host byte order in place.
 */
DWORD WINAPI playCallbackHandler(LPVOID userdata) {
    struct coreinfo* ci = (struct coreinfo*)userdata;
    BYTE* frameBytes;

    // Ask MMCSS for "Pro Audio" scheduling; failure is non-fatal, the
    // thread just runs at normal priority.
    HANDLE mmcssTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &highPriorityTaskIndex);
    if (mmcssTask == NULL) {
        printf("Failed to set thread priority as Pro Audio\n");
    }

    for (;;) {
        // The audio client signals this event whenever it wants more data.
        if (WaitForSingleObject(ci->hPlayCallbackEvent, 2000) != WAIT_OBJECT_0) {
            throw "Timeout waiting for play callback";
        }

        UINT32 padding = 0;
        HRESULT hr = ci->pPlayAudioClient->GetCurrentPadding(&padding);
        if (FAILED(hr)) {
            throw "Failed to get play buffer padding";
        }

        // Fill everything that is not still queued for playback.
        const UINT32 framesWanted = ci->playBufferFrameCount - padding;
        const UINT32 bytesWanted = framesWanted * 2; // 16-bit mono frames
        hr = ci->pRenderClient->GetBuffer(framesWanted, &frameBytes);
        if (FAILED(hr)) {
            throw "Failed to get play buffer";
        }

        // The mixer delivers network-byte-order samples; swap each one.
        ci->trackMixer->readSound((char*)frameBytes, bytesWanted);
        short* samples = (short*)frameBytes;
        for (UINT32 idx = 0; idx < framesWanted; ++idx) {
            samples[idx] = ntohs(samples[idx]);
        }

        hr = ci->pRenderClient->ReleaseBuffer(framesWanted, 0);
        if (FAILED(hr)) {
            throw "Failed to release play buffer";
        }
    }
}

extern Slab<Packet> sendPayloadPool;
extern Slab<struct sockaddr_in> fromaddrPool;

/* Capture-thread entry point: reads microphone frames from WASAPI,
 * byte-swaps the 16-bit samples to network order, sends them on the
 * vocal channel, and feeds the same packet back into the local vocal
 * track (manual loopback — see the comment inside the loop).
 */
DWORD WINAPI recordCallbackHandler(LPVOID userdata) {
    struct coreinfo* ci = (struct coreinfo*)userdata;
    BYTE* pData;

    // Non-fatal: without MMCSS we just run at normal priority.
    HANDLE hTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &highPriorityTaskIndex);
    if (hTask == NULL) {
        printf("Failed to set thread priority as Pro Audio\n");
    }
    while (true) {
        DWORD ret = WaitForSingleObject(ci->hRecordCallbackEvent, 2000);
        if (ret != WAIT_OBJECT_0) {
            throw "Timeout waiting for record callback";
        }

        HRESULT hr;

        UINT32 packetLength = 0;
        hr = ci->pCaptureClient->GetNextPacketSize(&packetLength);
        if (FAILED(hr)) {
            throw "Failed to get record next packet size";
        }

        UINT32 recordedBufferFrameCount;
        DWORD recordStatusFlags;
        hr = ci->pCaptureClient->GetBuffer(
            &pData,
            &recordedBufferFrameCount,
            &recordStatusFlags,
            NULL,
            NULL
        );
        if (FAILED(hr)) {
            throw "Failed to get record buffer";
        }
        if (hr == AUDCLNT_S_BUFFER_EMPTY || recordedBufferFrameCount == 0) {
            // Nothing was captured this period: per the WASAPI docs pData
            // must not be read, and sending a zero-length packet is useless.
            // Release zero frames and wait for the next event.
            printf("No data available\n");
            ci->pCaptureClient->ReleaseBuffer(0);
            continue;
        }
        // Compute the byte count only after GetBuffer succeeded —
        // recordedBufferFrameCount is uninitialized on the failure path.
        UINT32 recordedBufferByteCount = recordedBufferFrameCount * 2; // 16-bit mono
        if (recordStatusFlags & AUDCLNT_BUFFERFLAGS_SILENT) {
            printf("Silent\n");
            memset(pData, 0, recordedBufferByteCount);
            // fortunately 2 char 0 is 1 short 0
        }
        if (recordStatusFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) {
            printf("Glitch detected when recording\n");
        }
        // We don't care about _TIMESTAMP_ERROR

        // Samples go on the wire in network byte order.
        for (UINT32 i = 0; i < recordedBufferFrameCount; i++) {
            ((short*)pData)[i] = htons(((short*)pData)[i]);
        }
        Packet *p=ci->vocal_channel->sendAndGetPacketBack(pData,recordedBufferByteCount,0);
        /* Again, on Windows, we can't enable multicast loopback or
          * we'll be loopback-only. 
          * So we have to manually do vocal_track->receiveSound here. 
         */
        ci->vocal_track->receiveSound(p);

        // Return the packet and its address record to their pools.
        fromaddrPool.release(p->from_addr);
        p->~Packet();
        sendPayloadPool.release(p);

        hr = ci->pCaptureClient->ReleaseBuffer(recordedBufferFrameCount);
        if (FAILED(hr)) {
            throw "Failed to release record buffer";
        }
    }
}

/* Loopback-capture thread entry point: reads what the system is playing
 * (app audio) from the WASAPI loopback stream, byte-swaps the 16-bit
 * samples to network order, and sends them on the vocal channel. Unlike
 * recordCallbackHandler, nothing is looped back into a local track here.
 */
DWORD WINAPI appAudioCallbackHandler(LPVOID userdata) {
    struct coreinfo* ci = (struct coreinfo*)userdata;
    BYTE* pData;

    // Non-fatal: without MMCSS we just run at normal priority.
    HANDLE hTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &highPriorityTaskIndex);
    if (hTask == NULL) {
        printf("Failed to set thread priority as Pro Audio\n");
    }
    while (true) {
        DWORD ret = WaitForSingleObject(ci->hAppAudioCallbackEvent, 2000);
        if (ret != WAIT_OBJECT_0) {
            // Fixed copy-paste: this is the app-audio event, not the record one.
            throw "Timeout waiting for app audio callback";
        }

        HRESULT hr;

        UINT32 packetLength = 0;
        hr = ci->pAppAudioCaptureClient->GetNextPacketSize(&packetLength);
        if (FAILED(hr)) {
            throw "Failed to get record next packet size";
        }

        UINT32 recordedBufferFrameCount;
        DWORD recordStatusFlags;
        hr = ci->pAppAudioCaptureClient->GetBuffer(
            &pData,
            &recordedBufferFrameCount,
            &recordStatusFlags,
            NULL,
            NULL
        );
        if (FAILED(hr)) {
            throw "Failed to get record buffer";
        }
        if (hr == AUDCLNT_S_BUFFER_EMPTY || recordedBufferFrameCount == 0) {
            // Nothing was captured this period: per the WASAPI docs pData
            // must not be read, and sending a zero-length packet is useless.
            // Release zero frames and wait for the next event.
            printf("No data available\n");
            ci->pAppAudioCaptureClient->ReleaseBuffer(0);
            continue;
        }
        // Compute the byte count only after GetBuffer succeeded —
        // recordedBufferFrameCount is uninitialized on the failure path.
        UINT32 recordedBufferByteCount = recordedBufferFrameCount * 2; // 16-bit mono
        if (recordStatusFlags & AUDCLNT_BUFFERFLAGS_SILENT) {
            printf("Silent\n");
            memset(pData, 0, recordedBufferByteCount);
            // fortunately 2 char 0 is 1 short 0
        }
        if (recordStatusFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) {
            printf("Glitch detected when recording app audio\n");
        }
        // We don't care about _TIMESTAMP_ERROR

        // Samples go on the wire in network byte order.
        for (UINT32 i = 0; i < recordedBufferFrameCount; i++) {
            ((short*)pData)[i] = htons(((short*)pData)[i]);
        }
        ci->vocal_channel->send(pData, recordedBufferByteCount, 0);

        hr = ci->pAppAudioCaptureClient->ReleaseBuffer(recordedBufferFrameCount);
        if (FAILED(hr)) {
            throw "Failed to release record buffer";
        }
    }
}

// Callbacks End====================================================================

/* Sets up WASAPI loopback capture of the default render device ("app
 * audio"), spawns appAudioCallbackHandler, and starts the stream.
 * Preconditions: startPlaying() has run (ci->format is initialized) and
 * COM is initialized on this thread. Throws const char* on any failure.
 */
void startAppAudio(struct coreinfo* ci) {
    HRESULT hr;
    IMMDeviceEnumerator* pEnumerator = NULL;
    IMMDevice* pDevice = NULL;

    hr = CoCreateInstance(
        CLSID_MMDeviceEnumerator, NULL,
        CLSCTX_ALL, IID_IMMDeviceEnumerator,
        (void**)&pEnumerator
    );
    if (FAILED(hr)) {
        throw "Failed to create device enumerator when starting app audio";
    }

    hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDevice);
    // The enumerator is only needed for the endpoint lookup; release it
    // before any possible throw so it cannot leak.
    pEnumerator->Release();
    if (FAILED(hr)) {
        throw "Failed to get default play device when starting app audio";
    }

    hr = pDevice->Activate(
        IID_IAudioClient, CLSCTX_ALL,
        NULL, (void**)&ci->pAppAudioClient
    );
    // Same: the device reference is no longer needed once activated.
    pDevice->Release();
    if (FAILED(hr)) {
        throw "Failed to activate default output device when starting app audio";
    }

    REFERENCE_TIME minBufferDuration;
    hr = ci->pAppAudioClient->GetDevicePeriod(NULL, &minBufferDuration);
    if (FAILED(hr)) {
        throw "Failed to get minimal app audio buffer duration";
    }

    // When calling startAppAudio, startPlaying must have been called
    // So ci->format is initialized.
    // NOTE(review): IAudioClient::Initialize documents the periodicity
    // argument as "must be 0" in shared mode; passing minBufferDuration
    // here seems to work but should be confirmed against the docs.
    hr = ci->pAppAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | AUDCLNT_STREAMFLAGS_LOOPBACK,
        minBufferDuration,
        minBufferDuration,
        &ci->format,
        NULL
    );
    if (FAILED(hr)) {
        throw "Failed to initialize app audio client";
    }

    // Auto-reset event the engine signals whenever capture data is ready.
    ci->hAppAudioCallbackEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (ci->hAppAudioCallbackEvent == NULL) {
        throw "Failed to create app audio callback event";
    }

    hr = ci->pAppAudioClient->SetEventHandle(ci->hAppAudioCallbackEvent);
    if (FAILED(hr)) {
        throw "Failed to set app audio callback";
    }

    hr = ci->pAppAudioClient->GetService(
        IID_IAudioCaptureClient,
        (void**)&ci->pAppAudioCaptureClient
    );
    if (FAILED(hr)) {
        throw "Failed to get app audio capture client";
    }

    HANDLE hThread = CreateThread(NULL, 0, appAudioCallbackHandler, ci, 0, NULL);
    if (hThread == NULL) {
        throw "Failed to create app audio callback thread";
    }
    // The callback thread is never joined; drop our handle so it is not leaked.
    CloseHandle(hThread);

    hr = ci->pAppAudioClient->Start();
    if (FAILED(hr)) {
        throw "Failed to start recording";
    }
    return;
}

void stopAppAudio(struct coreinfo* ci) {
    HRESULT hr;
    hr = ci->pAppAudioClient->Stop();
    if (FAILED(hr)) {
        throw "Failed to stop recording";
    }

    return;
}

void startSinging(struct coreinfo* ci) {
    HRESULT hr;
    IMMDeviceEnumerator* pEnumerator = NULL;
    IMMDevice* pDevice = NULL;

    hr = CoCreateInstance(
        CLSID_MMDeviceEnumerator, NULL,
        CLSCTX_ALL, IID_IMMDeviceEnumerator,
        (void**)&pEnumerator
    );
    if (FAILED(hr)) {
        throw "Failed to create device enumerator when starting singing";
    }

    hr = pEnumerator->GetDefaultAudioEndpoint(eCapture, eConsole, &pDevice);
    if (FAILED(hr)) {
        throw "Failed to get default record device";
    }

    hr = pDevice->Activate(
        IID_IAudioClient, CLSCTX_ALL,
        NULL, (void**)&ci->pRecordAudioClient
    );
    if (FAILED(hr)) {
        throw "Failed to activate record device";
    }

    REFERENCE_TIME minBufferDuration;
    hr = ci->pRecordAudioClient->GetDevicePeriod(NULL, &minBufferDuration);
    if (FAILED(hr)) {
        throw "Failed to get minimal record buffer duration";
    }

    // When calling startSinging, startPlaying must have been called
    // So ci->format is initialized. 
    hr = ci->pRecordAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM,
        minBufferDuration,
        minBufferDuration,
        &ci->format,
        NULL
    );
    if (FAILED(hr)) {
        throw "Failed to initialize record audio client";
    }

    ci->hRecordCallbackEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (ci->hRecordCallbackEvent==NULL) {
        throw "Failed to create record callback event";
    }

    hr = ci->pRecordAudioClient->SetEventHandle(ci->hRecordCallbackEvent);
    if (FAILED(hr)) {
        throw "Failed to set record callback";
    };

    hr = ci->pRecordAudioClient->GetService(
        IID_IAudioCaptureClient,
        (void**)&ci->pCaptureClient
    );
    if (FAILED(hr)) {
        throw "Failed to get audio capture client";
    }

    CreateThread(NULL, 0, recordCallbackHandler, ci, 0, NULL);

    hr = ci->pRecordAudioClient->Start();
    if (FAILED(hr)) {
        throw "Failed to start recording";
    }

    return;
}

void stopSinging(struct coreinfo* ci) {
    HRESULT hr;
    hr = ci->pRecordAudioClient->Stop();
    if (FAILED(hr)) {
        throw "Failed to stop recording";
    }

    return;
}

/* Sets up WASAPI rendering on the default output device, initializes the
 * shared stream format (16-bit mono PCM @ 44.1 kHz) that startSinging and
 * startAppAudio reuse, spawns playCallbackHandler, and starts the stream.
 * Precondition: COM is initialized on this thread.
 * Throws const char* on any failure.
 */
void startPlaying(struct coreinfo* ci) {
    HRESULT hr;
    IMMDeviceEnumerator* pEnumerator = NULL;
    IMMDevice* pDevice = NULL;

    hr = CoCreateInstance(
        CLSID_MMDeviceEnumerator, NULL,
        CLSCTX_ALL, IID_IMMDeviceEnumerator,
        (void**)&pEnumerator);
    if (FAILED(hr)) {
        throw "Failed to create device enumerator when starting playing";
    }

    hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDevice);
    // The enumerator is only needed for the endpoint lookup; release it
    // before any possible throw so it cannot leak.
    pEnumerator->Release();
    if (FAILED(hr)) {
        throw "Failed to get default play device";
    }

    hr = pDevice->Activate(
        IID_IAudioClient, CLSCTX_ALL,
        NULL, (void**)&ci->pPlayAudioClient
    );
    // Same: the device reference is no longer needed once activated.
    pDevice->Release();
    if (FAILED(hr)) {
        throw "Failed to activate play device";
    }

    // 16-bit mono PCM at 44.1 kHz; AUTOCONVERTPCM lets the engine resample
    // to whatever the device mix format actually is.
    ci->format.wFormatTag = WAVE_FORMAT_PCM;
    ci->format.nChannels = 1;
    ci->format.nSamplesPerSec = 44100;
    ci->format.wBitsPerSample = 16;
    ci->format.nBlockAlign = ci->format.nChannels * ci->format.wBitsPerSample / 8;
    ci->format.nAvgBytesPerSec = ci->format.nSamplesPerSec * ci->format.nBlockAlign;
    ci->format.cbSize = 0;

    REFERENCE_TIME minBufferDuration;
    hr = ci->pPlayAudioClient->GetDevicePeriod(NULL, &minBufferDuration);
    if (FAILED(hr)) {
        throw "Failed to get minimal play buffer duration";
    }

    // NOTE(review): IAudioClient::Initialize documents the periodicity
    // argument as "must be 0" in shared mode; passing minBufferDuration
    // here seems to work but should be confirmed against the docs.
    hr = ci->pPlayAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
        minBufferDuration,
        minBufferDuration,
        &ci->format,
        NULL
    );
    if (FAILED(hr)) {
        throw "Failed to initialize play audio client";
    }

    // Cache the total buffer size; the render callback uses it together
    // with GetCurrentPadding to size each refill.
    hr = ci->pPlayAudioClient->GetBufferSize(&ci->playBufferFrameCount);
    if (FAILED(hr)) {
        throw "Failed to get buffer size";
    }

    // Auto-reset event the engine signals whenever it wants more data.
    ci->hPlayCallbackEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (ci->hPlayCallbackEvent == NULL) {
        throw "Failed to create event for play callback";
    }

    hr = ci->pPlayAudioClient->SetEventHandle(ci->hPlayCallbackEvent);
    if (FAILED(hr)) {
        throw "Failed to set play callback";
    }

    hr = ci->pPlayAudioClient->GetService(
        IID_IAudioRenderClient,
        (void**)&ci->pRenderClient
    );
    if (FAILED(hr)) {
        throw "Failed to get service render client";
    }

    HANDLE hThread = CreateThread(NULL, 0, playCallbackHandler, ci, 0, NULL);
    if (hThread == NULL) {
        throw "Failed to create play callback thread";
    }
    // The callback thread is never joined; drop our handle so it is not leaked.
    CloseHandle(hThread);

    hr = ci->pPlayAudioClient->Start();
    if (FAILED(hr)) {
        throw "Failed to start audio client";
    }
}
