
#include "Main.h"

#include "AudioHelper.h"

#include <Audioclient.h>
#include <Audiopolicy.h>
#include <Mmdeviceapi.h>
#include <Windows.h>

// Byte size of the circular buffer that captured PCM data is written into.
#define SIGNAL_BUFFER_SIZE ( 64 * 1024 )

// COM class / interface IDs used to create the WASAPI capture objects.
const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);

static IMMDevice* g_pxDevice = 0;                  // default render endpoint (loopback source)
static IAudioClient* g_pxAudioClient = 0;          // shared-mode client on the endpoint
static IAudioCaptureClient* g_pxCaptureClient = 0; // capture service obtained from the client
static char* g_pcSignalBuffer = 0;                 // circular buffer of raw captured bytes
static u_int g_uBlockAlign = 0;                    // bytes per audio frame of the capture format
static u_int g_uChannels = 0;                      // channel count of the capture format
static u_int g_uBufferWritePos = 0;                // next write offset into g_pcSignalBuffer

// Device engine period, queried but currently unused by Initialize() (see the
// commented-out argument there).
// NOTE(review): not declared static despite the s_ prefix - presumably meant to
// be file-local; confirm no other translation unit references it before narrowing.
REFERENCE_TIME s_uDefaultDevicePeriod;

void AudioHelper::Platform_Initialise()
{
	static IMMDeviceEnumerator* pxEnumerator = 0;

	g_pcSignalBuffer = new char[ SIGNAL_BUFFER_SIZE ];
	memset( g_pcSignalBuffer, 0, SIGNAL_BUFFER_SIZE );

	HRESULT uRes = CoInitializeEx( 0, COINIT_MULTITHREADED );
	if( FAILED( uRes ) )
	{
		return;
	}

	uRes = CoCreateInstance( CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pxEnumerator);
	if( FAILED( uRes ) )
	{
		return;
	}
	
	uRes = pxEnumerator->GetDefaultAudioEndpoint( eRender, eConsole, &g_pxDevice );
	pxEnumerator->Release();
	pxEnumerator = 0;
	if( FAILED( uRes ) )
	{
		return;
	}

	uRes = g_pxDevice->Activate( IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&g_pxAudioClient );
	if( FAILED( uRes ) )
	{
		g_pxAudioClient->Release();
		g_pxAudioClient = 0;
		return;
	}

	WAVEFORMATEX* pxWavFmt = 0;
	uRes = g_pxAudioClient->GetDevicePeriod(&s_uDefaultDevicePeriod, NULL);
	uRes = g_pxAudioClient->GetMixFormat( &pxWavFmt );
	if( !FAILED( uRes ) )
	{
		switch ( pxWavFmt->wFormatTag ) 
		{
			case WAVE_FORMAT_IEEE_FLOAT:
			{
				pxWavFmt->wFormatTag = WAVE_FORMAT_PCM;
				pxWavFmt->wBitsPerSample = 16;
				pxWavFmt->nBlockAlign = pxWavFmt->nChannels * pxWavFmt->wBitsPerSample / 8;
				pxWavFmt->nAvgBytesPerSec = pxWavFmt->nBlockAlign * pxWavFmt->nSamplesPerSec;
			}
			break;

            case WAVE_FORMAT_EXTENSIBLE:
			{
				PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pxWavFmt);
				if( IsEqualGUID( KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat ) )
				{
					pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
					pEx->Samples.wValidBitsPerSample = 16;
					pxWavFmt->wBitsPerSample = 16;
					pxWavFmt->nBlockAlign = pxWavFmt->nChannels * pxWavFmt->wBitsPerSample / 8;
					pxWavFmt->nAvgBytesPerSec = pxWavFmt->nBlockAlign * pxWavFmt->nSamplesPerSec;
				}
				else 
				{
					// Can't get this into 16bit
					uRes = 1;
				}
				break;
			}
		}

		g_uBlockAlign = pxWavFmt->nBlockAlign;
		g_uChannels = pxWavFmt->nChannels;

		if( !FAILED( uRes ) )
		{
			uRes = g_pxAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0/*s_uDefaultDevicePeriod*/, 0, pxWavFmt, NULL);
			CoTaskMemFree( pxWavFmt );
			if( !FAILED( uRes ) )
			{
				uRes = g_pxAudioClient->GetService( IID_IAudioCaptureClient, (void**)&g_pxCaptureClient );
				if( !FAILED( uRes ) )
				{
					uRes = g_pxAudioClient->Start();
				}
				else
				{
					g_pxCaptureClient->Release();
					g_pxCaptureClient = 0;
				}
			}
		}
	}
	if( FAILED( uRes ) )
	{
		g_pxDevice->Release();
		g_pxDevice = 0;

		g_pxAudioClient->Release();
		g_pxAudioClient = 0;
		return;
	}
}

void AudioHelper::Platform_Update()
{
	if( !g_pxAudioClient )
	{
		return;
	}

	u_int uNumFrames = 0;
	do
	{
		HRESULT uRes = g_pxCaptureClient->GetNextPacketSize( &uNumFrames );
		if( FAILED( uRes ) )
		{
			break;
		}
		if( uNumFrames != 0 )
		{
			DWORD uFlags;
			BYTE* pcData;
			uRes = g_pxCaptureClient->GetBuffer( &pcData, &uNumFrames, &uFlags, NULL, NULL);
			if( FAILED( uRes ) || !uNumFrames )
			{
				break;
			}
			u_int uToWrite = uNumFrames * g_uBlockAlign;
			if( g_uBufferWritePos + uToWrite >= SIGNAL_BUFFER_SIZE )
			{
				const u_int uToEnd = ( SIGNAL_BUFFER_SIZE - g_uBufferWritePos );
				memcpy( &g_pcSignalBuffer[g_uBufferWritePos], pcData, uToEnd );
				uToWrite -= uToEnd;
				pcData += uToEnd;

				g_uBufferWritePos = 0;
			}
			if( uToWrite )
			{
				memcpy( &g_pcSignalBuffer[g_uBufferWritePos], pcData, uToWrite );
			}
			g_uBufferWritePos += uToWrite;

			g_pxCaptureClient->ReleaseBuffer( uNumFrames );
		}
	}
	while( uNumFrames );

	// Place the samples into a format fridge can use

	// Expect 2 channels for now
	
	// If the buffer wrapped, just take the end of the buffer, we don't particularly care about being super accurate
	const u_int uCopyFrom = g_uBufferWritePos < ( uNUM_OSC_SAMPLES * g_uBlockAlign ) ? ( SIGNAL_BUFFER_SIZE - uNUM_OSC_SAMPLES * g_uBlockAlign ) : ( g_uBufferWritePos - ( uNUM_OSC_SAMPLES * g_uBlockAlign ) );

	char* pcBufferPos = &g_pcSignalBuffer[ uCopyFrom ];
	for( u_int uFrame = 0; uFrame < uNUM_OSC_SAMPLES; ++uFrame )
	{
		short* psSample = reinterpret_cast< short* >( pcBufferPos );

		s_afLeftSamples[ uFrame ] = static_cast< float >( *psSample ) / 32768.0f;
		
		++psSample;

		s_afRightSamples[ uFrame ] = static_cast< float >( *psSample ) / 32768.0f;

		pcBufferPos += g_uBlockAlign;
	}
}

void AudioHelper::Platform_Shutdown()
{
	// Stops capture and releases the WASAPI objects in reverse order of
	// acquisition, then tears down COM and the signal buffer.
	//
	// BUGFIX: each pointer is now checked individually - initialisation can
	// fail part-way and leave any combination of them null, and the old code
	// dereferenced capture client and device under the audio-client guard.
	if( g_pxAudioClient )
	{
		// Stop the stream started in Platform_Initialise before releasing.
		g_pxAudioClient->Stop();
	}

	if( g_pxCaptureClient )
	{
		g_pxCaptureClient->Release();
		g_pxCaptureClient = 0;
	}

	if( g_pxAudioClient )
	{
		g_pxAudioClient->Release();
		g_pxAudioClient = 0;
	}

	if( g_pxDevice )
	{
		g_pxDevice->Release();
		g_pxDevice = 0;
	}

	CoUninitialize();

	delete []g_pcSignalBuffer;
	// Null the pointer so a stray later use fails predictably, not on a
	// dangling allocation.
	g_pcSignalBuffer = 0;
}

// eof
