
#include "Stdafx.h"
#include "KinectManager.h"
#include <mmsystem.h>
#include <assert.h>
#include <strsafe.h>

// Static initializers
LPCWSTR NuiManager::KinectManager::GrammarFileName = L"SpeechBasics-D2D.grxml";
//-------------------------------------------------------------------------------------
NuiManager::KinectManager::KinectManager(void)
	:nuiSensor(0),
	lastDepthFPStime(0),
	nuiSkeletonData(0)
{
	// FPS bookkeeping read by the processing thread. Without explicit
	// initialization the first frame-rate computation and ++depthFramesTotal
	// operated on uninitialized memory.
	depthFramesTotal = 0;
	lastDepthFramesTotal = 0;
	frameRate = 0;
	lastSkeletonFoundTime = 0;

	// get resolution as DWORDS, but store as LONGs to avoid casts later
	DWORD width = 0;
	DWORD height = 0;

	NuiImageResolutionToSize(cDepthResolution, width, height);
	m_depthWidth  = static_cast<LONG>(width);
	m_depthHeight = static_cast<LONG>(height);

	NuiImageResolutionToSize(cColorResolution, width, height);
	m_colorWidth  = static_cast<LONG>(width);
	m_colorHeight = static_cast<LONG>(height);

	// Ratio used when mapping color-space coordinates onto depth space.
	m_colorToDepthDivisor = m_colorWidth/m_depthWidth;

	// 32-bit RGBX working buffers sized to the color resolution.
	// Released in the destructor.
	m_colorRGBX = new BYTE[m_colorWidth*m_colorHeight*4];
	m_outputRGBX = new BYTE[m_colorWidth*m_colorHeight*4];

	// create heap storage for depth pixel data in RGBX format
	m_depthD16 = new USHORT[m_depthWidth*m_depthHeight];
	m_colorCoordinates = new LONG[m_depthWidth*m_depthHeight*2];

	// Fixed 640x480 BGRA buffers exposed via the get*Buffer accessors and
	// filled by the processing thread. malloc'd so UnInitNui can free() them.
	colorBuffer = (BYTE*) malloc (640 * 480 * 4 * sizeof(BYTE));
	depthBuffer = (BYTE*) malloc (640 * 480 * 4 * sizeof(BYTE));
	personMaskBuffer = (BYTE*) malloc (640 * 480 * 4 * sizeof(BYTE));
}

//-------------------------------------------------------------------------------------
NuiManager::KinectManager::~KinectManager(void)
{
	// Release the new[]'d buffers allocated in the constructor; the empty
	// destructor previously leaked all four. The malloc'd stream buffers
	// (colorBuffer, depthBuffer, personMaskBuffer) and nuiSkeletonData are
	// released in UnInitNui(), so they are not touched here.
	delete[] m_colorRGBX;
	delete[] m_outputRGBX;
	delete[] m_depthD16;
	delete[] m_colorCoordinates;
}

//-------------------------------------------------------------------------------------
// Initializes the Kinect runtime: creates the sensor, opens the color and
// depth streams, enables skeleton tracking when available, and starts the
// background processing thread. Returns S_OK on success or the first
// failing HRESULT.
HRESULT NuiManager::KinectManager::InitNui(void)
{	
	HRESULT hr;

	// Snapshot storage for the most recent skeleton frame (filled by
	// trackSkeleton, read via getSkeleton). Released in UnInitNui.
	// NOTE(review): allocated with malloc on every call — calling InitNui
	// twice without an intervening UnInitNui leaks the previous block.
	nuiSkeletonData = (NUI_SKELETON_DATA*)malloc(NUI_SKELETON_COUNT * sizeof (NUI_SKELETON_DATA));

	// Create the sensor only once; reuse it on re-initialization.
	if(!nuiSensor)
	{
		hr = NuiCreateSensorByIndex(0, &nuiSensor);
		if(FAILED(hr)) return hr;
		
		instanceId = nuiSensor->NuiDeviceConnectionId();
	}

	// Manual-reset events signaled by the runtime when a new depth, color,
	// or skeleton frame is ready; the processing thread waits on these.
	hNextDepthFrameEvent = CreateEvent( NULL, TRUE, FALSE, NULL );
	hNextColorFrameEvent = CreateEvent( NULL, TRUE, FALSE, NULL );
	hNextSkeletonEvent = CreateEvent( NULL, TRUE, FALSE, NULL );

	// Initialize Nui
	DWORD nuiFlags = NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX | NUI_INITIALIZE_FLAG_USES_SKELETON |  NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_AUDIO;
	hr = nuiSensor->NuiInitialize( nuiFlags );

	// Another process owns the skeletal engine: retry without skeleton,
	// player index, or audio support.
	if ( E_NUI_SKELETAL_ENGINE_BUSY == hr )
	{
		nuiFlags = NUI_INITIALIZE_FLAG_USES_DEPTH |  NUI_INITIALIZE_FLAG_USES_COLOR;
		hr = nuiSensor->NuiInitialize( nuiFlags) ;
	}

	// Skeleton Tracking
	if(FAILED(hr)) return hr;

	if ( HasSkeletalEngine( nuiSensor ) )
	{
		hr = nuiSensor->NuiSkeletonTrackingEnable( hNextSkeletonEvent, 0 );
		if(FAILED(hr)) return hr;
	}

	// RGB Image — 640x480, double buffered.
	hr = nuiSensor->NuiImageStreamOpen(
		NUI_IMAGE_TYPE_COLOR,
		NUI_IMAGE_RESOLUTION_640x480,
		0,
		2,
		hNextColorFrameEvent,
		&pVideoStreamHandle );

	if(FAILED(hr)) return hr;
		
	// Depth Image — include the per-pixel player index only when the
	// skeletal engine is available (required by trackDepthImage's masking).
	hr = nuiSensor->NuiImageStreamOpen(
		HasSkeletalEngine(nuiSensor) ? NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX : NUI_IMAGE_TYPE_DEPTH,
		NUI_IMAGE_RESOLUTION_640x480,
		0,
		2,
		hNextDepthFrameEvent,
		&pDepthStreamHandle );

	if(FAILED(hr)) return hr;
	
	// Start the Nui processing thread (auto-reset stop event, signaled in
	// UnInitNui to request shutdown).
	hEvNuiProcessStop = CreateEvent( NULL, FALSE, FALSE, NULL );
	hThNuiProcess = CreateThread( NULL, 0, nuiProcessThread, this, 0, NULL );

		
	return hr;
}
//-------------------------------------------------------------------------------------
void NuiManager::KinectManager::InitializeAudioSystem()
{
	InitAudioStream();
	CreateSpeechRecognizer();
	LoadSpeechGrammar();
	StartSpeechRecognition();
}
//-------------------------------------------------------------------------------------
// Wires the Kinect microphone array through the AEC DMO into a SAPI
// stream (m_pSpeechStream) that the recognizer can read.
// Returns S_OK on success, otherwise the first failing HRESULT.
HRESULT NuiManager::KinectManager::InitAudioStream()
{
	INuiAudioBeam*      pNuiAudioSource = NULL;
	IMediaObject*       pDMO = NULL;
	IPropertyStore*     pPropertyStore = NULL;
	IStream*            pStream = NULL;

	// Get the audio source
	HRESULT hr = nuiSensor->NuiGetAudioSource(&pNuiAudioSource);
	if (SUCCEEDED(hr))
	{
		hr = pNuiAudioSource->QueryInterface(IID_IMediaObject, (void**)&pDMO);

		if (SUCCEEDED(hr))
		{
			hr = pNuiAudioSource->QueryInterface(IID_IPropertyStore, (void**)&pPropertyStore);

			if (SUCCEEDED(hr))
			{
				// Set AEC-MicArray DMO system mode. This must be set for the DMO to work properly.
				// Possible values are:
				//   SINGLE_CHANNEL_AEC = 0
				//   OPTIBEAM_ARRAY_ONLY = 2
				//   OPTIBEAM_ARRAY_AND_AEC = 4
				//   SINGLE_CHANNEL_NSAGC = 5
				PROPVARIANT pvSysMode;
				PropVariantInit(&pvSysMode);
				pvSysMode.vt = VT_I4;
				pvSysMode.lVal = (LONG)(2); // Use OPTIBEAM_ARRAY_ONLY setting. Set OPTIBEAM_ARRAY_AND_AEC instead if you expect to have sound playing from speakers.
				hr = pPropertyStore->SetValue(MFPKEY_WMAAECMA_SYSTEM_MODE, pvSysMode);
				PropVariantClear(&pvSysMode);
			}

			if (SUCCEEDED(hr))
			{
				// Set DMO output format
				WAVEFORMATEX wfxOut = {AudioFormat, AudioChannels, AudioSamplesPerSecond, AudioAverageBytesPerSecond, AudioBlockAlign, AudioBitsPerSample, 0};
				DMO_MEDIA_TYPE mt = {0};

				// MoInitMediaType allocates mt.pbFormat; checking it before
				// the memcpy avoids writing through a null format pointer.
				hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX));

				if (SUCCEEDED(hr))
				{
					mt.majortype = MEDIATYPE_Audio;
					mt.subtype = MEDIASUBTYPE_PCM;
					mt.lSampleSize = 0;
					mt.bFixedSizeSamples = TRUE;
					mt.bTemporalCompression = FALSE;
					mt.formattype = FORMAT_WaveFormatEx;	
					memcpy(mt.pbFormat, &wfxOut, sizeof(WAVEFORMATEX));

					hr = pDMO->SetOutputType(0, &mt, 0);

					if (SUCCEEDED(hr))
					{
						// Wrap the DMO in our pull-model IStream adapter.
						m_pKinectAudioStream = new KinectAudioStream(pDMO);

						hr = m_pKinectAudioStream->QueryInterface(IID_IStream, (void**)&pStream);

						if (SUCCEEDED(hr))
						{
							hr = CoCreateInstance(CLSID_SpStream, NULL, CLSCTX_INPROC_SERVER, __uuidof(ISpStream), (void**)&m_pSpeechStream);

							if (SUCCEEDED(hr))
							{
								hr = m_pSpeechStream->SetBaseStream(pStream, SPDFID_WaveFormatEx, &wfxOut);
							}
						}
					}

					MoFreeMediaType(&mt);
				}
			}
		}
	}

	SafeRelease(pStream);
	SafeRelease(pPropertyStore);
	SafeRelease(pDMO);
	SafeRelease(pNuiAudioSource);

	return hr;
}
//-------------------------------------------------------------------------------------
/// <summary>
/// Create speech recognizer that will read Kinect audio stream data.
/// </summary>
/// <returns>
/// <para>S_OK on success, otherwise failure code.</para>
/// </returns>
/// <summary>
/// Create speech recognizer that will read Kinect audio stream data.
/// </summary>
/// <returns>
/// <para>S_OK on success, otherwise failure code.</para>
/// </returns>
HRESULT NuiManager::KinectManager::CreateSpeechRecognizer()
{
	ISpObjectToken *pEngineToken = NULL;

	HRESULT hr = CoCreateInstance(CLSID_SpInprocRecognizer, NULL, CLSCTX_INPROC_SERVER, __uuidof(ISpRecognizer), (void**)&m_pSpeechRecognizer);

	// Each step depends on the previous one; the original discarded the
	// HRESULTs of SetInput and SetRecognizer, masking setup failures.
	if (SUCCEEDED(hr))
	{
		// Feed the recognizer from the Kinect audio stream prepared in
		// InitAudioStream (FALSE: don't allow format changes).
		hr = m_pSpeechRecognizer->SetInput(m_pSpeechStream, FALSE);
	}

	if (SUCCEEDED(hr))
	{
		// Select the en-US Kinect acoustic model.
		hr = SpFindBestToken(SPCAT_RECOGNIZERS,L"Language=409;Kinect=True",NULL,&pEngineToken);
	}

	if (SUCCEEDED(hr))
	{
		hr = m_pSpeechRecognizer->SetRecognizer(pEngineToken);
	}

	if (SUCCEEDED(hr))
	{
		hr = m_pSpeechRecognizer->CreateRecoContext(&m_pSpeechContext);
	}

	SafeRelease(pEngineToken);

	return hr;
}
//-------------------------------------------------------------------------------------

/// <summary>
/// Load speech recognition grammar into recognizer.
/// </summary>
/// <returns>
/// <para>S_OK on success, otherwise failure code.</para>
/// </returns>
/// <summary>
/// Load speech recognition grammar into recognizer.
/// </summary>
/// <returns>
/// <para>S_OK on success, otherwise failure code.</para>
/// </returns>
HRESULT NuiManager::KinectManager::LoadSpeechGrammar()
{
	// Create a grammar object on the recognition context...
	HRESULT hr = m_pSpeechContext->CreateGrammar(1, &m_pSpeechGrammar);
	if (FAILED(hr))
	{
		return hr;
	}

	// ...then populate it from the static grammar file on disk.
	return m_pSpeechGrammar->LoadCmdFromFile(GrammarFileName, SPLO_STATIC);
}
//-------------------------------------------------------------------------------------

/// <summary>
/// Start recognizing speech asynchronously.
/// </summary>
/// <returns>
/// <para>S_OK on success, otherwise failure code.</para>
/// </returns>
/// <summary>
/// Start recognizing speech asynchronously.
/// </summary>
/// <returns>
/// <para>S_OK on success, otherwise failure code.</para>
/// </returns>
HRESULT NuiManager::KinectManager::StartSpeechRecognition()
{
	HRESULT hr = m_pKinectAudioStream->StartCapture();

	// Propagate each intermediate HRESULT; the original silently discarded
	// failures from SetRuleState/SetRecoState/SetInterest, so callers saw
	// S_OK even when recognition never actually started.
	if (SUCCEEDED(hr))
	{
		// Specify that all top level rules in grammar are now active
		hr = m_pSpeechGrammar->SetRuleState(NULL, NULL, SPRS_ACTIVE);
	}

	if (SUCCEEDED(hr))
	{
		// Specify that engine should always be reading audio
		hr = m_pSpeechRecognizer->SetRecoState(SPRST_ACTIVE_ALWAYS);
	}

	if (SUCCEEDED(hr))
	{
		// Specify that we're only interested in receiving recognition events
		hr = m_pSpeechContext->SetInterest(SPFEI(SPEI_RECOGNITION), SPFEI(SPEI_RECOGNITION));
	}

	if (SUCCEEDED(hr))
	{
		// Ensure that engine is recognizing speech and not in paused state
		hr = m_pSpeechContext->Resume(0);
	}

	if (SUCCEEDED(hr))
	{
		// Handle polled by getSoundEvent() to detect recognition events.
		m_hSpeechEvent = m_pSpeechContext->GetNotifyEventHandle();
	}

	return hr;
}
//-------------------------------------------------------------------------------------
// Shuts down the runtime: stops the processing thread, shuts down and
// releases the sensor, closes the frame events, and frees the shared
// buffers. Safe to call more than once (freed pointers are nulled).
void NuiManager::KinectManager::UnInitNui(void)
{
	// Stop the Nui processing thread FIRST: it writes nuiSkeletonData and
	// the pixel buffers, so those must outlive it. (The original freed
	// nuiSkeletonData before stopping the thread — a use-after-free race.)
	if ( hEvNuiProcessStop != NULL )
	{
		// Signal the thread
		SetEvent(hEvNuiProcessStop);

		// Wait for thread to stop
		if ( hThNuiProcess != NULL )
		{
			WaitForSingleObject( hThNuiProcess, INFINITE );
			CloseHandle( hThNuiProcess );
			hThNuiProcess = NULL;
		}
		CloseHandle( hEvNuiProcessStop );
		hEvNuiProcessStop = NULL;
	}

	if (nuiSensor) { nuiSensor->NuiShutdown(); }

	if (hNextSkeletonEvent && (hNextSkeletonEvent != INVALID_HANDLE_VALUE))
	{
		CloseHandle(hNextSkeletonEvent);
		hNextSkeletonEvent = NULL;
	}

	if (hNextDepthFrameEvent && (hNextDepthFrameEvent != INVALID_HANDLE_VALUE))
	{
		CloseHandle(hNextDepthFrameEvent);
		hNextDepthFrameEvent = NULL;
	}

	if (hNextColorFrameEvent && (hNextColorFrameEvent != INVALID_HANDLE_VALUE))
	{
		CloseHandle(hNextColorFrameEvent);
		hNextColorFrameEvent = NULL;
	}

	if (nuiSensor)
	{
		nuiSensor->Release();
		nuiSensor = NULL;
	}

	// nuiSkeletonData was allocated with malloc() in InitNui, so it must be
	// released with free(); the original `delete` was undefined behavior.
	if (nuiSkeletonData)
	{
		free(nuiSkeletonData);
		nuiSkeletonData = NULL;
	}

	// Free the malloc'd pixel buffers and null them so a repeated
	// UnInitNui call cannot double-free.
	free(depthBuffer);
	depthBuffer = NULL;
	free(colorBuffer);
	colorBuffer = NULL;
	free(personMaskBuffer);
	personMaskBuffer = NULL;
}


//-------------------------------------------------------------------------------------
// Asks the Kinect runtime how many sensors are currently attached.
// Returns 0 when the query fails (the count stays at its initial value).
size_t NuiManager::KinectManager::getDeviceCount(void)
{
	int sensorCount = 0;
	NuiGetSensorCount(&sensorCount);

	return static_cast<size_t>(sensorCount);
}

//-------------------------------------------------------------------------------------
bool NuiManager::KinectManager::trackSkeleton(void)
{
	NUI_SKELETON_FRAME skeletonFrame = {0};
	bool foundSkeleton = false;

	if(SUCCEEDED(nuiSensor->NuiSkeletonGetNextFrame(0, &skeletonFrame)))
	{
		for ( int i = 0 ; i < NUI_SKELETON_COUNT ; i++ )
		{
			if( skeletonFrame.SkeletonData[i].eTrackingState == NUI_SKELETON_TRACKED ||
				(skeletonFrame.SkeletonData[i].eTrackingState == NUI_SKELETON_POSITION_ONLY))
			{
				foundSkeleton = true;
			}
		}
	}

	// no skeletons!
	if( !foundSkeleton ) { return false; }

	// smooth out the skeleton data
	HRESULT hr = nuiSensor->NuiTransformSmooth(&skeletonFrame, NULL);
	if ( FAILED(hr) ) { return false; }

	//NUI_SKELETON_FRAME temp = skeletonFrame;
	//nuiSkeletonFrame = &temp;

	for(int i = 0; i < NUI_SKELETON_COUNT; i++)
		nuiSkeletonData[i] = skeletonFrame.SkeletonData[i];

	lastSkeletonFoundTime = timeGetTime();

	return true;
}

//-------------------------------------------------------------------------------------
bool NuiManager::KinectManager::trackColorImage(void)
{
	NUI_IMAGE_FRAME imageFrame;
	HRESULT hr = nuiSensor->NuiImageStreamGetNextFrame(pVideoStreamHandle, 0, &imageFrame);
	if(FAILED(hr)) return false;

	INuiFrameTexture* texture = imageFrame.pFrameTexture;

	NUI_LOCKED_RECT lockedRect;
	texture->LockRect(0, &lockedRect, NULL, 0);

	if(lockedRect.Pitch != 0)
	{
		BYTE* pBits = (BYTE*) lockedRect.pBits;
		int offset = 0;

		for(size_t i = 0; i < 640; i++)
		{
			for (size_t j = 0; j < 480; j++)
			{
				colorBuffer[offset++] = pBits[0]; // B
				colorBuffer[offset++] = pBits[1]; // G
				colorBuffer[offset++] = pBits[2]; // R
				colorBuffer[offset++] = 254;      // A

				pBits += 4;
			}
		}
	}

	texture->UnlockRect(0);

	nuiSensor->NuiImageStreamReleaseFrame(pVideoStreamHandle, &imageFrame);
	
	return true;
}

//-------------------------------------------------------------------------------------
// Pulls the next depth frame and rebuilds two 640x480 BGRA buffers:
// depthBuffer (depth visualization tinted per player index) and
// personMaskBuffer (white wherever a player covers the pixel, black
// elsewhere). Returns false when no new frame is available yet.
bool NuiManager::KinectManager::trackDepthImage(void)
{
	NUI_IMAGE_FRAME imageFrame;
	HRESULT hr = nuiSensor->NuiImageStreamGetNextFrame(pDepthStreamHandle, 0, &imageFrame);
	if(FAILED(hr)) return false;

	INuiFrameTexture* texture = imageFrame.pFrameTexture;

	NUI_LOCKED_RECT lockedRect;
	texture->LockRect(0, &lockedRect, NULL, 0);

	// Pitch == 0 means the locked buffer holds no data; skip conversion.
	if(lockedRect.Pitch != 0)
	{
		USHORT *pBuff = (USHORT*) lockedRect.pBits;

		for (int i = 0; i < 640 * 480; i++)
		{
			// Packed depth pixel: low 3 bits = player index (0 = no player),
			// upper 13 bits = depth. 0x0fff caps the usable depth range.
			BYTE index = pBuff[i] & 0x07;
            USHORT realDepth = (pBuff[i] & 0xFFF8) >> 3;
			// Map depth to intensity: nearer = brighter, farther = darker.
            BYTE scale = 255 - (BYTE)(256 * realDepth / 0x0fff);
			int CHANNEL = 4;

			// Default both output pixels to opaque (alpha 254) black.
            depthBuffer[CHANNEL * i] = depthBuffer[CHANNEL * i + 1] = depthBuffer[CHANNEL * i + 2] = 0;
			depthBuffer[CHANNEL * i + 3 ] = 254;
            
			// Track person mask
			personMaskBuffer[CHANNEL * i] = personMaskBuffer[CHANNEL * i + 1] = personMaskBuffer[CHANNEL * i + 2] = 0;
			personMaskBuffer[CHANNEL * i + 3 ] = 254;

			// Any non-zero player index means a tracked person is at this pixel.
			if(index != 0){
				personMaskBuffer[CHANNEL * i    ] = 255;
				personMaskBuffer[CHANNEL * i + 1] = 255;
				personMaskBuffer[CHANNEL * i + 2] = 255;
			}

			// Color the depth pixel by player index (channels are B, G, R):
			// index 0 = gray background, 1-6 = distinct per-player tints,
			// 7 = inverted gray.
			switch (index)
            {
            case 0:
                depthBuffer[CHANNEL * i] = scale / 2;
                depthBuffer[CHANNEL * i + 1] = scale / 2;
                depthBuffer[CHANNEL * i + 2] = scale / 2;
                break;
            case 1:
                depthBuffer[CHANNEL * i] = scale;
                break;
            case 2:
                depthBuffer[CHANNEL * i + 1] = scale;
                break;
            case 3:
                depthBuffer[CHANNEL * i + 2] = scale;
                break;
            case 4:
                depthBuffer[CHANNEL * i] = scale;
                depthBuffer[CHANNEL * i + 1] = scale;
                break;
            case 5:
                depthBuffer[CHANNEL * i] = scale;
                depthBuffer[CHANNEL * i + 2] = scale;
                break;
            case 6:
                depthBuffer[CHANNEL * i + 1] = scale;
                depthBuffer[CHANNEL * i + 2] = scale;
                break;
            case 7:
                depthBuffer[CHANNEL * i] = 255 - scale / 2;
                depthBuffer[CHANNEL * i + 1] = 255 - scale / 2;
                depthBuffer[CHANNEL * i + 2] = 255 - scale / 2;
                break;
            }
		}
	}

	texture->UnlockRect(0);

	nuiSensor->NuiImageStreamReleaseFrame(pDepthStreamHandle, &imageFrame);
	
	
	return true;
}

//-------------------------------------------------------------------------------------
// Static thread entry point: the KinectManager instance is passed as the
// thread context, so delegate straight to its member implementation.
DWORD WINAPI NuiManager::KinectManager::nuiProcessThread(LPVOID pParam)
{
	return static_cast<KinectManager*>(pParam)->nuiProcessThread();
}

//-------------------------------------------------------------------------------------
// nuiProcessThread
// Thread to handle Kinect processing
//-------------------------------------------------------------------------------------
//-------------------------------------------------------------------------------------
// nuiProcessThread
// Thread to handle Kinect processing: waits on the stop/depth/color/skeleton
// events and dispatches to the matching track*() routine, maintaining a
// once-per-second depth frame-rate estimate.
//-------------------------------------------------------------------------------------
DWORD WINAPI NuiManager::KinectManager::nuiProcessThread()
{
	const int numEvents = 4;
	// Index 0 must be the stop event so it wins ties in WaitForMultipleObjects.
	HANDLE hEvents[numEvents] = { hEvNuiProcessStop, hNextDepthFrameEvent, hNextColorFrameEvent, hNextSkeletonEvent };
	int    nEventIdx;
	DWORD  t;

	lastDepthFPStime = timeGetTime();
	lastSkeletonFoundTime = 0;
	
	// thread loop
	bool continueProcessing = true;
	while(continueProcessing)
	{
		// wait for any of the events to be signaled (100ms timeout so the
		// loop stays responsive even when no frames arrive)
		nEventIdx = WaitForMultipleObjects(numEvents, hEvents, FALSE, 100);

		// process signal events
		if(nEventIdx == WAIT_TIMEOUT)
		{
			continue;
		}
		else if(nEventIdx == WAIT_FAILED)
		{
			// A handle became invalid (e.g. closed during shutdown). Exit
			// instead of spinning forever — the original fell through and
			// looped on the failure.
			continueProcessing = false;
			continue;
		}
		else if(nEventIdx == WAIT_OBJECT_0)
		{
			// Stop event: leave the loop.
			continueProcessing = false;
			continue;
		}
		else if(nEventIdx == (WAIT_OBJECT_0 + 1))
		{
			trackDepthImage();
			++depthFramesTotal;
		}
		else if(nEventIdx == (WAIT_OBJECT_0 + 2))
		{
			trackColorImage();
		}
		else if(nEventIdx == (WAIT_OBJECT_0 + 3))
		{
			trackSkeleton();
		}

		// Recompute the depth FPS once per second (+500 rounds to nearest).
		t = timeGetTime();
		if((t - lastDepthFPStime) > 1000)
		{
			frameRate = ((depthFramesTotal - lastDepthFramesTotal) * 1000 + 500) / (t - lastDepthFPStime);
			lastDepthFramesTotal = depthFramesTotal;
			lastDepthFPStime = t;
		}
	}

	return 0;
}

//-------------------------------------------------------------------------------------
// Returns a pointer into the cached skeleton snapshot for the given slot.
// No bounds check — callers must pass 0..NUI_SKELETON_COUNT-1.
NUI_SKELETON_DATA* NuiManager::KinectManager::getSkeleton(int playerIndex)
{
	return nuiSkeletonData + playerIndex;
}

//-------------------------------------------------------------------------------------
// Accessor for the 640x480 BGRA color buffer maintained by trackColorImage.
// The returned storage is owned by this object; do not free it.
BYTE* NuiManager::KinectManager::getColorBuffer()
{
	return colorBuffer;
}

//-------------------------------------------------------------------------------------
// Accessor for the 640x480 BGRA depth-visualization buffer maintained by
// trackDepthImage. The returned storage is owned by this object.
BYTE* NuiManager::KinectManager::getDepthBuffer()
{
	return depthBuffer;
}
//-------------------------------------------------------------------------------------
// Accessor for the 640x480 BGRA player-mask buffer (white where a player
// is detected) maintained by trackDepthImage. Owned by this object.
BYTE* NuiManager::KinectManager::getPersonMaskBuffer()
{
	return personMaskBuffer;
}

//-------------------------------------------------------------------------------------
int NuiManager::KinectManager::getSoundEvent()
{
	static HANDLE hEvents = m_hSpeechEvent;

	// Check to see if we have either a message (by passing in QS_ALLINPUT)
	// Or a speech event (hEvents)
	DWORD dwEvent = MsgWaitForMultipleObjectsEx(1, &hEvents, INFINITE, QS_ALLINPUT, MWMO_INPUTAVAILABLE);

	// Check if this is an event we're waiting on and not a timeout or message
	if (WAIT_OBJECT_0 == dwEvent)
	{
		return ProcessSpeech();
	}
}
//-------------------------------------------------------------------------------------
int NuiManager::KinectManager::ProcessSpeech()
{
	int returnValue = -2;
	const float ConfidenceThreshold = 0.3f;

	SPEVENT curEvent;
	ULONG fetched = 0;
	HRESULT hr = S_OK;

	m_pSpeechContext->GetEvents(1, &curEvent, &fetched);

	switch (curEvent.eEventId)
	{
	case SPEI_RECOGNITION:
		if (SPET_LPARAM_IS_OBJECT == curEvent.elParamType)
		{
			// this is an ISpRecoResult
			ISpRecoResult* result = reinterpret_cast<ISpRecoResult*>(curEvent.lParam);
			SPPHRASE* pPhrase = NULL;

			hr = result->GetPhrase(&pPhrase);
			if (SUCCEEDED(hr))
			{
				if ((pPhrase->pProperties != NULL) && (pPhrase->pProperties->pFirstChild != NULL))
				{
					const SPPHRASEPROPERTY* pSemanticTag = pPhrase->pProperties->pFirstChild;
					if (pSemanticTag->SREngineConfidence > ConfidenceThreshold)
					{
						//TurtleAction action = MapSpeechTagToAction(pSemanticTag->pszValue);
						//m_pTurtleController->DoAction(action);
						returnValue = MapSpeechTagToAction(pSemanticTag->pszValue);
					}
				}
				::CoTaskMemFree(pPhrase);
			}
		}
		break;
	}

	
	return returnValue;
}
//-------------------------------------------------------------------------------------
// Maps a recognized semantic tag to its numeric action code.
// Returns -1 for a NULL or unrecognized tag.
int NuiManager::KinectManager::MapSpeechTagToAction(LPCWSTR pszSpeechTag)
{
	struct SpeechTagToAction
	{
		LPCWSTR pszSpeechTag;
		int action;
	};
	const SpeechTagToAction Map[] =
	{
		{L"RESET", 1},
		{L"WEATHER", 2},
		{L"LEFT", 3},
		{L"RIGHT", 4}
	};

	// Guard against a NULL tag (pszValue from the phrase properties can be
	// NULL) — wcscmp with NULL is undefined behavior.
	if (pszSpeechTag == NULL)
	{
		return -1;
	}

	for (int i = 0; i < _countof(Map); ++i)
	{
		if (0 == wcscmp(Map[i].pszSpeechTag, pszSpeechTag))
		{
			return Map[i].action;
		}
	}

	return -1;
}
