//
// Copyright (C) Robert Bosch GmbH
// Date: March 2010
// Author: CR/RTC2-AP Jim Lundberg

#include "CommonUtil.h"
#include "UIOSAL.h"
#include "UIConfigResourceManager.h"
#include "UILogger.h"
#include "UISpeechEvent.h"
#include "HybridSpeechRecognizer.h"

#include "vocon45wrapper.h"
#include "AudioLogger.h"

#include <stddef.h>  /* Needed for the size_t definition. */
#include <string>
#include <vector>
#include <map>
#include <fstream>
#include <time.h>

#include "vocon3200_base.h"
#include "vocon3200_asr.h"
#include "vocon3200_sem.h"
#include "vocon3200_pron.h"
#include "vocon3200_gram2.h"
#include "vocon3200_sse.h"
#include "a2s_asr2sem.h"
#include "vocon_ext_filter.h"
#include "vocon_ext_heap.h"
#include "vocon_ext_stream.h"
#include "vocon_ext_audioin.h"
#include "vocon_ext_asr2sem.h"

using namespace std;

/**
 * Read the VoCon configuration (acoustic model, wave prompts, contexts,
 * slots, fields and SEM objects) from the configurator and bring the
 * engine up (recognizer, contexts, SEM).
 *
 * Returns false (after logging the missing key) when any mandatory
 * setting is absent, or when engine initialization fails; in the latter
 * case partially-created objects are torn down via closeAll()/destory().
 */
bool Vocon45::initialize(void)
{
	if (!m_pConfigurator->getFileName(MODULE_SpeechManager, "vocon", "acmFileName", cfgData.acmFileName)) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "acmFile is not defined.");
		return false;
	}

	/* At least one of ddg2p / dict must be configured. */
	cfgData.ddg2pFileName = "";
	cfgData.dictFileName = "";
	if (!m_pConfigurator->getFileName(MODULE_SpeechManager, "vocon", "ddg2pFileName", cfgData.ddg2pFileName) &&
		!m_pConfigurator->getFileName(MODULE_SpeechManager, "vocon", "dictFileName", cfgData.dictFileName)) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "Both ddg2pFile and dictFile are not defined.");
		return false;
	}

	if (!m_pConfigurator->getFileName(MODULE_SpeechManager, "vocon", "startWave", cfgData.startWaveFileName)) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "startWaveFile is not defined.");
		return false;
	}
	if (!m_pConfigurator->getFileName(MODULE_SpeechManager, "vocon", "endWave", cfgData.endWaveFileName)) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "endWaveFile is not defined.");
		return false;
	}
	if (!m_pConfigurator->getIntValue(MODULE_SpeechManager, "vocon", "numberOfContexts", cfgData.numberOfContexts)) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "numberOfContexts is not defined.");
		return false;
	}
	if (!m_pConfigurator->getIntValue(MODULE_SpeechManager, "vocon", "numberOfSem", cfgData.numberOfSem)) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "numberOfSem is not defined.");
		return false;
	}

	/* Optional: n-best list size, defaults to 3. */
	cfgData.nBest = 3;
	m_pConfigurator->getIntValue(MODULE_SpeechManager, "vocon", "nbest", cfgData.nBest);

	/* load context */
	for (int iCtx=0; iCtx<cfgData.numberOfContexts; iCtx++)
	{
		std::string blockName;
		
		/* load context basic config */
		blockName = "vocon_context_" + toStr(iCtx+1);

		if (!m_pConfigurator->getStringValue(MODULE_SpeechManager, blockName, "contextID", cfgData.cfgCtx[iCtx].id)) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "contextID is not defined for context_" + toStr(iCtx+1));
			return false;
		}
		cfgData.ctxID2Index[cfgData.cfgCtx[iCtx].id] = iCtx;

		if (!m_pConfigurator->getStringValue(MODULE_SpeechManager, blockName, "contextType", cfgData.cfgCtx[iCtx].type)) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "contextType is not defined for context_" + toStr(iCtx+1));
			return false;
		}

		if (!m_pConfigurator->getFileName(MODULE_SpeechManager, blockName, "fileName", cfgData.cfgCtx[iCtx].fileName)) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fileName is not defined for context_" + toStr(iCtx+1));
			return false;
		}

		if (!m_pConfigurator->getStringValue(MODULE_SpeechManager, blockName, "slotLoadingType", cfgData.cfgCtx[iCtx].slotLoadingType)) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "slotLoadingType is not defined for context_" + toStr(iCtx+1));
			return false;
		}

		/* Optional per-context settings (defaults applied first). */
		cfgData.cfgCtx[iCtx].enableExportSlotContext	= false;
		m_pConfigurator->getBoolValue(MODULE_SpeechManager, blockName, "enableExportSlotContext", cfgData.cfgCtx[iCtx].enableExportSlotContext);

		cfgData.cfgCtx[iCtx].grammarName				= "";
		m_pConfigurator->getStringValue(MODULE_SpeechManager, blockName, "grammarName", cfgData.cfgCtx[iCtx].grammarName);

		cfgData.cfgCtx[iCtx].semId						= "";
		m_pConfigurator->getStringValue(MODULE_SpeechManager, blockName, "semObjID", cfgData.cfgCtx[iCtx].semId);

		cfgData.cfgCtx[iCtx].idMappingFileName			= "";
		m_pConfigurator->getFileName(MODULE_SpeechManager, blockName, "idMappingFile", cfgData.cfgCtx[iCtx].idMappingFileName);

		/* load context parameters */
		blockName = "vocon_ctx" + toStr(iCtx+1) + "_params";
		int value;
		if (!m_pConfigurator->getIntValue(MODULE_SpeechManager, blockName, "LH_CTX_PARAM_ACCURACY", value)) {
			/* fix: the error message previously reported "slotLoadingType" here */
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "LH_CTX_PARAM_ACCURACY is not defined for context_" + toStr(iCtx+1));
			return false;
		}
		cfgData.cfgCtx[iCtx].ctxParamAcc = value;

		if (!m_pConfigurator->getIntValue(MODULE_SpeechManager, blockName, "LH_CTX_PARAM_INITBEAMWIDTH", value)) {
			/* fix: the error message previously reported "slotLoadingType" here */
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "LH_CTX_PARAM_INITBEAMWIDTH is not defined for context_" + toStr(iCtx+1));
			return false;
		}
		cfgData.cfgCtx[iCtx].ctxParamInitB = value;

		/* load context slots: every parameter in the block is a slot name. */
		blockName = "vocon_ctx" + toStr(iCtx+1) + "_slots";
		std::vector<std::string> slotnames;
		m_pConfigurator->getAllParameterNames(MODULE_SpeechManager, blockName, slotnames);
		for (int iSlot=0; iSlot<(int)slotnames.size(); iSlot++) {
			std::string slotFileName;
			m_pConfigurator->getFileName(MODULE_SpeechManager, blockName, slotnames[iSlot], slotFileName); 
			cfgData.cfgCtx[iCtx].slot2FileName[uppercase(slotnames[iSlot])] = slotFileName;
		}

		/* load context fields */
		blockName = "vocon_ctx" + toStr(iCtx+1) + "_fields";
		std::vector<std::string> fieldNames;
		m_pConfigurator->getAllParameterNames(MODULE_SpeechManager, blockName, fieldNames);
		for (int iField=0; iField<(int)fieldNames.size(); iField++) {
			std::string fieldFileName;
			/* fix: previously indexed slotnames[iField] (wrong vector, possible
			 * out-of-bounds access when there are more fields than slots). */
			m_pConfigurator->getFileName(MODULE_SpeechManager, blockName, fieldNames[iField], fieldFileName); 
			cfgData.cfgCtx[iCtx].field2FileName[uppercase(fieldNames[iField])] = fieldFileName;
		}
	}

	/* load sem object */
	for (int iSem=0; iSem<(int)cfgData.numberOfSem; iSem++)
	{
		std::string blockName = "vocon_sem_" + toStr(iSem+1);
		
		if (!m_pConfigurator->getStringValue(MODULE_SpeechManager, blockName, "semID", cfgData.cfgSem[iSem].semId)) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "semID is not defined for sem_" + toStr(iSem+1));
			return false;
		}
		cfgData.semID2Index[cfgData.cfgSem[iSem].semId] = iSem;

		if (!m_pConfigurator->getFileName(MODULE_SpeechManager, blockName, "fileName", cfgData.cfgSem[iSem].fileName)) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fileName is not defined for sem_" + toStr(iSem+1));
			return false;
		}
	}

	initObjects();
	
	/* Bring the engine up; on any failure, tear everything down again. */
	if (!initRecoginer()) {
		closeAll();
		destory();
		return false;
	}
	if (!initContexts()) {
		closeAll();
		destory();
		return false;
	}
	if (!initSem()) {
		closeAll();
		destory();
		return false;
	}
	
	/* Copies for the prompt-playback API; allocated with new[], so they
	 * must be released with delete[] (see destory()). */
	startWV = new char[cfgData.startWaveFileName.length()+1];
	strcpy (startWV, cfgData.startWaveFileName.c_str());
	
	endWV = new char[cfgData.endWaveFileName.length()+1];
	strcpy (endWV, cfgData.endWaveFileName.c_str());
	//startWV = new wchar_t[cfgData.startWaveFileName.length()+1];
	//mbstowcs_s(&pRetVal, startWV, cfgData.startWaveFileName.length()+1, cfgData.startWaveFileName.c_str(), cfgData.startWaveFileName.length());
	//endWV = new wchar_t[cfgData.endWaveFileName.length()+1];
	//mbstowcs_s(&pRetVal, endWV, cfgData.endWaveFileName.length()+1, cfgData.endWaveFileName.c_str(), cfgData.endWaveFileName.length());
	m_isInitialized = true;
	return true;
}

/**
 * Tear down the engine in reverse order of creation: remove and close all
 * contexts (including list/field slot contexts), release the audio
 * producer/consumer chain, close the ASR objects, free the wave-prompt
 * buffers, close the SEM objects/components/heaps, terminate the
 * ASR/Pron/Base components, and finally close the private heap.
 *
 * Returns false (after logging) as soon as any engine call fails; later
 * resources are then NOT released.
 * NOTE(review): method name is a typo for "destroy" but is kept for
 * interface compatibility.
 */
bool Vocon45::destory() {
	LH_ERROR lhErr = LH_OK;  /* Error from the VoCon3200 (lh_) API. */
	PH_ERROR phErr = PH_OK;  /* Error from the private heap (ph_) API. */
	map<string, string>::iterator iter;
	int totalNumOfListCtx=0, totalNumOfFieldCtx=0, j;
	string strSlotName;

	//remove context
	for(int i =0;i<cfgData.numberOfContexts;i++) {
		lhErr = lh_RecRemoveCtx(recObjs.hRec, recObjs.hCtx[i]);
		if (LH_OK != lhErr) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to remove context");
			return false;
		}

		/* Clear every slot of the hosting context.  For grammar contexts the
		 * engine expects "<grammarName>#<slot>"; SLM contexts use the bare name. */
		for(iter=cfgData.cfgCtx[i].slot2FileName.begin(),j=0; j<(int)cfgData.cfgCtx[i].slot2FileName.size(); iter++, j++) {
			if (cfgData.cfgCtx[i].type == "slm")
				strSlotName = iter->first;
			else
				strSlotName = cfgData.cfgCtx[i].grammarName + "#" + iter->first;
			lhErr = lh_HostCtxClearCtx(recObjs.hCtx[i], strSlotName.c_str());
			if (LH_OK != lhErr) return false;
		}
		totalNumOfListCtx += (int)cfgData.cfgCtx[i].slot2FileName.size();
		for(iter=cfgData.cfgCtx[i].field2FileName.begin(),j=0; j<(int)cfgData.cfgCtx[i].field2FileName.size(); iter++, j++) {
			if (cfgData.cfgCtx[i].type == "slm")
				strSlotName = iter->first;
			else
				strSlotName = cfgData.cfgCtx[i].grammarName + "#" + iter->first;
			lhErr = lh_HostCtxClearCtx(recObjs.hCtx[i], strSlotName.c_str());
			if (LH_OK != lhErr) return false;
		}
		totalNumOfFieldCtx += (int)cfgData.cfgCtx[i].field2FileName.size();
	}
	/* Close the slot (list/field) context objects counted above. */
	for(j=0; j<totalNumOfListCtx; j++) {
		lhErr = lh_ObjClose(&recObjs.phListCtx[j]);
		if (LH_OK != lhErr) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close list context");
			return false;
		}
	}
	for(j=0; j<totalNumOfFieldCtx; j++) {
		lhErr = lh_ObjClose(&recObjs.phFieldCtx[j]);
		if (LH_OK != lhErr) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close field context");
			return false;
		}
	}
	
	for(int i =0;i<cfgData.numberOfContexts;i++) {
		lhErr = lh_ObjClose(&recObjs.hCtx[i]);
		if (LH_OK != lhErr) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close host context");
			return false;
		}
	}

	/* Release the producer - consumer relations. */
	lhErr = lh_ProducerReleaseConsumer(recObjs.hAudioSource);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to release audio source");
		return false;
	}
	lhErr = lh_ProducerReleaseConsumer(recObjs.hFx);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to release fx");
		return false;
	}

	/* Close the ASR objects. */
	lhErr = lh_ObjClose(&(recObjs.hAudioType));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close audio type");
		return false;
	}
	lhErr = lh_ObjClose(&(recObjs.hRec));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close recognizer");
		return false;
	}
	lhErr = lh_ObjClose(&(recObjs.hFx));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close fx");
		return false;
	}
	lhErr = lh_ObjClose(&(recObjs.hAudioSource));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close audio source");
		return false;
	}
	lhErr = lh_ObjClose(&(recObjs.hAcMod));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close acm model");
		return false;
	}

	//release list context pointers
	delete []recObjs.phListCtx;
	delete []recObjs.phFieldCtx;

	/* Release the wave file names.  Fix: these are allocated with new char[]
	 * in initialize(), so plain `delete` was undefined behavior — use delete[]. */
	delete [] startWV;
	delete [] endWV;

	for(int i=0;i<cfgData.numberOfSem;i++) {
		/* Close the SemProc object. */
		if (!lh_ObjIsNull(voconSem.h_semproc[i])) {
			lhErr =  lh_ObjClose(&(voconSem.h_semproc[i]));
			if (LH_OK != lhErr) {
				m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close sem");
				return false;
			}
		}

		/* Close the SEM component. */
		lhErr = lh_ComponentTerminate(&(voconSem.h_csem[i]));
		if (LH_OK != lhErr) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to terminate sem");
			return false;
		}
		voconSem.h_csem[i] = lh_GetNullComponent();

		/* Close the heap from the SEM component. */
		phErr = ph_DlHeapClose(&voconSem.heap_inst[i]);
		if (PH_OK != phErr) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close sem heap");
			return false;
		}
	}

	/* Close the ASR and Base component. */
	lhErr = lh_ComponentTerminate(&(recObjs.hCompAsr));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close asr component");
		return false;
	}
	lhErr = lh_ComponentTerminate(&(recObjs.hCompPron));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close asr component");
		return false;
	}
	lhErr = lh_ComponentTerminate(&(recObjs.hCompBase));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close base component");
		return false;
	}

	/* Close the private heap. */
	phErr = ph_CloseWin32PrivateHeap(&(recObjs.pHeapInst));
	if (PH_OK != phErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to close private heap");
		return false;
	}

	return true;
}

/**
 * Recognize speech from a WAV file.  The file is first converted (via
 * convertFormat()) into "<wavFileName>.tmp.vocon", then streamed through
 * the audio chain in SOUND_BUF_SIZE chunks until the chain reports
 * completion; chain events are forwarded to handleEvent().
 *
 * @param wavFileName  path of the input WAV file
 * @param result       receives the NLU result (published through pResult
 *                     for the event handler)
 * @return SR_SUCCESS, or a specific SR_ERROR_* code on failure.
 *
 * Fix: the audio-type object and the pcm input stream were previously
 * leaked on every error path; all exits now funnel through one cleanup.
 */
SRError Vocon45::recFromFile(const char *wavFileName, SRNLUResult *result) {
	LH_ISTREAM_INTERFACE        istream_pcm_itf;
	void*                       istream_pcm_inst = NULL;
	size_t                      number_bytes_read;
	char                        sound_buffer[SOUND_BUF_SIZE];
	LH_AUDIOCHAIN_EVENT_INFO    iEvent;
	LH_AUDIOCHAIN_FINISHED_INFO audiochain_finished_info;
	LH_BOOL                     no_more_data;
	LH_OBJECT                   h_audio_type = lh_GetNullObj();
	LH_ERROR					lhErr = LH_OK;
	SRError						retErr = SR_SUCCESS;

	pResult = result;

	/* NOTE(review): no bounds check — wavFileName must stay well under the
	 * 4096-byte buffer (minus the ".tmp.vocon" suffix). */
	char convertedFile[4096];
	strcpy(convertedFile, wavFileName);
	strcat(convertedFile, ".tmp.vocon");

	convertFormat(wavFileName, convertedFile);

	/* Create the input stream to the pcm. */
	if (st_CreateStreamReaderFromFile(convertedFile, &istream_pcm_itf, &istream_pcm_inst) != ST_OK) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to open wav file: " + std::string(convertedFile));
		return SR_ERROR_AUDIOSTREAM_FAIL_TO_START;
	}

	/* Create audiotype object. */
	lhErr = lh_CreateAudioTypeSamples(recObjs.hCompAsr, SAMPLE_FREQ, &h_audio_type);
	if (lhErr != LH_OK) { retErr = SR_ERROR_AUDIOSTREAM_FAIL_TO_START; goto cleanup; }

	/* Starts the complete audio chain. */
	lhErr = lh_AudioSourceStart(recObjs.hAudioSource);
	if (lhErr != LH_OK) { retErr = SR_ERROR_AUDIOSTREAM_FAIL_TO_START; goto cleanup; }

	/* Pump the converted file through the audio chain. */
	audiochain_finished_info = LH_AUDIOCHAIN_NOT_FINISHED;
	while (audiochain_finished_info == LH_AUDIOCHAIN_NOT_FINISHED) {
		no_more_data = LH_FALSE;
		/* Fetch the data from the pcm input stream. */
		number_bytes_read = istream_pcm_itf.pfread(sound_buffer, 1, SOUND_BUF_SIZE, istream_pcm_inst);

		/* Swap bytes to big endian if needed - input file must be little endian */
		//sampleapp_swap_pcmbuffer_to_platform_endianness(sound_buffer,number_bytes_read);

		/* Give a number of data frames SupplyData. */
		if (number_bytes_read!=0) {
			lhErr = lh_AudioSourceSupplyData(recObjs.hAudioSource, sound_buffer, number_bytes_read);
			if (lhErr != LH_OK) { retErr = SR_ERROR_AUDIOSTREAM_FAIL_TO_SUPPLYDATA; goto cleanup; }
		}

		/* A short read without a stream error means end of file. */
		if (number_bytes_read!=SOUND_BUF_SIZE && !istream_pcm_itf.pferror(istream_pcm_inst)) {
			/* Signal that this is the last buffer. */
			lhErr = lh_AudioSourceNoMoreData(recObjs.hAudioSource);
			if (lhErr != LH_OK) { retErr = SR_ERROR_AUDIOSTREAM_FAIL_TO_SUPPLYDATA; goto cleanup; }
		}

		while (LH_AUDIOCHAIN_NOT_FINISHED == audiochain_finished_info && !no_more_data) {
			/* Consume the data by using Advance. */
			lhErr = lh_AudioSourceAdvance(recObjs.hAudioSource, &iEvent, &audiochain_finished_info, &no_more_data);
			if (lhErr != LH_OK) { retErr = SR_ERROR_AUDIOSTREAM_FAIL_TO_SUPPLYDATA; goto cleanup; }

			if (iEvent.type != LH_AUDIOCHAIN_NOEVENT) {
				int bStopAudioIn;
				SRError error = SR_SUCCESS;
				if (!handleEvent(iEvent, &bStopAudioIn, error)) {
					retErr = SR_ERROR_ENGINE_EXCEPTION_PROC_EVENT;
					goto cleanup;
				}
			}
		}
	}

cleanup:
	/* Close the audio type object */
	if (!lh_ObjIsNull(h_audio_type)) lh_ObjClose(&h_audio_type);

	/* Close the pcm input stream. */
	if (istream_pcm_inst) istream_pcm_itf.pffinished(istream_pcm_inst);

	return retErr;
}

SRError Vocon45::recFromMic(int sessionID, const char *logWAVFileName, SRNLUResult *result) 
{
	LH_ERROR				lhErr = LH_OK;
	AUDIOIN_ERROR			audioErr = AUDIOIN_OK;
	AUDIOIN_H				hAudioIn = NULL;
	int						bAudioInRunning = 0;
	unsigned long			audioinFrameSizeInSamples;
	AudioLogger				audiologger(m_targetFormat);

	pResult = result;
	m_isRecording = true;

	audioinFrameSizeInSamples = recObjs.audioSourceFrameSizeInBytes*10/2;

	audioErr = audioin_OpenEx(AUDIOIN_WAVE_MAPPER, recObjs.sampleFreq, 5, audioinFrameSizeInSamples, &hAudioIn);
	if (AUDIOIN_OK != audioErr)  {
		return SR_ERROR_RECORD_DEVICE_FAIL_TO_CREATE;
	}

	LH_AUDIOCHAIN_FINISHED_INFO  eFinished;     /* AudioChain finished status. */
	LH_BOOL                      bSupplyData;   /* AudioSource has no more data, supply new data. */
	LH_AUDIOCHAIN_EVENT_INFO     iEvent;         /* AudioChain event. */

	//TODO: replace sndPlaySound()
//	sndPlaySound((LPCSTR)startWV, SND_SYNC);

	/* Start the recognizer.   */
	audiologger.createLogFile(logWAVFileName, sessionID);

	lhErr = lh_AudioSourceStart(recObjs.hAudioSource);
	if (LH_OK != lhErr) {
		if (hAudioIn) {
			if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
			(void)audioin_Close(&hAudioIn);
		}
		audiologger.closeLogFile();
		return SR_ERROR_RECORD_DEVICE_FAIL_TO_START;
	}

	audioErr = audioin_Start(hAudioIn);
	if (AUDIOIN_OK != audioErr) {
		if (hAudioIn) {
			if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
			(void)audioin_Close(&hAudioIn);
		}
		audiologger.closeLogFile();
		return SR_ERROR_AUDIOSTREAM_FAIL_TO_START;
	}
	bAudioInRunning = 1;

	eFinished = LH_AUDIOCHAIN_NOT_FINISHED;
	while (LH_AUDIOCHAIN_NOT_FINISHED == eFinished) {
		AUDIOIN_INFO	audioInInfo;         /* Info regarding the Audioin state. */
		void			*pAudioFrame = NULL;  /* Pointer to an audio frame. */

		/* Get an audio frame from audioin and supply it to AudioSource. */
		audioErr = audioin_GetFrame(hAudioIn, &audioinFrameSizeInSamples, &pAudioFrame, &audioInInfo);
		if (AUDIOIN_OK != audioErr) {
			if (hAudioIn) {
				if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
				(void)audioin_Close(&hAudioIn);
			}
			audiologger.closeLogFile();
			return SR_ERROR_AUDIOSTREAM_FAIL_TO_GETFRAME;
		}
		audiologger.writeBufferToFile(pAudioFrame, audioinFrameSizeInSamples*2);
		/* This function requires the size in bytes, not in samples. */
		lhErr = lh_AudioSourceSupplyData(recObjs.hAudioSource, pAudioFrame, audioinFrameSizeInSamples*2);
		if (LH_OK != lhErr) {
			if (hAudioIn) {
				if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
				(void)audioin_Close(&hAudioIn);
			}
			audiologger.closeLogFile();
			return SR_ERROR_AUDIOSTREAM_FAIL_TO_SUPPLYDATA;
		}

		bSupplyData = LH_FALSE;
		while (LH_AUDIOCHAIN_NOT_FINISHED == eFinished && !bSupplyData) {
			lhErr = lh_AudioSourceAdvance(recObjs.hAudioSource, &iEvent, &eFinished, &bSupplyData);
			if (LH_OK != lhErr) {
				if (hAudioIn) {
					if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
					(void)audioin_Close(&hAudioIn);
				}
				audiologger.closeLogFile();
				return SR_ERROR_AUDIOSTREAM_FAIL_TO_SUPPLYDATA;
			}

			/* Handle possible events. */
			if (iEvent.type != LH_AUDIOCHAIN_NOEVENT) {
				bool	eSuccess;
				int     bStopAudioIn;

				SRError error = SR_SUCCESS;
				eSuccess = handleEvent(iEvent, &bStopAudioIn, error);
				if (!eSuccess) {
					if (hAudioIn) {
						if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
						(void)audioin_Close(&hAudioIn);
					}
					audiologger.closeLogFile();
					return SR_ERROR_ENGINE_EXCEPTION_PROC_EVENT;
				}
/*
				if (SR_SUCCESS != error) {
					if (hAudioIn) {
						if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
						(void)audioin_Close(&hAudioIn);
					}
					//audiologger.closeLogFile();
					return error;
				}
*/
				/* Stop the AudioIn as soon as we know no more audio input will be required. */
				if (bStopAudioIn) {
					/* First return it the buffer. */
					printf("Stopping AudioIn\n");
					audiologger.closeLogFile();
					HybridSpeechRecognizer::endRecording(sessionID);
					m_isRecording = false;
					//TODO: replace sndPlaySound()
				//	sndPlaySound((LPCSTR)endWV, SND_SYNC);

					audioErr = audioin_ReturnFrame(hAudioIn, pAudioFrame);
					if (AUDIOIN_OK != audioErr) {
						if (hAudioIn) {
							if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
							(void)audioin_Close(&hAudioIn);
						}
					}
					pAudioFrame = NULL;
					audioErr = audioin_Stop(hAudioIn);
					bAudioInRunning = 0;
					if (AUDIOIN_OK != audioErr) {
						if (hAudioIn) {
							if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
							(void)audioin_Close(&hAudioIn);
						}
					}
				}
			}
		} /* while (LH_AUDIOCHAIN_NOT_FINISHED == eFinished && !bSupplyData) */

		/* Return the audio frame to audioin, if not done so already. */
		if (pAudioFrame) {
			audioErr = audioin_ReturnFrame(hAudioIn, pAudioFrame);
			if (AUDIOIN_OK != audioErr) {
				if (hAudioIn) {
					if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
					(void)audioin_Close(&hAudioIn);
				}
			}
			pAudioFrame = NULL;
		}	

	} /* while (LH_AUDIOCHAIN_NOT_FINISHED == eFinished) */

	/* Close the AudioIn driver. */
	audioErr = audioin_Close(&hAudioIn);
	if (AUDIOIN_OK != audioErr) {
		if (hAudioIn) {
			if (bAudioInRunning) (void)audioin_Stop(hAudioIn);
			(void)audioin_Close(&hAudioIn);
		}
	}
	return SR_SUCCESS;
}

// Stop an in-progress recording for the given session.
// NOTE(review): not implemented — always reports failure.  As written,
// recording only ends when the engine signals bStopAudioIn in recFromMic().
bool Vocon45::stopRecording(int sessionID)
{
	return false;
}

// Update a dynamic slot with a new set of values.
// NOTE(review): stub — the arguments are currently ignored and success is
// always reported.
bool Vocon45::updateSlots(const char *slotName, const char *slotValues)  {
	//slotValues = abc,xyz,mno,...  (comma-separated list of slot entries)

	return true;
}

// Export a (slot) list context to a file.
// NOTE(review): stub — the arguments are currently ignored and success is
// always reported.
bool Vocon45::exportListContext(const char *slotName, const char *slotFileName) {
	return true;
}

// Activate one or more grammar rules by name.
// NOTE(review): stub — the argument is currently ignored and success is
// always reported.
bool Vocon45::activateRule(const char *slotNames) {
	return true;
}

/* Reset every engine handle and component to its null value so that later
 * cleanup code can safely test them before closing.  Always succeeds. */
bool Vocon45::initObjects() {
	/* Private heap. */
	recObjs.pHeapInst = NULL;

	/* Components. */
	recObjs.hCompBase = lh_GetNullComponent();
	recObjs.hCompAsr  = lh_GetNullComponent();
	recObjs.hCompPron = lh_GetNullComponent();
	recObjs.hCompG2P  = lh_GetNullComponent();

	/* ASR objects, including the three possible hosting contexts. */
	for (int ctx = 0; ctx < 3; ++ctx)
		recObjs.hCtx[ctx] = lh_GetNullObj();
	recObjs.hAcMod       = lh_GetNullObj();
	recObjs.hAudioType   = lh_GetNullObj();
	recObjs.hAudioSource = lh_GetNullObj();
	recObjs.hFx          = lh_GetNullObj();
	recObjs.hRec         = lh_GetNullObj();
	recObjs.h_ddg2p      = lh_GetNullObj();
	recObjs.h_lex        = lh_GetNullObj();

	/* Audio parameters, filled in later by initRecoginer(). */
	recObjs.sampleFreq = 0;
	recObjs.audioSourceFrameSizeInBytes = 0;

	return true;
}

/**
 * Create the core recognizer stack: private heap, Base/ASR/Pron components,
 * acoustic model (from cfgData.acmFileName), audio source, FX module,
 * recognizer, and the AudioType matching the model's sample frequency;
 * then wire the producer/consumer chain AudioSource -> Fx -> Rec.
 *
 * Returns false (after logging) on the first failure; the caller
 * (initialize()) is responsible for tearing down via closeAll()/destory().
 * NOTE(review): method name is a typo for "initRecognizer" but is kept for
 * interface compatibility.
 */
bool Vocon45::initRecoginer() {
	LH_ERROR				lhErr = LH_OK;  /* Error from the VoCon3200 (lh_) API. */
	PH_ERROR				phErr = PH_OK;  /* Error from the private heap (ph_) API. */
	ST_ERROR				stErr = ST_OK;  /* Error from the stream (st_) API. */

	LH_HEAP_INTERFACE		HeapInterface;
	LH_ISTREAM_INTERFACE	IStreamInterface;
	void					*pIStreamAcMod = NULL;
	LH_ACMOD_INFO			*pAcModInfo;

	/* Create the private heap that will be used for all memory allocations in the VoCon engine. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the private heap.");
	phErr = ph_CreateWin32PrivateHeap(&HeapInterface, &(recObjs.pHeapInst));
	if (PH_OK != phErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the private heap.");
		return false;
	}
	/* Create the input stream for the acoustic model. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the input stream for the acoustic model.");
	stErr = st_CreateStreamReaderFromFile(cfgData.acmFileName.c_str(), &IStreamInterface, &pIStreamAcMod);
	if (ST_OK != stErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the input stream for the acoustic model.");
		return false;
	}
	/* Create a base and an ASR component. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the base component.");
	lhErr = lh_InitBase(&HeapInterface, recObjs.pHeapInst, NULL, NULL, &(recObjs.hCompBase));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the base component.");
		return false;
	}
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the ASR component.");
	lhErr = lh_InitAsr(recObjs.hCompBase, &HeapInterface, recObjs.pHeapInst, &(recObjs.hCompAsr));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the ASR component.");
		return false;
	}
	/* Create the common Asr objects.  (Log-string typos "acm modle" fixed.) */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the acm model.");
	lhErr = lh_CreateAcMod(recObjs.hCompAsr, &IStreamInterface, pIStreamAcMod, NULL, &(recObjs.hAcMod));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the acm model.");
		return false;
	}
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the audio source.");
	lhErr = lh_CreateAudioSource(recObjs.hCompAsr, &(recObjs.hAudioSource));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the audio source.");
		return false;
	}
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the fx module.");
	lhErr = lh_CreateFx(recObjs.hCompAsr, &(recObjs.hFx));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the fx module.");
		return false;
	}
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the recognizer.");
	lhErr = lh_CreateRec(recObjs.hCompAsr, &(recObjs.hRec));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the recognizer.");
		return false;
	}
	/* Pronunciation component (log-string typo "prononiation" fixed). */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating pronunciation.");
	lhErr = lh_InitPron(recObjs.hCompBase, &HeapInterface, recObjs.pHeapInst, &(recObjs.hCompPron));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the pronunciation.");
		return false;
	}
	/* Find out what sampling frequency the acoustic model uses. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "finding the sampling frequency.");
	lhErr = lh_AcModBorrowInfo(recObjs.hAcMod, &pAcModInfo);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to find the sampling frequency.");
		return false;
	}
	recObjs.sampleFreq = pAcModInfo->sampleFrequency;
	/* Create an AudioType object with the correct sampling frequency. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the AudioType.");
	lhErr = lh_CreateAudioTypeSamples(recObjs.hCompAsr, recObjs.sampleFreq, &(recObjs.hAudioType));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the AudioType.");
		return false;
	}
	/* Establish all producer - consumer relations: AudioSource -> Fx -> Rec. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "connecting to the audio source.");
	lhErr = lh_ProducerSetConsumer(recObjs.hAudioSource, recObjs.hFx);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to connect to the audio source.");
		return false;
	}
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "connecting to the fx source.");
	lhErr = lh_ProducerSetConsumer(recObjs.hFx, recObjs.hRec);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to connect to the fx source.");
		return false;
	}
	
	/* Return without errors. */
	return true;
}

bool Vocon45::initContexts() {
	// Builds the full context hierarchy for recognition:
	//  1) one hosting context per configured context (initHostContexts),
	//  2) a shared DDG2P + lexicon pipeline used to transcribe list entries,
	//  3) one list context per slot file and one field context per field file,
	//     attached to their hosting context,
	//  4) registers each hosting context with the recognizer and applies
	//     per-context tuning parameters,
	//  5) briefly starts the audio source to learn the audio frame size.
	// Returns false on the first engine/stream error (earlier allocations are
	// not rolled back here -- presumably closeAll() handles that; TODO confirm).
	//all contexts are assumed as hosting context
	LH_ERROR				lhErr = LH_OK;  /* Error from the VoCon3200 (lh_) API. */
	ST_ERROR				stErr = ST_OK;  /* Error from the stream (st_) API. */
	LH_ISTREAM_INTERFACE	IStreamInterface_ddg2p;
	void					*pIStreamInst_ddg2p = NULL;
	LH_DDG2P_INPUTCATEGORY* input_categories;
	size_t                  nbr_of_categories;
	LH_TRANSCRIPTION_SPEC*  trans_spec;
	LH_BOOL					HasInterface = LH_FALSE;
	LH_OBJECT				h_cache = lh_GetNullObj();   /* No pronunciation cache is used. */
	int						nbTotalListCtx = 0, nbTotalFieldCtx = 0;
	/* Cumulative start offsets of each context's list/field sub-contexts in
	 * the flat phListCtx/phFieldCtx arrays. Entry [i+1] is (re)written every
	 * iteration, so with the <=3 guard below the highest index written is 3,
	 * which fits the size-4 arrays. */
	int						idxListCtx[4], idxFieldCtx[4];
	int						j;
	map<string, string>::iterator iter;

	//should be less than 3 contexts in current version
	// NOTE(review): the guard actually admits up to 3 contexts although the
	// comment/message say "less than 3" -- confirm the intended limit.
	if (cfgData.numberOfContexts > 3) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail, check that this version only support less than 3 contexts.");
		return false;
	}
	//////////////////////create multiple hosting contexts without slots
	for(int i =0;i<cfgData.numberOfContexts;i++) {
		if(!initHostContexts(i)) return false;
		idxListCtx[i] = nbTotalListCtx;
		idxFieldCtx[i] = nbTotalFieldCtx;
		nbTotalListCtx += (int)cfgData.cfgCtx[i].slot2FileName.size();
		nbTotalFieldCtx += (int)cfgData.cfgCtx[i].field2FileName.size();
		idxListCtx[i+1] = nbTotalListCtx;
		idxFieldCtx[i+1] = nbTotalFieldCtx;
	}

	//////////////////////create slot contexts
	//fetch transcription
	/* The DDG2P (data-driven grapheme-to-phoneme) pipeline transcribes raw
	 * orthographies from slot files into phonetic form via the lexicon. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the input stream from the ddg2p file.");
	stErr = st_CreateStreamReaderFromFile(cfgData.ddg2pFileName.c_str(), &IStreamInterface_ddg2p, &pIStreamInst_ddg2p);
	if (ST_OK != stErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the input stream from the ddg2p file.");
		return false;
	}

	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating ddg2p.");
	/* NOTE(review): the stream created above is handed to the engine here and
	 * never explicitly destroyed in this function -- presumably ownership
	 * transfers to the ddg2p object; confirm against the SDK docs. */
	lhErr = lh_CreateDDG2P(recObjs.hCompPron, &IStreamInterface_ddg2p, pIStreamInst_ddg2p, NULL, &recObjs.h_ddg2p);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create ddg2p.");
		return false;
	}

	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "fetching input categries in ddg2p.");
	lhErr = lh_DDG2PFetchInputCategories(recObjs.h_ddg2p, &input_categories, &nbr_of_categories);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to fetch input categries in ddg2p.");
		return false;
	}

	/* Only the first advertised category is used. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "setting input categries in ddg2p.");
	lhErr = lh_DDG2PSetInputCategory(recObjs.h_ddg2p, input_categories[0].categoryID);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to set input categries in ddg2p.");
		return false;
	}

	/* Return (free) the fetched category list to the engine. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "returning input categries in ddg2p.");
	lhErr = lh_DDG2PReturnInputCategories(recObjs.h_ddg2p, input_categories);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to return input categries in ddg2p.");
		return false;
	}

	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "borrowing transcriptions in ddg2p.");
	lhErr = lh_AcModBorrowTranscriptionSpec(recObjs.hAcMod, &trans_spec);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to borrow transcriptions in ddg2p.");
		return false;
	}

	/* The lexicon uses the acoustic model's transcription spec so generated
	 * pronunciations match the model's phone set. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating lexicon.");
	lhErr = lh_CreateLex (recObjs.hCompPron, trans_spec, &(recObjs.h_lex));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create lexicon.");
		return false;
	}

	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "setting g2p in lexicon.");
	lhErr = lh_LexSetG2P(recObjs.h_lex, recObjs.h_ddg2p, h_cache);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to set g2p in lexicon.");
		return false;
	}

	/* Sanity check: the lexicon must expose the transcription-lookup
	 * interface used later by initListContexts. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "making sure lexicon has transcription.");
	lhErr = lh_ObjHasInterface(recObjs.h_lex, LH_IID_ITRANSLOOKUP, &HasInterface);
	if (LH_OK != lhErr || !HasInterface) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to set transcription in lexicon.");
		return false;
	}

	//handle list contexts individually
	// Raw arrays owned via recObjs; presumably released during shutdown --
	// TODO confirm (they are leaked on the error returns below).
	recObjs.phListCtx = new LH_OBJECT[nbTotalListCtx];
	recObjs.phFieldCtx = new LH_OBJECT[nbTotalFieldCtx];
	for(int i =0;i<cfgData.numberOfContexts;i++) {
		for(j=0, iter = cfgData.cfgCtx[i].slot2FileName.begin(); iter!=cfgData.cfgCtx[i].slot2FileName.end(); iter++, j++) {
			{
				m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "loading slot file: " + iter->second);
			}
			recObjs.phListCtx[idxListCtx[i]+j] = lh_GetNullObj();
			if(!initListContexts(i, idxListCtx[i]+j, iter->first.c_str(), iter->second.c_str())) {
				return false;
			}
		}
		for(j=0, iter = cfgData.cfgCtx[i].field2FileName.begin(); iter!=cfgData.cfgCtx[i].field2FileName.end(); iter++, j++) {
			{
				m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "loading field file: " + iter->second);
			}
			recObjs.phFieldCtx[idxFieldCtx[i]+j] = lh_GetNullObj();
			if(!initFieldContexts(i, idxFieldCtx[i]+j, iter->first.c_str(), iter->second.c_str())) {
				return false;
			}
		}
	}
	
	//add each context into recognizer
	for(int i =0;i<cfgData.numberOfContexts;i++) {
		if(!lh_ObjIsNull(recObjs.hCtx[i])) {
			lhErr = lh_RecAddCtx(recObjs.hRec, recObjs.hCtx[i]);
			if (LH_OK != lhErr) {
				{
					m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to add a context into recognizer: " + cfgData.cfgCtx[i].id);
				}
				return false;
			}
		}
	}

	//set parameters for all contexts
	for(int i =0;i<cfgData.numberOfContexts;i++) {
		/* First let the engine derive its own optimized defaults ... */
		lhErr = lh_ConfigUpdateAllComputedParams(recObjs.hCtx[i]);
		if (LH_OK != lhErr) {
			{
				m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to set optimized aprameters for context: " + cfgData.cfgCtx[i].id);
			}
			return false;
		}
		/* ... then override accuracy / initial beam width only when the
		 * configuration supplies a positive value. */
		if (cfgData.cfgCtx[i].ctxParamAcc > 0) {
			lhErr = lh_ConfigSetParam(recObjs.hCtx[i], LH_CTX_PARAM_ACCURACY, (long)cfgData.cfgCtx[i].ctxParamAcc);
			if (LH_OK != lhErr) {
				{
					m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to set LH_CTX_PARAM_ACCURACY for context: " + cfgData.cfgCtx[i].id);
				}
				return false;
			}
		}
		if (cfgData.cfgCtx[i].ctxParamInitB > 0) {
			lhErr = lh_ConfigSetParam(recObjs.hCtx[i], LH_CTX_PARAM_INITBEAMWIDTH, (long)cfgData.cfgCtx[i].ctxParamInitB);
			if (LH_OK != lhErr) {
				{
					m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to set LH_CTX_PARAM_INITBEAMWIDTH for context: " + cfgData.cfgCtx[i].id);
				}
				return false;
			}
		}
		/* Hard-coded trailing-silence timeouts (ms): 1400 for the context,
		 * 700 for the Fx-side detector -- presumably tuned values; note these
		 * error paths log nothing. */
		lhErr = lh_ConfigSetParam(recObjs.hCtx[i], LH_CTX_PARAM_TSILENCE, 1400);
		if (LH_OK != lhErr) return false;
		lhErr = lh_ConfigSetParam(recObjs.hCtx[i], LH_CTX_PARAM_TSILENCE_FX, 700);
		if (LH_OK != lhErr) return false;

	}

	//setup default paramers for recognizer
	// Enabling begin of speech detection on the Fx object.
	lhErr = lh_ConfigSetParam(recObjs.hFx, LH_FX_PARAM_START_ENABLE, LH_TRUE);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to set LH_FX_PARAM_START_ENABLE for fx:");
		return false;
	}

	// Setting a time-out of 10 seconds on the AudioSource object.
	// NOTE(review): the value actually set is 5000 ms, not 10 s as the
	// comment above (kept from the original) claims -- confirm which is intended.
	lhErr = lh_ConfigSetParam(recObjs.hAudioSource, LH_AUDIOSOURCE_PARAM_TIMEOUT, 5000);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to set LH_AUDIOSOURCE_PARAM_TIMEOUT for audio:");
		return false;
	}

	/* The frame size can only be obtained when the engine is started,  */
	/* because only at that time all required information is available. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "starting audio source to obtain frame size.");
	lhErr = lh_AudioSourceStart(recObjs.hAudioSource);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to start the audio source to obtain frame size.");
		return false;
	}
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "obtaining frame size.");
	/* NOTE(review): the frame size is queried on hAudioSource rather than on
	 * the hAudioType object created earlier -- confirm this is the intended
	 * handle for lh_AudioTypeGetFrameSize. */
	lhErr = lh_AudioTypeGetFrameSize(recObjs.hAudioSource, (size_t*)&(recObjs.audioSourceFrameSizeInBytes));
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to obtain frame size.");
		return false;
	}
	/* Stop the engine. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "stopping audio source to obtain frame size.");
	lhErr = lh_AudioSourceBreak(recObjs.hAudioSource);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to stop the audio source to obtain frame size.");
		return false;
	}

	/* Return without errors. */
	return true;
}

bool Vocon45::initHostContexts(int i) {
	/* Creates the i'th hosting context: opens a read stream over the
	 * configured context file, builds the context object bound to the
	 * acoustic model, and tags it with the context id (as user data) so
	 * recognition results can be mapped back to their context later.
	 * Returns false on any stream/engine error. */
	LH_ERROR				lhErr = LH_OK;  /* Error from the VoCon3200 (lh_) API. */
	ST_ERROR				stErr = ST_OK;  /* Error from the stream (st_) API. */
	LH_ISTREAM_INTERFACE	IStreamInterface;
	void					*pIStreamContext = NULL;

	/* Create the input stream from the context file. */
	/* BUGFIX: the two concatenated message parts previously ran together
	 * ("...named: <id>creating..."); a separator is now inserted. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "for the context named: " + cfgData.cfgCtx[i].id + ": creating the input stream from the context file.");

	stErr = st_CreateStreamReaderFromFile(cfgData.cfgCtx[i].fileName.c_str(), &IStreamInterface, &pIStreamContext);
	if (ST_OK != stErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the input stream from the context file.");
		return false;
	}

	/* Create the context object from that stream. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the context object from that stream.");
	lhErr = lh_CreateCtx(recObjs.hCompAsr, &IStreamInterface, pIStreamContext, NULL, recObjs.hAcMod, &recObjs.hCtx[i]);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the context object from that stream.");
		return false;
	}

	/* Store the id as user data so handleEvent can identify the source
	 * context of a result. NOTE(review): the engine keeps the raw c_str()
	 * pointer, so cfgData.cfgCtx[i].id must outlive the context -- confirm. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "setting the user data (id) as context name.");
	lhErr = lh_ObjSetUserData(recObjs.hCtx[i], (void *)cfgData.cfgCtx[i].id.c_str());
	if (LH_OK != lhErr)	{
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to set the user data (id) as context name.");
		return false;
	}
	return true;
}

bool Vocon45::initListContexts(int iCtx, int iList, const char *slotNM, const char *slotFN) {
	LH_ERROR				lhErr = LH_OK;
	LH_GRM_TERMINAL			*pTerminals = new LH_GRM_TERMINAL[1];
	UIFile					fp;
	int						i = 0;
	string					line;
	char					*tmp;
	LH_TERMSEQ_STATUS		hStatus;
	LH_USERID_RANGE			range = LH_USERID_RANGE_64;

	recObjs.phListCtx[iList] = lh_GetNullObj();
	lhErr = lh_CreateEmptyListCtx(recObjs.hCompAsr,
								  recObjs.hAcMod,
								  LH_LISTCTX_KEEP_STRINGS_KEEP_IDS,
								  range,
								  &recObjs.phListCtx[iList]);
	
	fp.openFile(slotFN);
	while(fp.getLine(line)) {
		tmp = new char[line.length()+1];
		strcpy(tmp, line.c_str());
		pTerminals[0].szOrthography = tmp;
		pTerminals[0].userID.lo32 = i+1;
		lh_TransLookUpFetchTranscriptions(recObjs.h_lex, pTerminals[0].szOrthography, &(pTerminals[0].pTranscriptions), &(pTerminals[0].nbrTranscriptions));
		//add terminals to the list Ctx
		if(pTerminals[0].nbrTranscriptions > 0) {
			lhErr = lh_ListCtxAddTermSeq(recObjs.phListCtx[iList], &pTerminals[0], 1, &hStatus);
			if (lhErr != LH_OK || !hStatus.added) {
				m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to load slot file: " + std::string(slotFN) + "for word: " + tmp);
				return false;
			}
		}
		i++;
		delete []tmp;
	}
	fp.closeFile();
	delete []pTerminals;
	pTerminals = NULL;

	//commit list context
	lhErr = lh_ListCtxCommit(recObjs.phListCtx[iList]);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to commit context when loading slot file: " + std::string(slotFN));
		return false;
	}
	
	//finishing adding context
	if(!lh_ObjIsNull(recObjs.hCtx[iCtx])) {
		if (cfgData.cfgCtx[iCtx].type == "slm") {
			//add listCtx to SLM host ctx
			lhErr = lh_HostCtxSetCtx(recObjs.hCtx[iCtx], slotNM, recObjs.phListCtx[iList]);
			if (lhErr != LH_OK) {
				m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to set list context to slm hosting: " + std::string(slotFN));
				return false;
			}
		} else {
			//add listCtx to BNF host ctx
			line = cfgData.cfgCtx[iCtx].grammarName;
			line += "#";
			line += slotNM;
			lhErr = lh_HostCtxSetCtx(recObjs.hCtx[iCtx], line.c_str(), recObjs.phListCtx[iList]);
			if (lhErr != LH_OK) {
				{
					m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to set list context to bnf hosting: " + std::string(slotFN));
				}
				return false;
			}
		}
	}
	return true;
}

bool Vocon45::initFieldContexts(int iCtx, int iField, const char *fieldNM, const char *fieldFN) {
	LH_ERROR				lhErr = LH_OK;
	ST_ERROR				stErr = ST_OK;
	LH_ISTREAM_INTERFACE	IStreamInterface;
	void					*pIStreamContext = NULL;
	string					line;
	
	/* Create the input stream from the context file. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "for the field context named: " + std::string(fieldFN) + "creating the input stream from the field context file.");

	stErr = st_CreateStreamReaderFromFile(fieldFN, &IStreamInterface, &pIStreamContext);
	if (ST_OK != stErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the input stream from the context file.");
		return false;
	}

	/* Create the context object from that stream. */
	m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "creating the field context object from that stream.");
	recObjs.phFieldCtx[iField] = lh_GetNullObj();
	lhErr = lh_CreateCtx(recObjs.hCompAsr, &IStreamInterface, pIStreamContext, NULL, recObjs.hAcMod, &recObjs.phFieldCtx[iField]);
	if (LH_OK != lhErr) {
		m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "fail to create the context object from that stream.");
		return false;
	}

	//finishing adding context
	if(!lh_ObjIsNull(recObjs.hCtx[iCtx])) {
		if (cfgData.cfgCtx[iCtx].type == "slm") {
			//add fieldCtx to SLM host ctx
			lhErr = lh_HostCtxSetCtx(recObjs.hCtx[iCtx], fieldNM, recObjs.phFieldCtx[iField]);
			if (lhErr != LH_OK) {
				m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to set field context to slm hosting: " + std::string(fieldFN));
				return false;
			}
		} else {
			//add fieldCtx to BNF host ctx
			line = cfgData.cfgCtx[iCtx].grammarName;
			line += "#";
			line += fieldNM;
			lhErr = lh_HostCtxSetCtx(recObjs.hCtx[iCtx], line.c_str(), recObjs.phFieldCtx[iField]);
			if (lhErr != LH_OK) {
				{
					m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to set field context to bnf hosting: " + std::string(fieldFN));
				}
				return false;
			}
		}
	}
	return true;
}

bool Vocon45::initSem() {
	LH_ERROR				lhErr = LH_OK;
	PH_ERROR				phErr = PH_OK;
	ST_ERROR				stErr = ST_OK;
	LH_ISTREAM_INTERFACE    stream[3];
	LH_ISTREAM_PARAMS       stream_params[3];
	void*                   stream_inst[3];

	for(int i=0;i<cfgData.numberOfSem;i++) {
		//voconSem.e_last_semnbest    = SEMNBEST_OK;
		voconSem.h_csem[i]             = lh_GetNullComponent();
		voconSem.h_semproc[i]          = lh_GetNullObj();
		voconSem.heap_inst[i]          = (void*)0;
		
		/* Open a heap for the SEM component. */
		memset(&(voconSem.heap_itf[i]), 0, sizeof(LH_HEAP_INTERFACE));
		phErr = ph_DlHeapCreate(NULL, &(voconSem.heap_itf[i]), &(voconSem.heap_inst[i]));
		if (phErr != PH_OK) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to create heap for sem");
			return false;
		}

		/* Initialize the SEM component. */
		lhErr = lh_InitSem(recObjs.hCompBase, &(voconSem.heap_itf[i]), voconSem.heap_inst[i], &(voconSem.h_csem[i]));
		if (lhErr != LH_OK) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to init sem");
			return false;
		}
			
		/* Create the stream for the semproc data. */
		memset(&(stream_params[i]), 0, sizeof(LH_ISTREAM_PARAMS));
		stream_inst[i] = (void*) NULL;
		stErr = st_CreateStreamReaderFromFile(cfgData.cfgSem[i].fileName.c_str(), &stream[i], &stream_inst[i]);
		if (stErr != ST_OK) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to open stream for sem");
			return false;
		}
		/* Create the semproc object. */
		lhErr = lh_CreateSemProc(voconSem.h_csem[i], &stream[i], stream_inst[i], &stream_params[i], &(voconSem.h_semproc[i]));
		if (lhErr != LH_OK) {
			m_pLogger->err(MODULE_SpeechManager, "Vocon45Wrapper", "unable to create sem");
			return false;
		}
		m_pLogger->inf(MODULE_SpeechManager, "Vocon45Wrapper", "loaded sem buffer: " + cfgData.cfgSem[i].fileName);
	}
	return true;
}

void Vocon45::closeAll() {
	/* Emergency shutdown used on error paths: instead of closing each
	 * individual engine object (whose state may be inconsistent after a
	 * failure), terminate the three components wholesale. Null handles are
	 * skipped; return codes are deliberately ignored. */
	if (!lh_ComponentIsNull(recObjs.hCompAsr)) {
		(void)lh_ComponentTerminate(&(recObjs.hCompAsr));
	}
	if (!lh_ComponentIsNull(recObjs.hCompPron)) {
		(void)lh_ComponentTerminate(&(recObjs.hCompPron));
	}
	if (!lh_ComponentIsNull(recObjs.hCompBase)) {
		(void)lh_ComponentTerminate(&(recObjs.hCompBase));
	}

	/* The private heap goes last, after everything that lived on it. */
	if (recObjs.pHeapInst) {
		(void)ph_CloseWin32PrivateHeap(&(recObjs.pHeapInst));
	}
}

bool Vocon45::handleEvent (LH_AUDIOCHAIN_EVENT_INFO iEvent, int *pbStopAudioIn, SRError& error) {
	/* Dispatches one audio-chain event bitmask from the engine. Each
	 * recognized bit is handled and then cleared from the local copy of
	 * iEvent.type. *pbStopAudioIn is set to 1 when the chain requests that
	 * audio input stop; 'error' receives a translated code for abnormal Fx
	 * conditions. LH_AUDIOCHAIN_EVENT_RESULT triggers N-best extraction and,
	 * when the source context has a semId configured, semantic processing.
	 * Returns false on any engine API failure, true otherwise. */
	*pbStopAudioIn = 0;
	/* Look what the event was. */

	/* LH_AUDIOCHAIN_EVENT_BOS (begin of speech) */
	/* This event is generated by an Fx object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_BOS )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_BOS;
	}

	/* LH_AUDIOCHAIN_EVENT_MAYBE_SPEECH */
	/* This event is generated by an Fx object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_MAYBE_SPEECH )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_MAYBE_SPEECH;
	}

	/* LH_AUDIOCHAIN_EVENT_NO_SPEECH */
	/* This event is generated by an Fx object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_NO_SPEECH )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_NO_SPEECH;
	}

	/* LH_AUDIOCHAIN_EVENT_TS_FX */
	/* This event is generated by an Fx object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_TS_FX )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_TS_FX;
	}

	/* LH_AUDIOCHAIN_EVENT_TIMEOUT */
	/* This event is generated by an Fx or AudioSource object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_TIMEOUT )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_TIMEOUT;
	}

	/* LH_AUDIOCHAIN_EVENT_TS_REC */
	/* This event is generated by a Rec object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_TS_REC )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_TS_REC;
	}

	/* LH_AUDIOCHAIN_EVENT_FX_GAINREQUEST */
	/* This event is generated by an Fx object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_FX_GAINREQUEST )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_FX_GAINREQUEST;
	}

	/* LH_AUDIOCHAIN_EVENT_FX_ABNORMCOND */
	/* This event is generated by an Fx object: translate the abnormal */
	/* audio condition into the wrapper's SRError code.                */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_FX_ABNORMCOND )   {
		LH_ERROR         lhErr = LH_OK;      /* Error returned by an LH API function. */
		LH_FX_ABNORMCOND abnormCondition;
		/* Find out what the exact abnormal condition is. */
		lhErr = lh_FxGetAbnormCondition(recObjs.hFx, &abnormCondition);
		if (LH_OK != lhErr) return false;

		switch (abnormCondition) {
		case LH_FX_BADSNR:
			error = SR_ERROR_AUDIOSIGNAL_EXCEPTION_BADSNR;
			break;
		case LH_FX_OVERLOAD:
			error = SR_ERROR_AUDIOSIGNAL_EXCEPTION_OVERLOAD;
			break;
		case LH_FX_TOOQUIET:
			error = SR_ERROR_AUDIOSIGNAL_EXCEPTION_TOOQUIET;
			break;
		case LH_FX_NOSIGNAL:
			error = SR_ERROR_AUDIOSIGNAL_EXCEPTION_NOSIGNAL;
			break;
		case LH_FX_POORMIC:
			error = SR_ERROR_AUDIOSIGNAL_EXCEPTION_POORMIC;
			break;
		case LH_FX_NOLEADINGSILENCE:
			error = SR_ERROR_AUDIOSIGNAL_EXCEPTION_NOLEADINGSILENCE;
			break;
		case LH_FX_ABNORMNULL:
			error = SR_ERROR_UNKNOWN;
			break;
		default:
			/* BUGFIX: an unlisted condition previously left 'error'
			 * untouched; map it to unknown explicitly. */
			error = SR_ERROR_UNKNOWN;
			break;
		}
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_FX_ABNORMCOND;
	}

	/* LH_AUDIOCHAIN_EVENT_FX_TIMER */
	/* This event is generated by an Fx object, if the LH_FX_PARAM_EVENT_TIMER is set. */
	/* It usually is used to get the signal level and SNR at regular intervals.        */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_FX_TIMER )   {
		LH_ERROR            lhErr = LH_OK;      /* Error returned by an LH API function. */
		LH_FX_SIGNAL_LEVELS SignalLevels;
		/* The levels are queried but not used further here. */
		lhErr = lh_FxGetSignalLevels(recObjs.hFx, &SignalLevels);
		if (LH_OK != lhErr) return false;
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_FX_TIMER;
	}

	/* LH_AUDIOCHAIN_EVENT_FRAME_AVAILABLE */
	/* This event is generated by an AudioSink object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_FRAME_AVAILABLE )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_FRAME_AVAILABLE;
	}

	/* LH_AUDIOCHAIN_EVENT_INTERNAL */
	/* This event is generated by an AudioSink object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_INTERNAL )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_INTERNAL;
	}

	/* LH_AUDIOCHAIN_EVENT_MULTITHREADBUF_FRAMEAVAILABLE */
	/* This event is generated by a MultithreadBufWriter object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_MULTITHREADBUF_FRAMEAVAILABLE )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_MULTITHREADBUF_FRAMEAVAILABLE;
	}

	/* LH_AUDIOCHAIN_EVENT_MULTITHREADBUF_WASEMPTY */
	/* This event is generated by a MultithreadBufWriter object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_MULTITHREADBUF_WASEMPTY )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_MULTITHREADBUF_WASEMPTY;
	}

	/* LH_AUDIOCHAIN_EVENT_MULTITHREADBUF_ISFULL */
	/* This event is generated by a MultithreadBufWriter object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_MULTITHREADBUF_ISFULL )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_MULTITHREADBUF_ISFULL;
	}

	/* LH_AUDIOCHAIN_EVENT_END_OF_PASS */
	/* This event is generated by a Rec object. No action needed. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_END_OF_PASS )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_END_OF_PASS;
	}

	/* LH_AUDIOCHAIN_EVENT_RESULT */
	/* This event is generated by a Rec object: extract the N-best result, */
	/* publish hypotheses via pResult, and optionally run SEM processing.  */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_RESULT )   {
		LH_ERROR        lhErr = LH_OK;                /* Error returned by an LH API function. */
		LH_OBJECT       hNBestRes = lh_GetNullObj();
		LH_OBJECT       hResCtx = lh_GetNullObj();
		char            *objName = NULL;              /* User data of the source context (its id string). */
		/* BUGFIX: strObjName was previously constructed from the still-
		 * uninitialized objName pointer (undefined behavior). It is now
		 * assigned only after lh_ObjGetUserData succeeds. */
		string			strObjName;
		size_t			nbrHypothesis;
		char            *szResultWords;
		LH_HYPOTHESIS   *pHypothesis;
		int				ctxIdx;
		int				semIdx;
		LH_OBJECT		h_semresult   = lh_GetNullObj();
		unsigned long	nbr_topics;

		/* NOTE(review): the early 'return false' paths below leak hNBestRes
		 * (and, further down, hAsr2Sem / h_semresult) -- same as the original
		 * code; confirm the caller tears the engine down in that case. */
		/* Get the NBest result object and process it. */
		lhErr = lh_RecCreateResult (recObjs.hRec, &hNBestRes);
		if (LH_OK != lhErr) return false;
		lhErr = lh_ResultBorrowSourceCtx(hNBestRes, &hResCtx);
		if (LH_OK != lhErr) return false;
		lhErr = lh_ObjGetUserData(hResCtx, (void**)&objName);
		if (LH_OK != lhErr) return false;
		if (objName) strObjName = objName;

		/* NOTE(review): operator[] default-inserts index 0 for an unknown
		 * id -- confirm every context id is registered in ctxID2Index. */
		ctxIdx = cfgData.ctxID2Index[strObjName];
		
		lhErr = lh_NBestResultGetNbrHypotheses(hNBestRes, &nbrHypothesis);
		if (LH_OK != lhErr) return false;

		/* Report at most cfgData.nBest hypotheses. */
		int numberOfHyp = (int)nbrHypothesis;
		if (nbrHypothesis > (size_t)cfgData.nBest) numberOfHyp = cfgData.nBest;
		
		for (int i = 0; i < numberOfHyp; i++) {
			/* Retrieve information on the recognition result. */
			lhErr = lh_NBestResultFetchHypothesis (hNBestRes, i, &pHypothesis);
			if (LH_OK != lhErr) return false;
			/* Get the result string. */
			lhErr = lh_NBestResultFetchWords (hNBestRes, i, &szResultWords);
			if (LH_OK != lhErr) return false;

			/* Copy the engine hypothesis into the wrapper's SRHypothesis.
			 * Engine scores are fixed-point with 4 decimal digits, hence
			 * the division by 10000.0 throughout. */
			SRHypothesis hyp;
			hyp.srSource		= SR_ENGINE_VOCON45;
			hyp.srHypID			= i+1;
			hyp.sentStartTime	= pHypothesis->beginTimeMs;
			hyp.sentEndTime		= pHypothesis->endTimeMs;

			/* NOTE(review): unchecked strcpy into fixed-size buffers here
			 * and below -- confirm SRHypothesis fields are large enough for
			 * the longest possible result strings. */
			strcpy(hyp.sentence, szResultWords);

			hyp.sentConf		= pHypothesis->conf / 10000.0;
			hyp.sentLMScore		= pHypothesis->lmScore / 10000.0;
			hyp.sentProb		= pHypothesis->score / 10000.0;

			hyp.numOfWords		= pHypothesis->nbrWords;
			
			for (int iWord = 0; iWord<(int)pHypothesis->nbrWords; iWord++) {
				strcpy(hyp.sentWords[iWord].word, pHypothesis->pWords[iWord].szWord);
				hyp.sentWords[iWord].isSRSlot		= pHypothesis->pWords[iWord].isSlot;
				if (hyp.sentWords[iWord].isSRSlot)
					strcpy(hyp.sentWords[iWord].srSlotName, pHypothesis->pWords[iWord].szWord);
				else
					strcpy(hyp.sentWords[iWord].srSlotName, "");
				hyp.sentWords[iWord].wordConf		= pHypothesis->pWords[iWord].conf / 10000.0;
				hyp.sentWords[iWord].wordLMScore	= pHypothesis->pWords[iWord].lmScore / 10000.0;
				hyp.sentWords[iWord].wordProb		= pHypothesis->pWords[iWord].score / 10000.0;
				hyp.sentWords[iWord].wordStartTime	= pHypothesis->pWords[iWord].beginTimeMs;
				hyp.sentWords[iWord].wordEndTime	= pHypothesis->pWords[iWord].endTimeMs;
				hyp.sentWords[iWord].wordUserID		= pHypothesis->pWords[iWord].userID;
			}
			pResult->addSRHyp(hyp);

			/* Return the fetched data to the engine. */
			lhErr = lh_NBestResultReturnWords (hNBestRes, szResultWords);
			if (LH_OK != lhErr) return false;
			lhErr = lh_NBestResultReturnHypothesis (hNBestRes, pHypothesis);
			if (LH_OK != lhErr) return false;
		}

		//after get all rec results, do sem process if needed
		if (cfgData.cfgCtx[ctxIdx].semId != "") {
			//for nuance sem processor
			PASR2SEM2 hAsr2Sem;

			/* Bridge the ASR N-best result into the SEM processor that is
			 * configured for this context. */
			semIdx = cfgData.semID2Index[cfgData.cfgCtx[ctxIdx].semId];
			lhErr = a2s_CreateAsr2Sem2(&(voconSem.heap_itf[semIdx]), &voconSem.heap_inst[semIdx], voconSem.h_semproc[semIdx], &hAsr2Sem);
			if (LH_OK != lhErr) return false;

			lhErr = a2s_Asr2SemAddNBestResult2(hAsr2Sem, hNBestRes);
			if (LH_OK != lhErr) return false;

			lhErr = a2s_Asr2SemProcess2(hAsr2Sem);
			if (LH_OK != lhErr) return false;

			/* create a new semantics result */
			lhErr = lh_SemProcCreateSemResult(voconSem.h_semproc[semIdx], &h_semresult);
			if (LH_OK != lhErr) return false;

			/* access the content of the semantics result */
			lhErr = lh_SemResultGetNbrTopics(h_semresult, &nbr_topics);
			if (LH_OK != lhErr) return false;

			/* Copy every topic / interpretation / slot into an NLUHypothesis
			 * (same 1/10000 fixed-point score convention as above). */
			for(unsigned long iTopic=0; iTopic<nbr_topics; iTopic++) {
				LH_SEMRESULT_TOPIC		*p_topic;

				lhErr = lh_SemResultFetchTopic(h_semresult, iTopic, &p_topic);
				if (LH_OK != lhErr) return false;

				NLUHypothesis hyp;
				hyp.nluSource		= NLU_ENGINE_VOCON;
				hyp.srHypID			= -1;  /* Not tied to a single SR hypothesis. */
				strcpy(hyp.topic, p_topic->szName);
				hyp.topicConf		= p_topic->score / 10000.0;
				hyp.numOfInterps	= p_topic->nbrInterpretations;

				for(unsigned long iInterp=0; iInterp<p_topic->nbrInterpretations; iInterp++) {
					hyp.interpretations[iInterp].interpConf = p_topic->pInterpretations[iInterp].score / 10000.0;
					hyp.interpretations[iInterp].numOfSlots = p_topic->pInterpretations[iInterp].nbrSlots;

					for(unsigned long iSlot=0; iSlot<p_topic->pInterpretations[iInterp].nbrSlots; iSlot++) {
						strcpy(hyp.interpretations[iInterp].slots[iSlot].slotName, p_topic->pInterpretations[iInterp].pSlots[iSlot].szName);
						strcpy(hyp.interpretations[iInterp].slots[iSlot].slotValue, p_topic->pInterpretations[iInterp].pSlots[iSlot].szValue);
						hyp.interpretations[iInterp].slots[iSlot].slotConf = p_topic->pInterpretations[iInterp].pSlots[iSlot].score / 10000.0;
					}
				}
				pResult->addNLUHyp(hyp);

				/* free the n'th topic */
				lhErr = lh_SemResultReturnTopic(h_semresult, p_topic);
				if (LH_OK != lhErr) return false;
			}

			/* close the semantics result */
			lhErr = lh_ObjClose(&h_semresult);
			if (LH_OK != lhErr) return false;

			lhErr = a2s_Asr2SemClose2(&hAsr2Sem);
			if (LH_OK != lhErr) return false;
		} else if (cfgData.cfgCtx[ctxIdx].idMappingFileName != "") {
			/* Consistency: reuse ctxIdx (identical to the previous duplicate
			 * ctxID2Index lookup). Branch intentionally unimplemented. */
			//for id mapping
		}

		/* Close the NBest result object. */
		lhErr = lh_ObjClose (&hNBestRes);
		if (LH_OK != lhErr) return false;
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_RESULT;
	}

	/* LH_AUDIOCHAIN_EVENT_STOP_AUDIOIN */
	/* Many objects can generate this event: tell the caller to stop feeding audio. */
	if (iEvent.type & LH_AUDIOCHAIN_EVENT_STOP_AUDIOIN )   {
		iEvent.type &= ~LH_AUDIOCHAIN_EVENT_STOP_AUDIOIN;
		*pbStopAudioIn = 1;
	}

	  /* Unhandled event */
	  //if ( event.type != LH_AUDIOCHAIN_NOEVENT )   {
	  //}

	return true;
}
