#include "HybridSpeechRecognizer.h"

#include "vocon45wrapper.h"
#include "GoogleSREngine.h"
#include "UIOSAL.h"
#include "UISpeechEvent.h"
#include "pthread.h"
#include <unistd.h>
#include <iostream>
#include <string>
#include <stdlib.h>     /* atoi */

// Payload handed (by ownership transfer) to a recognizeBySingleEngine thread.
// The thread deletes it before exiting.
typedef struct 
{
	SpeechRecognizer* pEngine;   // engine that performs the recognition (not owned)
	char audioSource[4096];      // path of the audio file to recognize
	bool isWaiting;              // true: block until condAudioFileReadyFlag is signalled first
	int sessionID;               // session the result is filed under in hybridSRResults
} SRRequest;

// Payload handed (by ownership transfer) to a mergeSRResults thread.
// The thread deletes it before exiting.
typedef struct 
{
	HybridSRStrategy* pStrategy; // sync/post-proc strategy to apply (not owned)
	SRNLUReference* pReference;  // reference info attached to the merged result (not owned; see lifetime notes at call sites)
	int sessionID;               // session whose per-engine results are merged
	int numOfEngines;            // number of engines expected to contribute results
} MergeResultRequest;

// Per-session recognition results: sessionID -> (engine type -> result).
// Guarded by mutexHybridSRResults; condWaitingSRResult is signalled on insert.
std::map<int, std::map<SREngineType, SRNLUResult> > hybridSRResults;

// Per-session cancellation flags for recognizer and merge threads.
// Guarded by mutexThreadCancelFlag.
std::map<int, bool> isRecogThreadCancel;
std::map<int, bool> isMergeThreadCancel;
// Set true by the merge thread when a session is completely finished.
// Guarded by mutexRecDoneFlag. NOTE(review): a single global flag is shared
// by all sessions — confirm concurrent sessions are not expected here.
bool isRecDone;

pthread_mutex_t mutexHybridSRResults;
pthread_mutex_t mutexAudioFileReadyFlag;
pthread_mutex_t mutexRecDoneFlag;
pthread_mutex_t mutexThreadCancelFlag;

// Broadcast by endRecording() once the recorded audio file is complete.
pthread_cond_t condAudioFileReadyFlag;
// Signalled whenever a recognizer thread deposits a result.
pthread_cond_t condWaitingSRResult;

// Thread entry point: runs one SR engine over an audio file and deposits the
// result into hybridSRResults under the request's session id, signalling
// condWaitingSRResult so the merge thread can pick it up.
// Takes ownership of the SRRequest* passed as param and deletes it on every exit path.
void *recognizeBySingleEngine(void* param) 
{
	SRRequest* pRequest = (SRRequest*)param;

	if (pRequest->isWaiting) {
		// Block until endRecording() broadcasts that the audio file is complete.
		// NOTE(review): this wait has no predicate/flag — if the broadcast fires
		// before this thread reaches pthread_cond_wait, the wakeup is lost and
		// the thread blocks forever; a spurious wakeup also releases it early.
		// Confirm whether a guarded "file ready" flag is needed here.
		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", 
			"Recognizer Thread (sid=" + toStr(pRequest->sessionID) + ") / " + getSREngineName(pRequest->pEngine->getEngineType()) + ": start waiting for audio file ready");
		pthread_mutex_lock(&mutexAudioFileReadyFlag);
		pthread_cond_wait(&condAudioFileReadyFlag, &mutexAudioFileReadyFlag);
		pthread_mutex_unlock(&mutexAudioFileReadyFlag);
		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", 
			"Recognizer Thread (sid=" + toStr(pRequest->sessionID) + ") / " + getSREngineName(pRequest->pEngine->getEngineType()) + ": end waiting for audio file ready");
	}

	// Bail out early if the session was cancelled while we waited.
	pthread_mutex_lock(&mutexThreadCancelFlag);
	if (isRecogThreadCancel[pRequest->sessionID] == true) {
		pthread_mutex_unlock(&mutexThreadCancelFlag);
		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer",
			"Recognizer Thread (sid=" + toStr(pRequest->sessionID) + ") / " + getSREngineName(pRequest->pEngine->getEngineType()) + ": cancelled");
		delete pRequest;
		pthread_exit(NULL);
		return 0; // unreachable; keeps compilers that don't know pthread_exit quiet
	}
	pthread_mutex_unlock(&mutexThreadCancelFlag);

	UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", 
		"Recognizer Thread (sid=" + toStr(pRequest->sessionID) + ") / " + getSREngineName(pRequest->pEngine->getEngineType()) + ": start recognization");

	// The actual (blocking) recognition call.
	SRNLUResult result;
	SRError error = pRequest->pEngine->recFromFile(pRequest->audioSource, &result);
	
	// Re-check cancellation: recognition may have taken a while.
	pthread_mutex_lock(&mutexThreadCancelFlag);
	if (isRecogThreadCancel[pRequest->sessionID] == true) {
		pthread_mutex_unlock(&mutexThreadCancelFlag);
		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer",
			"Recognizer Thread (sid=" + toStr(pRequest->sessionID) + ") / " + getSREngineName(pRequest->pEngine->getEngineType()) + ": cancelled");
		delete pRequest;
		pthread_exit(NULL);
		return 0; // unreachable
	}
	pthread_mutex_unlock(&mutexThreadCancelFlag);
	
	if (error != SR_SUCCESS) {
		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer",
			"Recognizer Thread (sid=" + toStr(pRequest->sessionID) + ") / " + getSREngineName(pRequest->pEngine->getEngineType()) + ": failed\n" + getSRErrorDesc(error));
	}
	else {
		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", 
			"Recognizer Thread (sid=" + toStr(pRequest->sessionID) + ") / " + getSREngineName(pRequest->pEngine->getEngineType()) + ": result\n" + result.serializeJSON());
	}

	// Publish the result (even on failure an empty result is stored, which
	// still counts toward numOfEngines in the merge thread) and wake the merger.
	pthread_mutex_lock(&mutexHybridSRResults);
	hybridSRResults[pRequest->sessionID][pRequest->pEngine->getEngineType()] = result;
	pthread_cond_signal(&condWaitingSRResult);
	pthread_mutex_unlock(&mutexHybridSRResults);

	UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", 
		"Recognizer Thread (sid=" + toStr(pRequest->sessionID) + ") / " + getSREngineName(pRequest->pEngine->getEngineType()) + ": end recognization");

	delete pRequest;
	pthread_exit(NULL);
	return 0; // unreachable
}

// Thread entry point: waits for per-engine results according to the session's
// sync strategy, merges them according to the post-processing strategy, and
// sends the merged result (or an error) as an outbound speech event.
// Takes ownership of the MergeResultRequest* and deletes it on every exit path.
// Sets isRecDone = true on every exit path so the spawning call can return.
void *mergeSRResults(void* param) 
{
	MergeResultRequest* pRequest = (MergeResultRequest*)param;
	SRNLUResult result;
	// NOTE(review): pReference points at a stack variable in the spawner; it is
	// read here, immediately at thread start, while the spawner is still blocked.
	result.setReference(*(pRequest->pReference));

	pthread_mutex_lock(&mutexRecDoneFlag);
	isRecDone = false;
	pthread_mutex_unlock(&mutexRecDoneFlag);

	// ---- Phase 1: wait for results according to the sync strategy ----
	if (pRequest->pStrategy->getSyncStrategy() == HYBRID_SR_SYNC_SKIP_OTHERS) 
	{
		// Stop waiting as soon as any hypothesis reaches the configured
		// confidence, or once every engine has reported.
		pthread_mutex_lock(&mutexHybridSRResults);
		float currentMaxConf = -1.0;
		for (std::map<SREngineType, SRNLUResult>::iterator itResult = hybridSRResults[pRequest->sessionID].begin(); itResult != hybridSRResults[pRequest->sessionID].end(); itResult++) {
			if (itResult->second.getSRHypSize() > 0 && itResult->second.getSRHyp(0)->sentConf > currentMaxConf)
				currentMaxConf = itResult->second.getSRHyp(0)->sentConf;
		}
		while (currentMaxConf < (float)pRequest->pStrategy->getSyncSkipConf() && (int)hybridSRResults[pRequest->sessionID].size() < pRequest->numOfEngines) {
			// NOTE(review): clearing here discards low-confidence results each
			// round, so the size-based exit can only trigger if all remaining
			// engines report between two wakeups — confirm this is intended.
			hybridSRResults[pRequest->sessionID].clear();
			UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): waiting results (" 
				+ toStr(currentMaxConf) + "<" + toStr(pRequest->pStrategy->getSyncSkipConf()) + ")");
			pthread_cond_wait(&condWaitingSRResult, &mutexHybridSRResults);

			pthread_mutex_lock(&mutexThreadCancelFlag);
			if (isMergeThreadCancel[pRequest->sessionID] == true) {
				pthread_mutex_unlock(&mutexThreadCancelFlag);
				pthread_mutex_unlock(&mutexHybridSRResults);
				UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): cancelled");

				pthread_mutex_lock(&mutexRecDoneFlag);
				isRecDone = true;
				pthread_mutex_unlock(&mutexRecDoneFlag);
				delete pRequest;
				pthread_exit(NULL);
				return 0; // unreachable
			}
			pthread_mutex_unlock(&mutexThreadCancelFlag);

			UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): result recieved");

			// Recompute the best top-hypothesis confidence seen so far.
			for (std::map<SREngineType, SRNLUResult>::iterator itResult = hybridSRResults[pRequest->sessionID].begin(); itResult != hybridSRResults[pRequest->sessionID].end(); itResult++) {
				if (itResult->second.getSRHypSize() > 0 && itResult->second.getSRHyp(0)->sentConf > currentMaxConf)
					currentMaxConf = itResult->second.getSRHyp(0)->sentConf;
			}
		}

		// Tell the still-running recognizer threads their results are no longer needed.
		pthread_mutex_lock(&mutexThreadCancelFlag);
		isRecogThreadCancel[pRequest->sessionID] = true;
		pthread_mutex_unlock(&mutexThreadCancelFlag);

		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): done result waiting (" 
				+ toStr(currentMaxConf) + "/" + toStr(pRequest->pStrategy->getSyncSkipConf()) + ")");
		pthread_mutex_unlock(&mutexHybridSRResults);
	}
	else if (pRequest->pStrategy->getSyncStrategy() == HYBRID_SR_SYNC_WAIT_BY_TIME) 
	{
		// Wait a fixed amount of time (seconds), then take whatever arrived.
		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): sleeping ");
		sleep(pRequest->pStrategy->getSyncWaitTime());
		pthread_mutex_lock(&mutexThreadCancelFlag);
		if (isMergeThreadCancel[pRequest->sessionID] == true) {
			pthread_mutex_unlock(&mutexThreadCancelFlag);
			UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): cancelled");

			pthread_mutex_lock(&mutexRecDoneFlag);
			isRecDone = true;
			pthread_mutex_unlock(&mutexRecDoneFlag);

			delete pRequest;
			pthread_exit(NULL);
			return 0; // unreachable
		}
		pthread_mutex_unlock(&mutexThreadCancelFlag);
		
		pthread_mutex_lock(&mutexThreadCancelFlag);
		isRecogThreadCancel[pRequest->sessionID] = true;
		pthread_mutex_unlock(&mutexThreadCancelFlag);

		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): done sleeping");
	}
	else if (pRequest->pStrategy->getSyncStrategy() == HYBRID_SR_SYNC_WAIT_BY_ENGINE)
	{
		// Wait until one specific engine has delivered its result.
		pthread_mutex_lock(&mutexHybridSRResults);
		std::map<SREngineType, SRNLUResult>::iterator itResult = hybridSRResults[pRequest->sessionID].find(pRequest->pStrategy->getSyncWaitEngine());
		while (itResult == hybridSRResults[pRequest->sessionID].end()) {
			UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): waiting engine");
			pthread_cond_wait(&condWaitingSRResult, &mutexHybridSRResults);

			pthread_mutex_lock(&mutexThreadCancelFlag);
			if (isMergeThreadCancel[pRequest->sessionID] == true) {
				pthread_mutex_unlock(&mutexThreadCancelFlag);
				pthread_mutex_unlock(&mutexHybridSRResults);
				UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): cancelled");

				pthread_mutex_lock(&mutexRecDoneFlag);
				isRecDone = true;
				pthread_mutex_unlock(&mutexRecDoneFlag);

				delete pRequest;
				pthread_exit(NULL);
				return 0; // unreachable
			}
			pthread_mutex_unlock(&mutexThreadCancelFlag);

			itResult = hybridSRResults[pRequest->sessionID].find(pRequest->pStrategy->getSyncWaitEngine());
			UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): result recieved");
		}

		pthread_mutex_lock(&mutexThreadCancelFlag);
		isRecogThreadCancel[pRequest->sessionID] = true;
		pthread_mutex_unlock(&mutexThreadCancelFlag);

		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): done waiting engine");
		pthread_mutex_unlock(&mutexHybridSRResults);
	}
	else if (pRequest->pStrategy->getSyncStrategy() == HYBRID_SR_SYNC_WAIT_ALL)
	{
		// Wait until every engine has delivered its result.
		pthread_mutex_lock(&mutexHybridSRResults);
		while (hybridSRResults[pRequest->sessionID].size() < (unsigned int)pRequest->numOfEngines) {
			UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): waiting results");
			pthread_cond_wait(&condWaitingSRResult, &mutexHybridSRResults);
			pthread_mutex_lock(&mutexThreadCancelFlag);

			if (isMergeThreadCancel[pRequest->sessionID] == true) {
				pthread_mutex_unlock(&mutexThreadCancelFlag);
				pthread_mutex_unlock(&mutexHybridSRResults);
				UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): cancelled");

				pthread_mutex_lock(&mutexRecDoneFlag);
				isRecDone = true;
				pthread_mutex_unlock(&mutexRecDoneFlag);

				delete pRequest;
				pthread_exit(NULL);
				return 0; // unreachable
			}
			pthread_mutex_unlock(&mutexThreadCancelFlag);

			UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): result recieved");
		}

		pthread_mutex_lock(&mutexThreadCancelFlag);
		isRecogThreadCancel[pRequest->sessionID] = true;
		pthread_mutex_unlock(&mutexThreadCancelFlag);

		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): done waiting results");
		pthread_mutex_unlock(&mutexHybridSRResults);
	}

	// Final cancellation check before the merge itself.
	pthread_mutex_lock(&mutexThreadCancelFlag);
	if (isMergeThreadCancel[pRequest->sessionID] == true) {
		pthread_mutex_unlock(&mutexThreadCancelFlag);
		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): cancelled");

		pthread_mutex_lock(&mutexRecDoneFlag);
		isRecDone = true;
		pthread_mutex_unlock(&mutexRecDoneFlag);

		delete pRequest;
		pthread_exit(NULL);
		return 0; // unreachable
	}
	pthread_mutex_unlock(&mutexThreadCancelFlag);

	// ---- Phase 2: merge collected results per the post-processing strategy ----
	UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): start merging");
	pthread_mutex_lock(&mutexHybridSRResults);
	if (pRequest->pStrategy->getPostProcStrategy() == HYBRID_SR_POST_PROC_ADD_ALL)
	{
		// Simply concatenate every engine's hypotheses.
		for (std::map<SREngineType, SRNLUResult>::iterator itResult = hybridSRResults[pRequest->sessionID].begin(); itResult != hybridSRResults[pRequest->sessionID].end(); itResult++)
			result.addAllHyp(&(itResult->second));
	}
	else if (pRequest->pStrategy->getPostProcStrategy() == HYBRID_SR_POST_PROC_NORM_CONF)
	{
		// Keep the top maxNum hypotheses (by sentence confidence) that meet
		// minConf, via an insertion sort into a fixed-size pointer array.
		// The pointers reference SRNLUResult objects still held in
		// hybridSRResults, which are only cleared after the copies below.
		float minConf = pRequest->pStrategy->getPostProcMinConf();
		int   maxNum  = pRequest->pStrategy->getPostProcMaxNum();
		SRHypothesis** tmpResults = new SRHypothesis*[maxNum];
		for (int iResult=0; iResult<maxNum; iResult++)
			*(tmpResults+iResult) = 0;
		for (std::map<SREngineType, SRNLUResult>::iterator itResult = hybridSRResults[pRequest->sessionID].begin(); itResult != hybridSRResults[pRequest->sessionID].end(); itResult++) {
			for (int iResult = 0; iResult < itResult->second.getSRHypSize(); iResult++) {
				SRHypothesis* hyp = itResult->second.getSRHyp(iResult);
				if (hyp->sentConf >= minConf) {
					for (int iSort = 0; iSort < maxNum; iSort++) {
						if (*(tmpResults+iSort) == 0) {
							*(tmpResults+iSort) = hyp;
							break;
						}
						else {
							if (hyp->sentConf > (*(tmpResults+iSort))->sentConf) {
								// Shift lower-ranked entries down, dropping the last.
								for (int jSort = maxNum-1; jSort > iSort; jSort--) {
									*(tmpResults+jSort) = *(tmpResults+jSort-1);
								}
								*(tmpResults+iSort) = hyp;
								break;
							}
						}
					}
				}
			}
		}
		for (int iResult=0; iResult<maxNum; iResult++) {
			if (*(tmpResults+iResult) != 0)
				result.addSRHyp(*(*(tmpResults+iResult)));
		}
		// Fix: array was allocated with new[]; plain delete is undefined behavior.
		delete[] tmpResults;
	}
	else if (pRequest->pStrategy->getPostProcStrategy() == HYBRID_SR_POST_PROC_MERGE)
	{
		// Not implemented: produces an empty result, reported below as SR_ERROR_NOT_RECOGNIZED.
	}
	hybridSRResults[pRequest->sessionID].clear();
	pthread_mutex_unlock(&mutexHybridSRResults);
	UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Result Merge Thread (sid=" + toStr(pRequest->sessionID) + "): end merging");

	// ---- Phase 3: publish the merged result (or an error if nothing survived) ----
	if (result.getSRHypSize() <= 0)
		sendSpeechOutboundEvent(pRequest->sessionID, -1, EVENT_OUT_SR_ERROR, serializeSRError(SR_ERROR_NOT_RECOGNIZED));
	else 
		sendSpeechOutboundEvent(pRequest->sessionID, -1, EVENT_OUT_SR_NLU_RESULT, result.serializeJSON());

	pthread_mutex_lock(&mutexRecDoneFlag);
	isRecDone = true;
	pthread_mutex_unlock(&mutexRecDoneFlag);

	delete pRequest;
	pthread_exit(NULL);
	return 0; // unreachable
}

// Constructs the recognizer: acquires singletons, creates the pthread
// synchronization primitives, then loads engines and strategy configuration.
// Fix: the mutexes/condvars are now initialized BEFORE initialize() runs, so
// no code path can ever operate on an uninitialized primitive.
HybridSpeechRecognizer::HybridSpeechRecognizer()
{
	m_pLogger = UILogger::getInstance();
	m_pConfigurator = UIConfigResourceManager::getInstance();

	pthread_mutex_init(&mutexHybridSRResults, NULL);
	pthread_mutex_init(&mutexAudioFileReadyFlag, NULL);
	pthread_mutex_init(&mutexRecDoneFlag, NULL);
	pthread_mutex_init(&mutexThreadCancelFlag, NULL);
	pthread_cond_init(&condAudioFileReadyFlag, NULL);
	pthread_cond_init(&condWaitingSRResult, NULL);

	// m_isInit records whether engine/strategy setup succeeded.
	m_isInit = initialize();
}

// Releases all engine instances and tears down the pthread primitives
// created in the constructor. m_recordEngine aliases one of m_engines,
// so it must not be deleted separately.
HybridSpeechRecognizer::~HybridSpeechRecognizer(void)
{
	for (size_t idx = 0; idx < m_engines.size(); ++idx)
		delete m_engines[idx];

	pthread_mutex_destroy(&mutexHybridSRResults);
	pthread_mutex_destroy(&mutexAudioFileReadyFlag);
	pthread_mutex_destroy(&mutexRecDoneFlag);
	pthread_mutex_destroy(&mutexThreadCancelFlag);
	pthread_cond_destroy(&condAudioFileReadyFlag);
	pthread_cond_destroy(&condWaitingSRResult);
}

bool HybridSpeechRecognizer::initialize(void)
{
	std::vector<std::string> engineNames;
	if (!m_pConfigurator->getStringList(MODULE_SpeechManager, "hybrid", "engine.sr", engineNames)) 
	{
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "No SR engine defined. ");
		return false;
	}

	std::string recordEngineName;
	m_pConfigurator->getStringValue(MODULE_SpeechManager, "hybrid", "engine.record", recordEngineName);
	
	std::string strategyFile;
	if (!m_pConfigurator->getFileName(MODULE_SpeechManager, "hybrid", "strategy", strategyFile))
	{
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "No Hybrid Strategy defined. ");
		return false;
	}

	if (!parseStrategy(strategyFile))
	{
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Strategy file parsing error: " + strategyFile);
		return false;
	}

	for (int i=0; i<(int)engineNames.size(); i++) {
		SpeechRecognizer* pRecognizer = NULL;
		SREngineType type = getSREngineType(engineNames[i]);
		switch (type) 
		{
		case SR_ENGINE_VOCON33:
//			pRecognizer = new Vocon33();
			break;
		case SR_ENGINE_VOCON45:
			pRecognizer = new Vocon45();
			break;
		case SR_ENGINE_GOOGLE: 
			pRecognizer = new GoogleSREngine();
			break;
		default:
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Unknown SR engine: " + engineNames[i]);
		}
		if (pRecognizer) {
			if (!pRecognizer->initialize()) {
				m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Fail to initialize the SR engine: " + engineNames[i]);
				delete pRecognizer;
			}
			else {
				if (equalsIgnoreCase(engineNames[i], recordEngineName)) {
					m_recordEngineType = type;
					m_recordEngine = pRecognizer;
				}
				m_engines.push_back(pRecognizer);
				m_EngineType2Engine[type] = pRecognizer;
			}
		}
	}

	return true;
}

void HybridSpeechRecognizer::updateContext(std::string key, std::string value)
{
	m_context.updateContext(key, value);
	m_currentStrategy = m_defaultStrategy;

	std::vector<HybridSRStrategy>::iterator itStrategy;
	for (itStrategy = m_strategies.begin(); itStrategy != m_strategies.end(); ++itStrategy) 
	{
		if ((*itStrategy).getCondition()->getCondPairs().size() != 0) {
			if (m_context.evalCond((*itStrategy).getCondition()))
				m_currentStrategy = (*itStrategy);
		}
	}
}

bool HybridSpeechRecognizer::recFromMic(long sessionID, std::string logAudioFileName)
{
	if (!m_recordEngine) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "startRecording(): No record engine specified");
		sendSpeechOutboundEvent(sessionID, -1, EVENT_OUT_SR_ERROR, serializeSRError(SR_ERROR_ENGINE_NOT_START));
		return false;
	}

	pthread_mutex_lock(&mutexThreadCancelFlag);
	isRecogThreadCancel[sessionID] = false;
	isMergeThreadCancel[sessionID] = false;
	pthread_mutex_unlock(&mutexThreadCancelFlag);

	pthread_mutex_lock(&mutexHybridSRResults);
	hybridSRResults[sessionID].clear();
	pthread_mutex_unlock(&mutexHybridSRResults);
	
	MergeResultRequest* pMergeRequest = new MergeResultRequest();
	pMergeRequest->numOfEngines = m_currentStrategy.getEngines().size();
	pMergeRequest->pStrategy = &m_currentStrategy;
	pMergeRequest->sessionID = sessionID;
	SRNLUReference ref;
	ref.audioSource = logAudioFileName;
	pMergeRequest->pReference = &ref;

	pthread_t mergeThread;
	pthread_create(&mergeThread, NULL, mergeSRResults, (void*)(pMergeRequest));

	for (int iEngine=0; iEngine<(int)m_engines.size(); iEngine++)
	{
		SREngineType type = m_engines[iEngine]->getEngineType();
		if (type != m_recordEngineType && m_currentStrategy.isEngineIncluded(type)) {
			SRRequest* pRequest = new SRRequest();
			pRequest->pEngine = m_engines[iEngine];
			strcpy(pRequest->audioSource, logAudioFileName.c_str());
			pRequest->isWaiting = true;
			pRequest->sessionID = sessionID;
			pthread_t recognizeThread;
			pthread_create(&recognizeThread, NULL, recognizeBySingleEngine, (void*)(pRequest));
		}
	}

	SRNLUResult baseResult;
	SRError errorCode = m_recordEngine->recFromMic(sessionID, logAudioFileName.c_str(), &baseResult);

	if (errorCode != SR_SUCCESS) {
		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", "Record fail, cancel recog and merge thread");
		pthread_mutex_lock(&mutexThreadCancelFlag);
		isRecogThreadCancel[sessionID] = true;
		isMergeThreadCancel[sessionID] = true;
		pthread_mutex_unlock(&mutexThreadCancelFlag);

		if (m_recordEngine->isRecording())
			endRecording(sessionID);

		pthread_mutex_lock(&mutexHybridSRResults);
		pthread_cond_signal(&condWaitingSRResult);
		pthread_mutex_unlock(&mutexHybridSRResults);
		sendSpeechOutboundEvent(sessionID, -1, EVENT_OUT_SR_ERROR, serializeSRError(errorCode));
		return false;
	}

	if (m_recordEngine->isRecording())
		endRecording(sessionID);

	if (m_currentStrategy.isEngineIncluded(m_recordEngineType)) {
		pthread_mutex_lock(&mutexHybridSRResults);
		hybridSRResults[sessionID][m_recordEngineType] = baseResult;
		pthread_cond_signal(&condWaitingSRResult);
		pthread_mutex_unlock(&mutexHybridSRResults);
		UILogger::getInstance()->debug(MODULE_SpeechManager, "HybridSpeechRecognizer", 
			"Recognizer Thread / " + getSREngineName(m_recordEngineType) + ": result\n" + baseResult.serializeJSON());
	}

	while (true) {
		pthread_mutex_lock(&mutexRecDoneFlag);
		if (isRecDone) {
			pthread_mutex_unlock(&mutexRecDoneFlag);

			pthread_mutex_lock(&mutexThreadCancelFlag);
			isRecogThreadCancel[sessionID] = true;
			isMergeThreadCancel[sessionID] = true;
			pthread_mutex_unlock(&mutexThreadCancelFlag);

			return true;
		}
		pthread_mutex_unlock(&mutexRecDoneFlag);
		sleep(100);
	}
	return true;
}

bool HybridSpeechRecognizer::recFromFile(long sessionID, const char* fileName)
{
	pthread_mutex_lock(&mutexHybridSRResults);
	hybridSRResults[sessionID].clear();
	pthread_mutex_unlock(&mutexHybridSRResults);
	
	MergeResultRequest* pMergeRequest = new MergeResultRequest();
	pMergeRequest->numOfEngines = m_currentStrategy.getEngines().size();
	pMergeRequest->pStrategy = &m_currentStrategy;
	pMergeRequest->sessionID = sessionID;
	SRNLUReference ref;
	ref.audioSource = std::string(fileName);
	pMergeRequest->pReference = &ref;

	pthread_t mergeThread;
	pthread_create(&mergeThread, NULL, mergeSRResults, (void*)(pMergeRequest));

	for (int iEngine=0; iEngine<(int)m_engines.size(); iEngine++)
	{
		SREngineType type = m_engines[iEngine]->getEngineType();
		if (m_currentStrategy.isEngineIncluded(type)) {
			SRRequest* pRecognizeRequest = new SRRequest();
			pRecognizeRequest->pEngine = m_engines[iEngine];
			strcpy(pRecognizeRequest->audioSource, fileName);
			pRecognizeRequest->isWaiting = false;

			pthread_t recognizeThread;
			pthread_create(&recognizeThread, NULL, recognizeBySingleEngine, (void*)(pRecognizeRequest));
		}
	}

	while (true) {
		pthread_mutex_lock(&mutexRecDoneFlag);
		if (isRecDone) {
			pthread_mutex_unlock(&mutexRecDoneFlag);
			return true;
		}
		pthread_mutex_unlock(&mutexRecDoneFlag);
		sleep(100);
	}
	return true;
}

// Batch-test helper: runs every configured engine (sequentially) over the
// reference audio file, pools all successful hypotheses into one result, and
// forwards it to the batch tester and/or the NLU event channel.
bool HybridSpeechRecognizer::test(SRNLUReference reference)
{
	SRNLUResult pooled;
	pooled.setReference(reference);

	for (size_t idx = 0; idx < m_engines.size(); ++idx)
	{
		SRNLUResult engineResult;
		// Failed engines are silently skipped; only successes contribute.
		if (m_engines[idx]->recFromFile(reference.audioSource.c_str(), &engineResult) == SR_SUCCESS)
			pooled.addAllSRHyp(&engineResult);
	}

	SRNLUBatchTester* pTester = SRNLUBatchTester::getInstance();
	if (pTester->isTestSR())
		pTester->logTestResult(pooled);

	if (pTester->isTestNLU())
		sendSpeechOutboundEvent(-1, -1, EVENT_OUT_SR_NLU_RESULT, pooled.serializeJSON());

	return true;
}

// Forwards the stop request to the record engine; returns false when no
// record engine was configured.
bool HybridSpeechRecognizer::stopRecording(long sessionID)
{
	return m_recordEngine ? m_recordEngine->stopRecording(sessionID) : false;
}

// Not implemented: always reports failure. Cancellation of in-flight sessions
// is currently driven through the isRecogThreadCancel/isMergeThreadCancel
// flags instead. TODO: implement or document why this is intentionally a stub.
bool HybridSpeechRecognizer::cancel(void)
{
	return false;
}

// Parses a <CONTEXT NAME="..." VALUE="..."/> tag and registers the pair as an
// activation condition on the strategy. Returns false (with a logged error)
// when either attribute is missing or unterminated.
//
// Fix: find() positions are now std::string::size_type. The previous 32-bit
// "unsigned" truncated npos on LP64 platforms, so the == npos checks could
// never succeed and a missing attribute went undetected.
bool HybridSpeechRecognizer::parseConditionSection(std::string content, HybridSRStrategy* strategy)
{
	std::string::size_type startPos = content.find("NAME=\"");
	if (startPos == std::string::npos) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Context section parsing error: no name defined. ");
		return false;
	}
	std::string::size_type endPos = content.find("\"", startPos+6);
	if (endPos == std::string::npos) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Context section parsing error: no name defined. ");
		return false;
	}
	std::string key = content.substr(startPos+6, endPos-startPos-6);

	startPos = content.find("VALUE=\"");
	if (startPos == std::string::npos) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Context section parsing error: no value defined. ");
		return false;
	}
	endPos = content.find("\"", startPos+7);
	if (endPos == std::string::npos) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Context section parsing error: no value defined. ");
		return false;
	}
	std::string value = content.substr(startPos+7, endPos-startPos-7);

	strategy->addCondition(key, value);
	return true;
}

// Parses an <ENGINE ID="..."/> tag and adds the named engine type to the
// strategy. Returns false (with a logged error) when the ID attribute is
// missing or unterminated.
//
// Fix: find() positions are now std::string::size_type (a 32-bit "unsigned"
// truncates npos on LP64 platforms, breaking the == npos checks).
bool HybridSpeechRecognizer::parseEngineSection(std::string content, HybridSRStrategy* strategy)
{
	std::string::size_type startPos = content.find("ID=\"");
	if (startPos == std::string::npos) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Engine section parsing error: no id defined. ");
		return false;
	}
	std::string::size_type endPos = content.find("\"", startPos+4);
	if (endPos == std::string::npos) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Engine section parsing error: no id defined. ");
		return false;
	}
	std::string id = content.substr(startPos+4, endPos-startPos-4);

	strategy->addEngine(getSREngineType(id));
	return true;
}

// Parses a <SYNCHRONIZATION TYPE="..." .../> tag, reading the extra attribute
// each sync strategy requires (MAXWAITINGTIME, ENGINE, or optional MINCONF),
// and configures the strategy accordingly. Returns false on malformed input.
//
// Fix: find() positions are now std::string::size_type (a 32-bit "unsigned"
// truncates npos on LP64 platforms, breaking the == npos checks).
bool HybridSpeechRecognizer::parseSyncSection(std::string content, HybridSRStrategy* strategy)
{
	std::string::size_type startPos = content.find("TYPE=\"");
	if (startPos == std::string::npos) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Sync section parsing error: no type defined. ");
		return false;
	}
	std::string::size_type endPos = content.find("\"", startPos+6);
	if (endPos == std::string::npos) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Sync section parsing error: no type defined. ");
		return false;
	}

	HYBRID_SR_SYNC_STRATEGY_TYPE type = getHybridSyncStrategyType(content.substr(startPos+6, endPos-startPos-6));
	if (type == HYBRID_SR_SYNC_WAIT_BY_TIME) 
	{
		// Requires MAXWAITINGTIME="<seconds>".
		startPos = content.find("MAXWAITINGTIME=\"");
		if (startPos == std::string::npos) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Sync section parsing error: no maxWaitingTime defined. ");
			return false;
		}
		std::string::size_type timeEndPos = content.find("\"", startPos+16);
		if (timeEndPos == std::string::npos) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Sync section parsing error: no maxWaitingTime defined. ");
			return false;
		}
		int maxWaitingTime = atoi(content.substr(startPos+16, timeEndPos-startPos-16).c_str());
		strategy->setSyncStrategy(type);
		strategy->setSyncWaitTime(maxWaitingTime);
	}
	else if (type == HYBRID_SR_SYNC_WAIT_BY_ENGINE) 
	{
		// Requires ENGINE="<engine name>".
		startPos = content.find("ENGINE=\"");
		if (startPos == std::string::npos) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Sync section parsing error: no waiting engine defined. ");
			return false;
		}
		std::string::size_type engineEndPos = content.find("\"", startPos+8);
		if (engineEndPos == std::string::npos) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Sync section parsing error: no waiting engine defined. ");
			return false;
		}
		SREngineType engine = getSREngineType(content.substr(startPos+8, engineEndPos-startPos-8));
		strategy->setSyncStrategy(type);
		strategy->setSyncWaitEngine(engine);
	}
	else if (type == HYBRID_SR_SYNC_SKIP_OTHERS) 
	{
		// MINCONF is optional; absent means 0.0 (accept the first result).
		startPos = content.find("MINCONF=\"");
		if (startPos == std::string::npos) {
			strategy->setSyncSkipConf(0.0);
		}
		else {
			std::string::size_type confEndPos = content.find("\"", startPos+9);
			if (confEndPos == std::string::npos) {
				m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Sync section parsing error: minconf definition error. ");
				return false;
			}
			strategy->setSyncSkipConf(atof(content.substr(startPos+9, confEndPos-startPos-9).c_str()));
		}
		strategy->setSyncStrategy(type);
	}
	else {
		// WAIT_ALL and any other type need no extra attributes.
		strategy->setSyncStrategy(type);
	}
	return true;
}

// Parses a <POSTPROCESSING TYPE="..." .../> tag; the NORM_CONF type
// additionally requires MINCONF and MAXN attributes. Returns false on
// malformed input.
//
// Fix: find() positions are now std::string::size_type (a 32-bit "unsigned"
// truncates npos on LP64 platforms, breaking the == npos checks).
bool HybridSpeechRecognizer::parsePostProcessSection(std::string content, HybridSRStrategy* strategy)
{
	std::string::size_type startPos = content.find("TYPE=\"");
	if (startPos == std::string::npos) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "PostProc section parsing error: no type defined. ");
		return false;
	}
	std::string::size_type endPos = content.find("\"", startPos+6);
	if (endPos == std::string::npos) {
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "PostProc section parsing error: no type defined. ");
		return false;
	}

	HYBRID_SR_POST_PROC_STRATEGY_TYPE type = getHybridPostProcSyncStrategyType(content.substr(startPos+6, endPos-startPos-6));
	if (type == HYBRID_SR_POST_PROC_NORM_CONF) 
	{
		// Requires MINCONF="<float>" (confidence floor) ...
		startPos = content.find("MINCONF=\"");
		if (startPos == std::string::npos) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "PostProc section parsing error: no minConf defined. ");
			return false;
		}
		std::string::size_type confEndPos = content.find("\"", startPos+9);
		if (confEndPos == std::string::npos) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "PostProc section parsing error: no minConf defined. ");
			return false;
		}
		float minConf = atof(content.substr(startPos+9, confEndPos-startPos-9).c_str());
		
		// ... and MAXN="<int>" (max number of hypotheses kept).
		startPos = content.find("MAXN=\"");
		if (startPos == std::string::npos) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "PostProc section parsing error: no maxN defined. ");
			return false;
		}
		endPos = content.find("\"", startPos+6);
		if (endPos == std::string::npos) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "PostProc section parsing error: no maxN defined. ");
			return false;
		}
		int maxN = atoi(content.substr(startPos+6, endPos-startPos-6).c_str());

		strategy->setPostProcStrategy(type);
		strategy->setPostProcMaxNum(maxN);
		strategy->setPostProcMinConf(minConf);
	}
	else {
		// ADD_ALL / MERGE need no extra attributes.
		strategy->setPostProcStrategy(type);
	}
	return true;
}

// Parses the body of one <STRATEGY>...</STRATEGY> section: all <CONTEXT .../>
// conditions, all <ENGINE .../> entries, and the optional <SYNCHRONIZATION/>
// and <POSTPROCESSING/> tags, removing each tag from the working copy as it
// is consumed. Validates that engines, a sync strategy, and a post-proc
// strategy were all defined. Returns false on any parse/validation failure.
//
// Fix: find() positions are now std::string::size_type (a 32-bit "unsigned"
// truncates npos on LP64 platforms, so the npos comparisons controlling every
// loop/guard below were broken).
bool HybridSpeechRecognizer::parseStrategySection(std::string content, HybridSRStrategy* strategy)
{
	std::string::size_type startPos = content.find("<CONTEXT ");
	while (startPos != std::string::npos) 
	{
		std::string::size_type endPos = content.find("/>", startPos);
		if (endPos == std::string::npos)
			return false;

		std::string s = content.substr(startPos, endPos-startPos+2);
		if (!parseConditionSection(s, strategy)) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Context section parsing error: " + s);
			return false;
		}
		content = content.replace(startPos, endPos-startPos+2, "");
		startPos = content.find("<CONTEXT ");
	}

	startPos = content.find("<ENGINE ");
	while (startPos != std::string::npos) 
	{
		std::string::size_type endPos = content.find("/>", startPos);
		if (endPos == std::string::npos)
			return false;

		std::string s = content.substr(startPos, endPos-startPos+2);
		if (!parseEngineSection(s, strategy)) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Engine section parsing error: " + s);
			return false;
		}
		content = content.replace(startPos, endPos-startPos+2, "");
		startPos = content.find("<ENGINE ");
	}

	startPos = content.find("<SYNCHRONIZATION ");
	if (startPos != std::string::npos) 
	{
		std::string::size_type endPos = content.find("/>", startPos);
		if (endPos == std::string::npos)
			return false;

		std::string s = content.substr(startPos, endPos-startPos+2);
		if (!parseSyncSection(s, strategy)) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Sync section parsing error: " + s);
			return false;
		}
		content = content.replace(startPos, endPos-startPos+2, "");
	}

	startPos = content.find("<POSTPROCESSING ");
	if (startPos != std::string::npos) 
	{
		std::string::size_type endPos = content.find("/>", startPos);
		if (endPos == std::string::npos)
			return false;

		std::string s = content.substr(startPos, endPos-startPos+2);
		if (!parsePostProcessSection(s, strategy)) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "PostProc section parsing error: " + s);
			return false;
		}
		content = content.replace(startPos, endPos-startPos+2, "");
	}

	// A usable strategy must name at least one engine plus both strategies.
	if (strategy->getEngines().size() == 0)	
	{
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "No engine defined: " + content);
		return false;
	}

	if (strategy->getSyncStrategy() == HYBRID_SR_SYNC_UNKNOWN)	
	{
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "No sync defined: " + content);
		return false;
	}
	
	if (strategy->getPostProcStrategy() == HYBRID_SR_POST_PROC_UNKNOWN)
	{
		m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "No post-proc defined: " + content);
		return false;
	}
	return true;
}

// Loads and parses the strategy XML-like file: reads it into one uppercased
// string, strips <!-- --> comments, then parses each <STRATEGY>...</STRATEGY>
// section into m_strategies. A section with no conditions becomes the default
// (and initial current) strategy. On any error m_strategies is cleared and
// false is returned.
//
// Fix: find() positions are now std::string::size_type (a 32-bit "unsigned"
// truncates npos on LP64 platforms, so the npos comparisons below — including
// both loop conditions — were broken).
bool HybridSpeechRecognizer::parseStrategy(std::string filename)
{
	UIFile strategyFile;

	// Read the whole file into one string; trimming each line collapses
	// indentation so tag searches are position-independent.
	strategyFile.openFile(filename.c_str());

	std::string content;
	std::string line;
	while (strategyFile.getLine(line)) {
		trim(line);
		content += line;
	}
	strategyFile.closeFile();

	content = uppercase(content);

	// Strip every <!-- ... --> comment; an unterminated comment is an error.
	std::string::size_type commentStartPos = content.find("<!--");
	while (commentStartPos != std::string::npos) 
	{
		std::string::size_type commentEndPos = content.find("-->",commentStartPos);
		if (commentEndPos == std::string::npos)
		{
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Comment section is not matching.");
			return false;
		}
		content = content.replace(commentStartPos, commentEndPos-commentStartPos+3, "");
		commentStartPos = content.find("<!--");
	}

	// Parse each strategy section, removing it from the working copy.
	std::string::size_type strategyStartPos = content.find("<STRATEGY ");
	while (strategyStartPos != std::string::npos) 
	{
		std::string::size_type strategyEndPos = content.find("</STRATEGY>", strategyStartPos);
		if (strategyEndPos == std::string::npos)
		{
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Strategy section is not matching.");
			m_strategies.clear();
			return false;
		}
		
		std::string s = content.substr(strategyStartPos, strategyEndPos-strategyStartPos+11);
		HybridSRStrategy strategy;
		if (!parseStrategySection(s, &strategy)) {
			m_pLogger->err(MODULE_SpeechManager, "HybridSpeechRecognizer", "Strategy section parsing error: " + s);
			m_strategies.clear();
			return false;
		}
		m_strategies.push_back(strategy);

		// A condition-free strategy is the default fallback.
		if (strategy.getCondition()->getCondPairs().size() == 0) {
			m_defaultStrategy = m_strategies[m_strategies.size()-1];
			m_currentStrategy = m_defaultStrategy;
		}
		
		content = content.replace(strategyStartPos, strategyEndPos-strategyStartPos+11, "");
		strategyStartPos = content.find("<STRATEGY ");
	}
	
	return true;
}

// Signals that the recorded audio file is complete: wakes every recognizer
// thread blocked on condAudioFileReadyFlag, then notifies listeners that
// recording has ended.
void HybridSpeechRecognizer::endRecording(int sessionID)
{
	pthread_mutex_lock(&mutexAudioFileReadyFlag);
	pthread_cond_broadcast(&condAudioFileReadyFlag);
	pthread_mutex_unlock(&mutexAudioFileReadyFlag);

	sendSpeechOutboundEvent(sessionID, -1, EVENT_OUT_END_RECORDING, "");
}
