#include "StdAfx.h"
#include "HXSpeech.h"
#include "HXLink.h"
#include "HXWindow.h"

//=============================================================================

HXSpeech g_speechtts;				// global text-to-speech subsystem instance (speech system)

//=============================================================================

/**
* Default constructor: sets up default playback/mixing parameters,
* creates the Speak() critical section and selects the SAPI engine.
*/
HXSpeech::HXSpeech(void)
: m_speaking(false),
  m_pDisp(NULL),
  m_fps(120),					// visual frame rate used to map wav byte position -> viseme frame index
  m_mixFrames(2),				// half-width of the viseme mixing window (see RenderVisemeMix)
  m_mixFactor(0.5f),			// per-frame falloff of the mixing window
  m_visemeModulator(0.6),
  m_rate(-3.0),					// engine speech rate passed to the TTS engine
  m_avSyncAdjust(0.0),			// overwritten below from the engine's own A/V sync value
  m_overlap(0.5),
  m_engine(NULL),				// must start NULL: SetEngine() SAFE_DELETEs the previous engine
  m_pbEarlyExit(NULL)
  
{
	InitializeCriticalSectionAndSpinCount(&m_criticalSection_Speech, 0x80000400);
	SetEngine(HXSAPI);			// default to the SAPI engine
	m_avSyncAdjust = m_engine->GetAVSyncAdjust();
}

/**
* Construct with explicit playback/mixing parameters.
*
* @param fps              visual frame rate (wav position -> viseme frame mapping)
* @param mixFrames        half-width of the viseme mixing window
* @param mixFactor        per-frame falloff of the mixing window
* @param visemeModulator  viseme amplitude modulation passed to the engine
* @param rate             engine speech rate
*/
HXSpeech::HXSpeech(int fps, int mixFrames, float mixFactor, double visemeModulator, double rate)
: m_speaking(false),			// FIX: was left uninitialized in this overload (default ctor sets it)
  m_pDisp(NULL),
  m_fps(fps),
  m_mixFrames(mixFrames),
  m_mixFactor(mixFactor),
  m_visemeModulator(visemeModulator),
  m_rate(rate),
  m_avSyncAdjust(0.0),			// overwritten below from the engine's own A/V sync value
  m_overlap(0.5),
  m_engine(NULL),				// must start NULL: SetEngine() SAFE_DELETEs the previous engine
  m_pbEarlyExit(NULL)
{
	InitializeCriticalSectionAndSpinCount(&m_criticalSection_Speech, 0x80000400);
	SetEngine(HXSAPI);
	m_avSyncAdjust = m_engine->GetAVSyncAdjust();
}

HXSpeech::~HXSpeech(void)
{
	// release the active TTS engine and the lock protecting Speak()
	SAFE_DELETE(m_engine);
	DeleteCriticalSection(&m_criticalSection_Speech);
}

/**
* Replace the active TTS engine with a freshly constructed one and push the
* current speech parameters into it.
*
* @param id  engine to create (HXSAPI or HXOpenMary)
* @return    0 on success, -1 if id is not a known engine
*/
int HXSpeech::SetEngine(HXTTSEngineID id)
{
	SAFE_DELETE(m_engine);

	switch (id)
	{
	case HXSAPI:
		m_engine = new HXTTSEngineSAPI(); //m_fps,m_visemeModulator,m_rate,m_overlap);
		break;
	case HXOpenMary:
		m_engine = new HXTTSEngineMary(); //m_fps,m_visemeModulator,m_rate,m_overlap);
		break;
	default:
		// FIX: an unknown id previously fell through with m_engine == NULL
		// and the configuration calls below dereferenced NULL.
		return -1;
	}

	// propagate the cached parameters to the new engine
	m_engine->SetAVSyncAdjust(m_avSyncAdjust);
	m_engine->SetFPS(m_fps);
	m_engine->SetVisemeModulator(m_visemeModulator);
	m_engine->SetRate(m_rate);
	m_engine->SetOverlap(m_overlap);
	m_engine->SetRampFunc(m_rampFunc);
	m_engine->SetVisemeMap(m_visemeMap);

	return 0;
}



/**
* Speak the given text: generates audio + viseme frames via the current TTS
* engine, plays the audio through waveOut and drives the display's morph
* targets in sync with the wav playback position until playback finishes
* (or *m_pbEarlyExit becomes true).
*
* @param text  text to speak
* @return      0 on success, -1 if audio playback could not be started
*/
HRESULT HXSpeech::Speak(const wstring& text)
{

	HXStopWatch watch;

	HWAVEOUT hWaveOut;
	WAVEHDR waveHdr;
	MMTIME wavePosition;
	double factor;				// wav bytes-per-second (byte position -> seconds)
	HPSTR lpData;				// waveform data buffer, allocated by playMMIO, freed here
	wavePosition.wType = TIME_BYTES;

	int renderTime = 0;
	size_t frame_position = 0;
	size_t prev_frame = static_cast<size_t>(-1);
	int no_change_count = 0;

	vector<vector<HXViseme>> frames;
	string audio;

	m_engine->GenerateSpeech(text, audio, frames);

	//WriteFramesToFile(frames);
	//m_mixout.open("viseme_mixout.txt");

	if (frames.size() == 0)
	{
		#ifdef _DEBUG
			g_window.ShowStatus(L"No visual frames generated");
		#endif
	}

	Lock();

	// playStatus: 0 = playing from a file (audio holds a path), 1 = in-memory wav
	int playStatus = playMMIO(audio, hWaveOut, waveHdr, &factor, lpData);

	if (playStatus < 0)
	{
		#ifdef _DEBUG
			g_window.ShowStatus(L"Audio playback failed");
		#endif
		Unlock();
		return -1;
	}

	// FIX: only delete the temp wav when audio actually was a file path
	// (previously DeleteFile was attempted on raw wav bytes for memory playback)
	if (playStatus == 0)
		DeleteFile(StringToWString(audio).c_str());

	HXAliveProcessor* pAlive = NULL;
	if (m_pDisp) pAlive = m_pDisp->GetAlive();
	if (pAlive) pAlive->Trig(0, false);
	g_link.SetWordIndex(0);
	
	while (!(waveHdr.dwFlags & WHDR_DONE))
	{		
		// start time for morphLag
		watch.startTimer();

		// get current position in wav and convert it to a viseme frame index
		waveOutGetPosition(hWaveOut,&wavePosition,sizeof(MMTIME));
		//frame_position =  (size_t)(((wavePosition.u.cb / factor) + morphLag) * m_fps);
		frame_position =  (size_t)(((wavePosition.u.cb / factor)) * m_fps);

		#ifdef _DEBUG
		// FIX: guard the modulo — m_fps < 15 would make (m_fps/15) zero
		if (frames.size() && (m_fps >= 15) && !(frame_position % (m_fps/15)))
			g_window.ShowStatus(L"Playing morph frame %d (of %d) with lag %dms", frame_position, frames.size(), renderTime);
		#endif
		
		if (frame_position == prev_frame)
		{
			//g_window.ShowStatus(L"no change");
			no_change_count++;
			Sleep(1);	// FIX: yield instead of busy-spinning until the wav position advances
			continue;
		}

		// check boundary
		if (m_pDisp && (frame_position < frames.size()))
		{
			m_pDisp->Lock();

			m_pDisp->ResetMorphValues(HXMS_SPEECH);

			vector<HXViseme>::iterator visemeIter;

			if (m_pDisp->MorphCount())
			{
				RenderVisemeMix(frames, frame_position, m_mixFrames, m_mixFactor);
				m_pDisp->Unlock();
				// show the corresponding viseme group
				for(visemeIter = frames.at(frame_position).begin(); visemeIter != frames.at(frame_position).end(); ++visemeIter) 
				{
					// labels starting with \x01 encode "\x01<word-index>\x01<word>"
					if (visemeIter->label[0] == 1)
					{
						// show the currently spoken word
						size_t i = 1;
						// FIX: bounds check — a malformed label without the closing
						// sentinel previously scanned past the end of the string
						while (i < visemeIter->label.size() && visemeIter->label[i] != 1) { i++; }
						wstring sidx = visemeIter->label.substr(1,i);
						int wordIndex = _wtoi(sidx.c_str());
						if (pAlive) pAlive->Trig(static_cast<unsigned int>(wordIndex), true);
						g_link.SetWordIndex(wordIndex);
						wstring word = (i + 1 < visemeIter->label.size()) ? visemeIter->label.substr(i+1) : wstring();
						g_window.ShowStatus(word.c_str());
						break;
					}
					
				}
			}
			else
			{
				m_pDisp->Unlock();
			}
		}

		prev_frame = frame_position;

		// check for an early exit flag		
		if (m_pbEarlyExit && *m_pbEarlyExit) 
		{
			waveOutReset(hWaveOut);
			break;
		}

		// update time
		watch.stopTimer();
		// sleep for remaining frame time
		renderTime = static_cast<int>(watch.getElapsedTime() * 1000);
		int frameTime = static_cast<int>(1000.0f/m_fps);
		if (renderTime < frameTime)
		{
			Sleep(frameTime - renderTime);
		}
	}

#ifdef _DEBUG
	g_window.ShowStatus(L"No change count: %d", no_change_count);
#endif

	//m_mixout.close();

	if (m_pDisp) m_pDisp->ResetMorphValues(HXMS_SPEECH);
	g_link.SetWordIndex(-1);
	
	// tidy up audio
	waveOutUnprepareHeader(hWaveOut, &waveHdr, sizeof(WAVEHDR)); 
	waveOutClose(hWaveOut);	// FIX: device handle was leaked on every Speak() call
	free(lpData);

	Unlock();
	return 0;
}

/**
* Blend the visemes of a window of frames around frame_position into the
* display's HXMS_SPEECH morph values.
*
* A symmetric weight window of width 2*mix_size+1 is built, centered on
* frame_position: the center weight is 1 and each step away from the center
* multiplies the previous weight by mix_factor. Weights are normalized by
* their sum (mix_total). Frames before the start of the sequence contribute
* their weight to the first real frame ("prelude"); frames past the end
* trigger a negative contribution from earlier frames to sharpen the cut-off.
*
* @param frames          per-frame viseme lists produced by the TTS engine
* @param frame_position  index of the frame currently being played
* @param mix_size        half-width of the mixing window
* @param mix_factor      per-step falloff of the window weights
* @param ampl_factor     overall amplitude scale applied to every contribution
*/
void HXSpeech::RenderVisemeMix(vector<vector<HXViseme>>& frames, size_t frame_position, size_t mix_size, float mix_factor, float ampl_factor)
{
	// build the symmetric weight window and its normalization sum
	vector<float> mix_coef(2*mix_size+1);
	float mix_total = mix_coef[mix_size] = 1;
	for (size_t i = 0; i < mix_size; i++)
	{
		mix_coef[mix_size+i+1] = mix_coef[mix_size-i-1] = mix_coef[mix_size+i] * mix_factor;
		mix_total += 2 * mix_coef[mix_size+i+1];
	}

	float prelude = 0;
	for (size_t i = 0; i < mix_size*2+1; i++)
	{
		// NOTE(review): int - size_t promotes to unsigned and relies on the
		// wrap-around converting back to a negative int when
		// frame_position < mix_size — works on two's-complement targets,
		// but worth confirming/cleaning up.
		int rframe = static_cast<int>(frame_position) - mix_size + i;
		if (rframe < 0)
		{
			// window extends before frame 0: fold this weight into the
			// first real frame we render (applied via `prelude` below)
			prelude += mix_coef[i]/mix_total;
		}
		else
		if (rframe >= static_cast<int>(frames.size()))
		{
			int check_min = rframe - mix_size - 1; // for really short speech (total frames less than 2*mix_size+1)
			if (check_min >= 0)
			{
				// negative render (for proper cut-off)
				for (vector<HXViseme>::iterator visemeIter = frames.at(rframe - mix_size - 1).begin(); visemeIter != frames.at(rframe - mix_size - 1).end(); ++visemeIter) 
				{
					// labels starting with \x01 are word markers, not visemes; amplitude <= -100 filters bad values
					if ((visemeIter->amplitude > -100.0f) && (visemeIter->label[0] != 1)) //  && (visemeIter->label.find(L"Phoneme:") == 0 || visemeIter->label.find(L"Modifier:") == 0))
					{
						m_pDisp->ChangeMorphValue(HXMS_SPEECH, visemeIter->label, -ampl_factor*mix_coef[i - mix_size - 1]/mix_total*static_cast<float>(visemeIter->amplitude));
						//m_mixout << visemeIter->label << ";" << visemeIter->amplitude << ";" << -ampl_factor*mix_coef[i - mix_size - 1]/mix_total << ";" << -ampl_factor*mix_coef[i - mix_size - 1]/mix_total*static_cast<float>(visemeIter->amplitude);
						//if (i != (mix_size*2+1)-1) m_mixout << "|";
					}

				}
			}
		}
		else
		{
			// in-range frame: add its weighted viseme amplitudes
			for (vector<HXViseme>::iterator visemeIter = frames.at(rframe).begin(); visemeIter != frames.at(rframe).end(); ++visemeIter) 
			{
				if ((visemeIter->amplitude > -100.0f) && (visemeIter->label[0] != 1)) //  && (visemeIter->label.find(L"Phoneme:") == 0 || visemeIter->label.find(L"Modifier:") == 0)) // some amplitudes are NaN? (cause head explosion)
				{
					m_pDisp->ChangeMorphValue(HXMS_SPEECH, visemeIter->label, ampl_factor*(mix_coef[i]/mix_total+prelude)*static_cast<float>(visemeIter->amplitude));

					//m_mixout << visemeIter->label << ";" << ampl_factor*(mix_coef[i]/mix_total+prelude)*static_cast<float>(visemeIter->amplitude);
					//m_mixout << visemeIter->label << ";" << visemeIter->amplitude << ";" << ampl_factor*(mix_coef[i]/mix_total+prelude) << ";" << ampl_factor*(mix_coef[i]/mix_total+prelude)*static_cast<float>(visemeIter->amplitude);
					//if (visemeIter+1 != frames.at(rframe).end()) m_mixout << "|";
					//if (i != (mix_size*2+1)-1) m_mixout << "|";
				}				
			}
			
			// prelude only boosts the first rendered frame
			prelude = 0;
		}
	}

	//m_mixout << "\n";
}

/**
* Open a wav (from a file path or from an in-memory wav image in `audio`),
* parse it via the multimedia file I/O API and start asynchronous playback
* on a waveOut device.
*
* On success the caller owns hWaveOut (close with waveOutClose after
* waveOutUnprepareHeader) and lpData (release with free()).
*
* @param audio    wav file path, or the raw wav bytes themselves
* @param hWaveOut [out] opened playback device
* @param waveHdr  [out] prepared header currently being played
* @param factor   [out] wav bytes-per-second (byte position -> seconds)
* @param lpData   [out] malloc'd waveform data buffer
* @return
*      <0 on error
*      0  for file playback
*      1  for memory playback
*/
int HXSpeech::playMMIO(string& audio, HWAVEOUT &hWaveOut, WAVEHDR &waveHdr, double* factor, HPSTR &lpData)
{

	HMMIO       hmmio;              // file handle for open file 
	MMCKINFO    mmckinfoParent;     // parent chunk information 
	MMCKINFO    mmckinfoSubchunk;   // subchunk information structure 
	DWORD       dwFmtSize;          // size of "FMT" chunk 
	DWORD       dwDataSize;         // size of "DATA" chunk 
	WAVEFORMATEX  *pFormat;         // address of "FMT" chunk 
	bool        isFile = true;

	UINT  wResult;

	// Open the file for reading with buffered I/O 
	// by using the default internal buffer 
	if(!(hmmio = mmioOpen((LPWSTR)(StringToWString(audio).c_str()), NULL, 
		MMIO_READ | MMIO_ALLOCBUF))) 
	{ 
		isFile = false;

		// not a file path: treat `audio` as an in-memory wav image
		MMIOINFO mmioInfo;
		memset(&mmioInfo, 0, sizeof(MMIOINFO));
		mmioInfo.pchBuffer = (HPSTR)audio.c_str();
		mmioInfo.cchBuffer = static_cast<LONG>(audio.length());
		mmioInfo.fccIOProc = FOURCC_MEM;

		if (!(hmmio = mmioOpen(NULL, &mmioInfo, MMIO_READ)))
		{
			return -1; 
		} 
	}

	// Locate a "RIFF" chunk with a "WAVE" form type to make 
	// sure the file is a waveform-audio file. 
	mmckinfoParent.fccType = mmioFOURCC('W', 'A', 'V', 'E'); 
	if (mmioDescend(hmmio, (LPMMCKINFO) &mmckinfoParent, NULL, 
		MMIO_FINDRIFF)) 
	{ 
		// not a waveform-audio file
		mmioClose(hmmio, 0); 
		return -1; 
	} 

	// Find the "fmt " chunk; it must be a subchunk of the "RIFF" chunk. 
	mmckinfoSubchunk.ckid = mmioFOURCC('f', 'm', 't', ' '); 
	if (mmioDescend(hmmio, &mmckinfoSubchunk, &mmckinfoParent, 
		MMIO_FINDCHUNK)) 
	{ 
		// no "fmt " chunk
		mmioClose(hmmio, 0); 
		return -1; 
	} 

	// Get the size of the "fmt " chunk and allocate memory for it. 
	dwFmtSize = mmckinfoSubchunk.cksize; 
	pFormat = (WAVEFORMATEX *)malloc(mmckinfoSubchunk.cksize); 
	if (!pFormat)
	{
		// FIX: allocation failure was previously unchecked
		mmioClose(hmmio, 0);
		return -1;
	}

	// Read the "fmt " chunk. 
	if (mmioRead(hmmio, (HPSTR) pFormat, dwFmtSize) != static_cast<LONG>(dwFmtSize)){ 
		mmioClose(hmmio, 0); 
		free(pFormat);		// FIX: pFormat leaked on this error path
		return -1; 
	} 

	// bytes per second of audio: used by the caller to convert a
	// TIME_BYTES playback position into seconds
	*factor = ((double)pFormat->nSamplesPerSec * pFormat->wBitsPerSample / 8);

	/* Ascend out of the "fmt " subchunk */ 
	mmioAscend(hmmio, &mmckinfoSubchunk,0); 

	/* Find the data subchunk.  The current file position should be at the 
	* beginning of the data chunk, however, you should not make this 
	* assumption--use mmioDescend to locate the data chunk. 
	*/ 
	mmckinfoSubchunk.ckid = mmioFOURCC('d', 'a', 't', 'a'); 
	if (mmioDescend(hmmio, &mmckinfoSubchunk, &mmckinfoParent, 
		MMIO_FINDCHUNK)){ 
			printf("Wave file has no data chunk."); 
			mmioClose(hmmio, 0); 
			free(pFormat);
			return -1; 
	} 

	/* Get the size of the data subchunk */ 
	dwDataSize = mmckinfoSubchunk.cksize; 
	if (dwDataSize == 0L){ 
		printf("the data chunk contains no data."); 
		mmioClose(hmmio, 0); 
		free(pFormat);
		return -1; 
	} 

	// Open a waveform device for output (no callback; caller polls WHDR_DONE). 
	if (waveOutOpen((LPHWAVEOUT)&hWaveOut, WAVE_MAPPER, (LPCWAVEFORMATEX)pFormat, 0L, 0L, CALLBACK_NULL)) 
	{ 
		free(pFormat);
		mmioClose(hmmio, 0);	// FIX: mmio handle leaked on this error path
		return -2; 
	} 

	free(pFormat);

	/* Allocate memory for the waveform data */ 
	lpData = (HPSTR)malloc(dwDataSize); 
	if (!lpData)
	{
		// FIX: allocation failure was previously unchecked
		mmioClose(hmmio, 0);
		waveOutClose(hWaveOut);
		return -1;
	}

	/* Read the waveform data subchunk */ 
	if(mmioRead(hmmio, (HPSTR) lpData, dwDataSize) != static_cast<LONG>(dwDataSize)){ 
		printf("Failed to read data chunk."); 
		mmioClose(hmmio, 0); 
		free(lpData);			// FIX: lpData leaked on this error path
		waveOutClose(hWaveOut);	// FIX: device handle leaked on this error path
		return -1; 
	} 

	// Close the file. 
	mmioClose(hmmio, 0); 

	// Set up and prepare the header. 
	memset(&waveHdr,0,sizeof(WAVEHDR));   
	waveHdr.lpData = (LPSTR)lpData;
	waveHdr.dwBufferLength = dwDataSize; 
	waveHdr.dwFlags = 0L; 
	waveHdr.dwLoops = 0L; 
	waveOutPrepareHeader(hWaveOut, &waveHdr, sizeof(WAVEHDR)); 

	// Now the data block can be sent to the output device. The 
	// waveOutWrite function returns immediately and waveform 
	// data is sent to the output device in the background. 
	wResult = waveOutWrite(hWaveOut, &waveHdr, sizeof(WAVEHDR)); 
	if (wResult != 0) 
	{ 
		waveOutUnprepareHeader(hWaveOut, &waveHdr, sizeof(WAVEHDR)); 
		free(lpData);
		waveOutClose(hWaveOut);	// FIX: device handle leaked on this error path
		return -1; 
	} 

	return isFile?0:1;

}

/**
* Debug helper: dump the viseme frame sequence to "visemeseq.txt",
* one line per frame, entries formatted as "label;amplitude" and
* separated by '|'.
*/
int HXSpeech::WriteFramesToFile(vector<vector<HXViseme>>& frames)
{
	ofstream out("visemeseq.txt"); // Open for writing

	for (size_t f = 0; f < frames.size(); ++f)
	{
		vector<HXViseme>& frame = frames[f];

		for (size_t v = 0; v < frame.size(); ++v)
		{
			// '|' between entries, none after the last one
			if (v > 0) out << "|";
			out << WStringToString(frame[v].label) << ";" << frame[v].amplitude;
		}
		out << "\n";
	}

	out.close();
	return 0;
}