/*
 * MemMaster.h
 *
 *  Created on: 2011-7-4
 *      Author: yong
 */

#ifndef MEMPAGEMASTER_H_
#define MEMPAGEMASTER_H_
#include<cstddef>
#include<string>
#include<sstream>
#include<vector>
#include<fstream>
#include<boost/bind.hpp>
#include<boost/algorithm/string.hpp>
#include <boost/mpi.hpp>
#include "Typedef.h"
#include "Page.h"
#include "KeyValuePage.h"
#include "GMMInitMapper.h"
#include "GMMMapper.h"
#include "MemPageBoundedBuffer.h"
using namespace std;

namespace distrim
{
/**
 * Manages in-memory pages of key-value records for one MPI worker process
 * of the distributed GMM trainer: it splits/loads the input data, drives
 * the mapper threads through the EM iterations, and exchanges partial sums
 * and updated parameters with the rank-0 reducer process.
 */
template<typename K1, typename V1, typename K2, typename V2>
class MemPageMaster
{
private:
	// Dense matrix over the value/parameter element type.
	typedef boost::numeric::ublas::matrix<V2> Matrix;
	// Vector of matrices; used for model parameters and partial sums.
	typedef std::vector<Matrix> MV;
	// A page of key-value records whose consumer emits an MV.
	typedef KeyValuePage<K1, V1, MV> KVP;
	typedef std::vector<KVP> PV;
	// Mappers for the first (initialization) iteration ...
	typedef std::vector<GMMInitMapper<K1, V1, V2> > GIV;
	// ... and for all subsequent EM iterations.
	typedef std::vector<GMMMapper<K1, V1, V2> > GV;

private:
	mpi::communicator m_world; // MPI communicator; rank 0 acts as the reducer.
	Config m_config; // Job configuration (copied in).
	V2 m_oldLikelihood; // Previous iteration's likelihood, for the convergence test.
	bool m_terminate; // Set once terminate() decides the job should stop.
	PV m_vecKeyValuePages; // One page descriptor per file split.
	MV m_vecParam; // Model parameters: [likelihood, pi x N, mean x N, sigma x N].
	GIV m_vecInitMapper; // Per-thread mappers for iteration 0.
	GV m_vecMapper; // Per-thread mappers for later iterations.
	MV m_vecPartialSum; // Node-local partial sums, reduced to rank 0 each iteration.
	CacheMode::CacheModeEnum m_mapperInputCacheMode; // FullCache iff all splits fit in the input buffer.
	MemPageBoundedBuffer<K1, V1, MV> m_mapperInputBoundedBuffer; // Producer/consumer page buffer.
	fs::path m_inputFilePath; // Root path of the input data.
	///////////////////////////////////////////////////////////////////
	// Members for multithread synchronization.
	//////////////////////////////////////////////////////////////////
	Mutex m_taskCounterMutex, m_workerThreadMutex, m_isMasterStartedMutex,
			m_isAllWorkDoneMutex, m_vecPartialSumMutex;
	Mutex m_pageCounterMutex, m_blockedMapperCounterMutex;
	Condition m_emptyTaskCondition, m_allMapperNotFinishedCondition,
			m_isMasterStartedCondition, m_allWorkerThreadNotStartedCondition,
			m_isAllWorkDoneCondition;
	Condition m_emptyPageConditon, m_blockedMapperCounterCondition;
	// NOTE(review): volatile provides neither atomicity nor ordering; these
	// counters rely on the surrounding mutexes for safe access — confirm
	// every access is actually made under the corresponding lock.
	volatile size_t m_workerThread, m_taskCounter, m_iterationCounter;
	volatile size_t m_pageCounter, m_blockedMapperCounter;
	volatile bool m_isMasterStarted, m_isAllWorkDone;

public:
	/**
	 * Constructs the master and synchronously runs the whole training job:
	 * depending on whether every file split fits into the input buffer
	 * (FullCache) or not (BoundedCache), it initializes pages, partial
	 * sums and parameters, spawns the worker/master threads, and returns
	 * only after all iterations are done on this node.
	 *
	 * @precondition:
	 * 1. This process is NOT rank 0 (rank 0 is the reducer).
	 * 2. The Config describes the input path, split/mapper/buffer counts
	 *    and convergence thresholds.
	 * @param config: the job configuration.
	 * @postcondition: training has finished on this node; in BoundedCache
	 * mode the page storage has been released again.
	 */
	MemPageMaster(const Config &config) :
				m_config(config),
				m_oldLikelihood(-1 * std::numeric_limits<V2>::max()),
				m_terminate(false),
				m_vecKeyValuePages(m_config.GetFileSplitsCount()),
				m_vecParam(3 * m_config.GetClusterNum() + 1),
				m_vecInitMapper(m_config.GetMapperCount(), GMMInitMapper<K1,
						V1, V2> (NULL, m_config, NULL)),
				m_vecMapper(m_config.GetMapperCount(), GMMMapper<K1, V1, V2> (
						NULL, m_config, &m_vecParam)), /* Param vector must be initialized before it.*/
				m_vecPartialSum(3 * m_config.GetClusterNum() + 1),
				m_mapperInputCacheMode(
						m_config.GetFileSplitsCount()
								<= m_config.GetInputBufferCount() ? CacheMode::FullCache
								: CacheMode::BoundedCache),
				m_mapperInputBoundedBuffer(m_config, m_mapperInputCacheMode),
				m_inputFilePath(m_config.GetInputPath()), m_workerThread(0),
				m_taskCounter(0), m_iterationCounter(0), m_pageCounter(0),
				m_blockedMapperCounter(0), m_isMasterStarted(false),
				m_isAllWorkDone(false)
	{
		BOOST_ASSERT(m_world.rank() != 0);
		if (m_mapperInputCacheMode == CacheMode::FullCache)
		{
			// All splits fit in memory: read the whole input once, then
			// iterate over the cached pages.
			fcInitializePages();
			initializePartialSums();
			initializeParameters();
			fcReadFile();
			fcCreateThreads();
			waitUntilAllWorkDone();
		}
		else
		{
			// Bounded cache: split the input into per-split files on disk
			// and stream them through the bounded buffer every iteration.
			bcInitializePages();
			initializePartialSums();
			initializeParameters();
			bcCreateThreads();
			waitUntilAllWorkDone();
			bcClear();
		}
	}
	virtual ~MemPageMaster()
	{
	}

private:
	inline void fcCreateThreads()
	{
		// Create mapper worker threads to do work.
		boost::thread_group mapperWorkerThreads;
		for (size_t i = 0, c = m_config.GetMapperCount(); i < c; ++i)
		{
			mapperWorkerThreads.create_thread(boost::bind(&MemPageMaster<K1,
					V1, K2, V2>::fcWorkerEntry, this));
		}
		boost::thread masterThread(boost::bind(
				&MemPageMaster<K1, V1, K2, V2>::fcMasterEntry, this));
		mapperWorkerThreads.join_all();
		masterThread.join();
	}

	inline void bcCreateThreads()
	{
		boost::thread_group ioThreads; // Create I/O worker threads.
		for (size_t i = 0, c = m_config.GetIOThreadCount(); i < c; ++i)
		{
			ioThreads.create_thread(boost::bind(
					&MemPageMaster<K1, V1, K2, V2>::bcIOThreadEntry, this));
		}
		boost::thread_group mapperWorkerThreads; // Create mapper threads.
		for (size_t i = 0, c = m_config.GetMapperCount(); i < c; ++i)
		{
			mapperWorkerThreads.create_thread(boost::bind(&MemPageMaster<K1,
					V1, K2, V2>::bcMapperEntry, this));
		}
		boost::thread masterThread(boost::bind(
				&MemPageMaster<K1, V1, K2, V2>::bcMasterEntry, this)); // Create master thread.
		ioThreads.join_all();
		mapperWorkerThreads.join_all();
		masterThread.join();
	}

	inline void bcClear()
	{
		foreach (KVP &kvp, m_vecKeyValuePages)
					{
						kvp.Clear();
					}
	}

	inline void bcInitializePages()
	{
		int i = 0;
		size_t pageCapacity = m_config.GetPageCapacity();
		size_t valueElemCount = m_config.GetValueElemCount();
		foreach(KVP &kvp, m_vecKeyValuePages)
					{
						kvp.Page<K1, V1>::SetPageID(i++);
						kvp.SetMaxPairCount(pageCapacity);
						kvp.SetValueElemCount(valueElemCount);
					}
		spliteInputFile();
	}

	/**
	 * Splits the textual input file into per-split binary page files named
	 * "<input>.<rank>.<split>".  The local points are distributed as evenly
	 * as possible over the splits: the remaining points are divided over
	 * the remaining splits with ceiling division, so the earlier splits get
	 * at most one extra point.
	 *
	 * Fixes over the previous version:
	 * 1. linesToAdd was re-derived from GetLocalPoints() every round, which
	 *    over-counted lines when the total was not divisible by the split
	 *    count; now it is derived from the points still left.
	 * 2. When getline failed (EOF) or the line was empty / a weka-style '@'
	 *    header, the stale contents of valueLine were still appended; now
	 *    only successfully parsed data lines are added, and header lines do
	 *    not consume a data slot.
	 *
	 * @throws std::invalid_argument if the input file cannot be opened.
	 */
	inline void spliteInputFile()
	{
		boost::scoped_array<V1> ptrPage(new V1[m_config.GetValueElemCount()
				* m_config.GetPageCapacity()]);
		boost::scoped_array<V1> valueLine(new V1[m_config.GetValueElemCount()]);
		KeyValuePage<K1, V1, MV> kvp;
		ifstream ifs(m_config.GetInputPath().c_str());
		if (!ifs)
		{
			BOOST_THROW_EXCEPTION(std::invalid_argument(string("error open file: ") + m_config.GetInputPath()));
		}
		kvp.SetMaxPairCount(m_config.GetPageCapacity());
		kvp.SetValueElemCount(m_config.GetValueElemCount());
		kvp.Page<K1, V1>::SetPageBegin(ptrPage.get());
		size_t leftPoints = m_config.GetLocalPoints();
		stringstream ss;
		size_t valueElemCount = m_config.GetValueElemCount();
		string lineStr;
		for (size_t i = 0, s = m_vecKeyValuePages.size(); i < s; ++i)
		{
			ss.clear();
			ss.seekg(0, ios::beg);
			ss.str("");
			ss << m_config.GetInputPath() << "." << m_world.rank() << "." << i;
			kvp.Page<K1, V1>::SetFileName(ss.str());
			m_vecKeyValuePages[i].Page<K1, V1>::SetFileName(ss.str());
			// Spread the REMAINING points over the REMAINING splits
			// (ceiling division).
			size_t splitsLeft = s - i;
			size_t linesToAdd = (leftPoints + splitsLeft - 1) / splitsLeft;
			size_t linesAdded = 0;
			while (linesAdded < linesToAdd && getline(ifs, lineStr))
			{
				// Skip blank lines and weka-style '@' headers without
				// consuming a data slot; never append stale buffer contents.
				if (lineStr.empty() || lineStr[0] == '@')
				{
					continue;
				}
				ss.clear();
				ss.seekg(0, ios::beg);
				ss.str(lineStr);
				for (size_t k = 0; k < valueElemCount; ++k)
				{
					ss >> valueLine[k];
				}
				kvp.AddValueArray(valueLine.get(), valueElemCount);
				++linesAdded;
			}
			kvp.WriteToDisk();
			kvp.Reset();
			leftPoints -= linesAdded;
		}
	}

	/**
	 * Bounded-cache mode I/O thread: repeatedly reads file splits from disk
	 * into free slots of the bounded buffer until termination is signaled.
	 */
	inline void bcIOThreadEntry()
	{
		volatile size_t indexToFill = 0;
		volatile size_t validPageIndex = 0;
		while (true)
		{
			if (m_terminate) // If converged, break to exit.

			{
				break;
			}
			else
			{
				indexToFill = m_mapperInputBoundedBuffer.MutexGetIndexToFill();
				Lock lk(m_pageCounterMutex);
				if (indexToFill != size_t(-1) && m_pageCounter
						< m_config.GetFileSplitsCount())
				{
					validPageIndex = m_pageCounter++;
				}
				else // Get a valid index in the bounded buffer to read file in.

				{
					// No free slot or no split left: bump the counter past
					// the split count so mappers can detect that this I/O
					// thread went to sleep (they wait until it reaches
					// splits + ioThreadCount), then block until the master
					// starts the next round.
					++m_pageCounter;
					m_emptyPageConditon.wait(lk);
					continue;
				}
			}
			// Read split 'validPageIndex' into buffer slot 'indexToFill'.
			m_mapperInputBoundedBuffer.MutexFillBuffer(indexToFill,
					m_vecKeyValuePages[validPageIndex]);
		}
	}

	/**
	 * Bounded-cache mode mapper thread: each round, takes a filled buffer
	 * slot, runs the (init) mapper over its page, and folds the mapper's
	 * partial sum into the node-wide total under m_vecPartialSumMutex.
	 * When no slot is available it blocks until the master starts the next
	 * iteration; when m_terminate is set it exits, and the last mapper to
	 * exit wakes the constructing thread.
	 */
	inline void bcMapperEntry()
	{
		// NOTE(review): this increment is not mutex-protected and volatile
		// does not make it atomic — confirm mapper threads cannot race on
		// the ID assignment here.
		volatile size_t mapperID = m_workerThread++;
		volatile size_t indexToEmpty = 0;
		while (true) // Starting rounds of jobs.

		{
			if (m_terminate)
			{
				// If all worker has finished all job, wake up main thread to do clean-up
				//	stuffs.
				Lock lk(m_workerThreadMutex);
				if (--m_workerThread == 0)
				{
					Lock lk(m_isAllWorkDoneMutex);
					m_isAllWorkDone = true;
					m_isAllWorkDoneCondition.notify_one();
				}
				break;
			}
			else
			{
				indexToEmpty
						= m_mapperInputBoundedBuffer.MutexGetIndexToEmpty();
				if (indexToEmpty != size_t(-1)) // != -1, consume it.
				{
					// If initial iteration, launch init Mapper.
					if (m_iterationCounter == 0)
					{
						// Start consuming the buffer.
						m_vecInitMapper[mapperID].Reset();
						m_vecKeyValuePages[indexToEmpty].SetConsumer(
								&m_vecInitMapper[mapperID]);
						m_mapperInputBoundedBuffer.MutexConsumeBuffer(
								indexToEmpty, m_vecKeyValuePages[indexToEmpty]);
						// Collect partial sum to the total partial sum.

						{
							Lock lk(m_vecPartialSumMutex);
							std::transform(
									m_vecPartialSum.begin(),
									m_vecPartialSum.end(),
									m_vecInitMapper[mapperID].GetPartialSum().begin(),
									m_vecPartialSum.begin(),
									std::plus<Matrix>());
						}
					}
					else // Launch GMMMaper.

					{
						m_vecMapper[mapperID].Reset();
						m_vecKeyValuePages[indexToEmpty].SetConsumer(
								&m_vecMapper[mapperID]);
						m_mapperInputBoundedBuffer.MutexConsumeBuffer(
								indexToEmpty, m_vecKeyValuePages[indexToEmpty]);
						// Collect partial sum to the total partial sum.

						{
							Lock lk(m_vecPartialSumMutex);
							std::transform(
									m_vecPartialSum.begin(),
									m_vecPartialSum.end(),
									m_vecMapper[mapperID].GetPartialSum().begin(),
									m_vecPartialSum.begin(),
									std::plus<Matrix>());
						}
					}
				}
				else
				{
					// No page left in this round: count ourselves as blocked.
					Lock lk(m_blockedMapperCounterMutex);
					if (++m_blockedMapperCounter == m_config.GetMapperCount())
					{
						// Wait for the last I/O thread to sleep.
						// NOTE(review): busy-wait that repeatedly acquires
						// m_pageCounterMutex until the counter reaches
						// splits + ioThreadCount — a condition variable
						// would avoid spinning here.
						while (true)
						{
							Lock lk(m_pageCounterMutex);
							if (m_pageCounter == m_config.GetFileSplitsCount()
									+ m_config.GetIOThreadCount())
							{
								break;
							}
						}
						m_allMapperNotFinishedCondition.notify_one(); // Notifying master thread to wake up.
					}
					m_blockedMapperCounterCondition.wait(lk); // Block until master finished.
				}
			}
		}
	}

	/**
	 * Bounded-cache mode master thread: once per iteration it waits until
	 * every split has been handed out and every mapper is blocked, reduces
	 * the node-local partial sums to rank 0, receives the updated
	 * parameters, resets the per-round state and either releases the
	 * workers for the next round or signals termination.
	 */
	inline void bcMasterEntry()
	{
		// When all tasks has not been finished, block until next
		//	round of task processing. If converged or maximum iteration
		//	time has reached, break to exit.
		while (true)
		{
			while (true) // wait until all work of a round is done.
			{
				Lock lk(m_pageCounterMutex);
				if (m_pageCounter < m_config.GetFileSplitsCount())
				{
					m_allMapperNotFinishedCondition.wait(lk);
				}
				else
				{
					break;
				}
			}
			while (true) // Wait until all mappers have been blocked.

			{
				// NOTE(review): busy-wait on m_blockedMapperCounter; a
				// condition variable would avoid spinning here.
				Lock lk(m_blockedMapperCounterMutex);
				if (m_blockedMapperCounter == m_config.GetMapperCount())
				{
					break;
				}
			}
			m_world.barrier();
			// Send partial sum to reducer (rank 0).
			mpi::reduce(m_world, m_vecPartialSum, std::plus<MV>(), 0);
			m_world.barrier();
			// Receive the re-estimated parameters from the reducer.
			m_world.recv(0, msg_param_tag, m_vecParam);
			// Reset the per-round state before waking anyone up.
			m_pageCounter = 0;
			m_blockedMapperCounter = 0;
			m_mapperInputBoundedBuffer.Reset();
			++m_iterationCounter;
			if (terminate())
			{
				// If exit condition satisfied, wake up all sleeping work threads to exit
				//	and exit.
				m_emptyPageConditon.notify_all();
				m_blockedMapperCounterCondition.notify_all();
				break;
			}
			else
			{
				resetPartialSums();
				m_emptyPageConditon.notify_all();
				m_blockedMapperCounterCondition.notify_all();
			}
		}
	}

	inline void fcInitializePages()
	{
		size_t pageCapacity = m_config.GetPageCapacity();
		size_t valueElemCount = m_config.GetValueElemCount();
		// Split data into parts in memory, initializeParameters page records.
		for (size_t i = 0, s = m_mapperInputBoundedBuffer.Size(); i < s; ++i)
		{
			m_vecKeyValuePages[i].Page<K1, V1>::SetPageID(i);
			m_vecKeyValuePages[i].Page<K1, V1>::SetPageBegin(
					m_mapperInputBoundedBuffer[i].PageBegin);
			m_vecKeyValuePages[i].SetMaxPairCount(pageCapacity);
			m_vecKeyValuePages[i].SetValueElemCount(valueElemCount);
		}
	}

	/**
	 * Reads one text file and scatters its data lines round-robin over the
	 * in-memory pages.  Lines that are empty or start with '@' (weka
	 * headers) are skipped.
	 *
	 * Fix: the previous version dereferenced the tokenizer iterator without
	 * checking it against end(), so a line with fewer than valueElemCount
	 * tokens was undefined behavior; such malformed lines are now skipped.
	 *
	 * @param filePath: file to read.
	 * @param lineCounter: running count of data lines across files; used to
	 *        pick the destination page (lineCounter % fileSplits).
	 * @param valueLine: scratch buffer of at least valueElemCount elements.
	 * @param fileSplits: number of pages to scatter over.
	 */
	inline void fcReadSingleFile(const fs::path &filePath, size_t &lineCounter,
			boost::scoped_array<V1> &valueLine, size_t fileSplits)
	{
		size_t valueElemCount = m_config.GetValueElemCount();
		::boost::filesystem::ifstream ifs(filePath);
		BOOST_ASSERT(ifs);
		std::string lineStr;
		boost::char_separator<char> sep(", @");
		while (getline(ifs, lineStr))
		{
			// For excluding header lines in files generated in weka format.
			if (!lineStr.empty() && lineStr[0] != '@')
			{
				boost::tokenizer<boost::char_separator<char> > tokens(lineStr,
						sep);
				boost::tokenizer<boost::char_separator<char> >::const_iterator
						beg = tokens.begin(), end = tokens.end();
				size_t i = 0;
				for (; i < valueElemCount && beg != end; ++i, ++beg)
				{
					valueLine[i] = boost::lexical_cast<V1>(*beg);
				}
				if (i < valueElemCount)
				{
					// Malformed (short) line: skip it instead of reading
					// past the end of the token sequence.
					continue;
				}
				// Scatter values to different pages.
				m_vecKeyValuePages[lineCounter++ % fileSplits].AddValueArray(
						valueLine.get(), valueElemCount);
			}
		}
	}

	/**
	 * Seeds the GMM parameters from a labeled (pre-training) file for
	 * semi-supervised initialization.  Each data line holds valueElemCount
	 * feature values followed by a state/cluster label.  Per label the code
	 * accumulates the point count (key), the feature-vector sum
	 * ("m_" + key) and the sum of x^T * x ("c_" + key), then derives pi,
	 * mean and covariance for each cluster.
	 *
	 * @param filePath: path to an existing regular pre-training file.
	 */
	inline void fcReadPreTrainingFile(const fs::path &filePath)
	{
		size_t clusterNum = m_config.GetClusterNum();
		BOOST_ASSERT(m_vecParam.size() == 3 * clusterNum + 1);
		BOOST_ASSERT(fs::is_regular_file(filePath));
		std::string meanPrefix("m_"), covPrefix("c_");
		size_t valueElemCount = m_config.GetValueElemCount();
		::boost::filesystem::ifstream ifs(filePath);
		BOOST_ASSERT(ifs);
		size_t totalPoint = 0;
		Matrix piFrac(1, 1, 1.0); // Each data line contributes 1 to its label's count.
		Matrix meanFrac(1, valueElemCount); // Row vector holding the current line's features.
		Matrix covFrac(valueElemCount, valueElemCount); // Scratch for x^T * x.
		std::string lineStr;
		boost::char_separator<char> sep(", ");
		// Per-label accumulators, see the class comment above.
		std::map<std::string, Matrix> msm;
		std::vector<std::string> vecKeys; // Labels in first-seen order -> cluster IDs.
		while (getline(ifs, lineStr))
		{
			if (!lineStr.empty() && lineStr[0] != '@')
			{
				++totalPoint;
				boost::tokenizer<boost::char_separator<char> > tokens(lineStr,
						sep);
				boost::tokenizer<boost::char_separator<char> >::const_iterator
						beg = tokens.begin();
				for (size_t i = 0; i < valueElemCount; ++i, ++beg)
				{
					meanFrac(0, i) = boost::lexical_cast<V1>(*beg);
				} // Parsing a line ends.
				// NOTE(review): assumes every data line has exactly
				// valueElemCount feature tokens followed by a label token;
				// a shorter line leaves beg past-the-end — confirm input.
				std::string stateKey = *beg;
				// Check whether the key exists or not.
				if (msm.find(stateKey) != msm.end())
				{
					msm[stateKey] += piFrac;
					msm[meanPrefix + stateKey] += meanFrac;
					msm[covPrefix + stateKey] += prod(trans(meanFrac),
							meanFrac, covFrac);
				}
				else
				{
					vecKeys.push_back(stateKey);
					// If key does not exist, create new one.
					msm[stateKey] = piFrac;
					msm[meanPrefix + stateKey] = meanFrac;
					msm[covPrefix + stateKey] = prod(trans(meanFrac), meanFrac,
							covFrac);
				}
			}
		}
		Matrix tA(valueElemCount, valueElemCount);
		Matrix tB(valueElemCount, valueElemCount);
		Matrix tC(valueElemCount, valueElemCount);
		for (size_t clusterID = 1, i = 0; i < vecKeys.size(); ++i, ++clusterID)
		{
			std::string &stateKey = vecKeys[i];
			// Evaluate pi: fraction of points carrying this label.
			m_vecParam[clusterID] = msm[stateKey] / totalPoint;
			// Evaluate mean: feature sum divided by the label's count.
			m_vecParam[clusterID + clusterNum] = msm[meanPrefix + stateKey]
					/ msm[stateKey](0, 0);
			// Evaluate sigma.
			Matrix &refPartialMean = msm[meanPrefix + stateKey];
			Matrix &refMean = m_vecParam[clusterID + clusterNum];
			Matrix &refPatrialSigma = msm[covPrefix + stateKey];
			Matrix &refSigma = m_vecParam[clusterID + 2 * clusterNum];
			prod(trans(refPartialMean), refMean, tA);
			prod(trans(refMean), refPartialMean, tB);
			prod(trans(refMean), refMean, tC);
			// (S - (Σx)^T·mu - mu^T·(Σx)) / n + mu^T·mu == S/n - mu^T·mu,
			// i.e. the (biased) sample covariance of the label's points.
			refSigma = (refPatrialSigma - tA - tB) / msm[stateKey](0, 0) + tC;
		}
	}

	/**
	 * Recursively walks a directory, reading every regular file found and
	 * descending into sub-directories; other entry types are ignored.
	 */
	inline void fcReadDirectory(const fs::path &path, size_t &lineCounter,
			boost::scoped_array<V1> &valueLine, size_t fileSplits)
	{
		fs::directory_iterator end;
		for (fs::directory_iterator entry(path); entry != end; ++entry)
		{
			if (fs::is_directory(*entry))
			{
				fcReadDirectory(*entry, lineCounter, valueLine, fileSplits);
			}
			else if (fs::is_regular_file(*entry))
			{
				fcReadSingleFile(*entry, lineCounter, valueLine, fileSplits);
			}
		}
	}

	/**
	 * Dispatches on the path type: a regular file is read directly, a
	 * directory is walked recursively; anything else is ignored.
	 */
	inline void fcReadPath(const fs::path &path, size_t &lineCounter,
			boost::scoped_array<V1> &valueLine, size_t fileSplits)
	{
		if (fs::is_directory(path))
		{
			fcReadDirectory(path, lineCounter, valueLine, fileSplits);
		}
		else if (fs::is_regular_file(path))
		{
			fcReadSingleFile(path, lineCounter, valueLine, fileSplits);
		}
	}

	inline void fcReadFile()
	{
		size_t lineCounter = 0;
		boost::scoped_array<V1> valueLine(new V1[m_config.GetValueElemCount()]);
		fcReadPath(m_inputFilePath, lineCounter, valueLine,
				m_config.GetFileSplitsCount());
	}

	/**
	 * Blocks the constructing thread until the last worker thread sets
	 * m_isAllWorkDone and signals m_isAllWorkDoneCondition.
	 */
	inline void waitUntilAllWorkDone()
	{
		Lock lk(m_isAllWorkDoneMutex);
		while (!m_isAllWorkDone)
		{
			m_isAllWorkDoneCondition.wait(lk);
		}
	}

	/**
	 * Full-cache mode worker thread.  Pages are handed out through
	 * m_taskCounter: each worker claims the next unprocessed page index,
	 * runs the (init) mapper over it, and blocks on m_emptyTaskCondition
	 * when all pages of the round are taken.  Blocking workers keep
	 * incrementing the counter, so reaching bufferSize + mapperCount means
	 * "all pages processed and all workers asleep", which is the master's
	 * wake-up signal.
	 */
	inline void fcWorkerEntry()
	{
		size_t workerID = 0;
		size_t validPageIndex = 0;
		size_t mapperInputBoundedBufferSize = m_mapperInputBoundedBuffer.size();
		size_t mapperCount = m_config.GetMapperCount();
		size_t wakupThreshold = mapperCount + mapperInputBoundedBufferSize;
		// Wait until the master thread has announced it is running.
		while (true)
		{
			Lock lk(m_isMasterStartedMutex);
			if (!m_isMasterStarted)
			{
				m_isMasterStartedCondition.wait(lk);
			}
			else
			{
				break;
			}
		}
		{
			Lock lk(m_workerThreadMutex);
			workerID = m_workerThread;
			if (++m_workerThread == mapperCount)
			{
				// If the last worker thread has been created, wake up
				//	master thread to do the next work.
				m_allWorkerThreadNotStartedCondition.notify_one();
			}
		}
		while (true)
		{
			if (m_terminate)
			{
				// If all worker has finished all job, wake up main thread to do clean-up
				//	stuffs.
				Lock lk(m_workerThreadMutex);
				if (--m_workerThread == 0)
				{
					Lock lk(m_isAllWorkDoneMutex);
					m_isAllWorkDone = true;
					m_isAllWorkDoneCondition.notify_one();
				}
				break;
			}
			else
			{
				Lock lk(m_taskCounterMutex);
				if (m_taskCounter++ >= mapperInputBoundedBufferSize)
				{
					// Wake up blocked master thread if it's the last worker thread
					//	to be blocked, and then no need to wait.
					if (m_taskCounter == wakupThreshold)
					{
						m_allMapperNotFinishedCondition.notify_one();
					}
					m_emptyTaskCondition.wait(lk);
					// If be woken up by master thread, continue to the next round.
					continue;
				}
				else
				{
					// If thread has not been blocked, then the index should be valid.
					validPageIndex = m_taskCounter - 1;
					// NOTE(review): workerID is overwritten with the page
					// index, so mapper state is indexed by page rather than
					// by thread — confirm m_vecInitMapper/m_vecMapper
					// (sized GetMapperCount()) cannot be indexed past their
					// end when the buffer holds more pages than mappers.
					workerID = validPageIndex;
				}
			}
			// If initial iteration, launch init Mapper.
			if (m_iterationCounter == 0)
			{
				// If no pre-training file present, use random initialization, otherwise
				// use semi-supervised training.
				//				mpi::timer tmr3;
				// Start consuming the buffer.
				m_vecInitMapper[workerID].Reset();
				m_vecKeyValuePages[validPageIndex].SetConsumer(
						&m_vecInitMapper[workerID]);
				m_vecKeyValuePages[validPageIndex].Consume();
				// Collect partial sum to the total partial sum.
				/*{
				 Lock lk(m_vecPartialSumMutex);
				 std::transform(m_vecPartialSum.begin(),
				 m_vecPartialSum.end(),
				 m_vecInitMapper[workerID].GetPartialSum().begin(),
				 m_vecPartialSum.begin(), std::plus<Matrix>());
				 }*/
				//				std::cout << "Processing one block used: " << tmr3.elapsed() << std::endl;
			}
			else // Launch GMMMaper.
			{
				//				mpi::timer tmr3;
				m_vecMapper[workerID].Reset();
				m_vecKeyValuePages[validPageIndex].SetConsumer(
						&m_vecMapper[workerID]);
				m_vecKeyValuePages[validPageIndex].Consume();
				// Collect partial sum to the total partial sum.
				/*
				 {
				 Lock lk(m_vecPartialSumMutex);
				 std::transform(m_vecPartialSum.begin(),
				 m_vecPartialSum.end(),
				 m_vecMapper[workerID].GetPartialSum().begin(),
				 m_vecPartialSum.begin(), std::plus<Matrix>());
				 }*/
				//				std::cout << "Processing one block used: " << tmr3.elapsed() << std::endl;
			}
		}
	}

	/**
	 * Full-cache mode master thread.  First performs a start-up handshake
	 * so every worker is running, then once per iteration: waits until all
	 * pages are processed and all workers are blocked, aggregates each
	 * mapper's partial sum, reduces the total to rank 0, receives the new
	 * parameters, and wakes the workers for the next round (or to exit
	 * once terminate() fires).
	 */
	inline void fcMasterEntry()
	{
		size_t mapperCount = m_config.GetMapperCount();
		size_t mapperInputBoundedBufferSize = m_mapperInputBoundedBuffer.size();
		size_t blockThreshold = mapperCount + mapperInputBoundedBufferSize;
		// Wait until all worker threads are created.
		while (true)
		{
			{
				Lock lk(m_workerThreadMutex);
				if (m_workerThread == mapperCount)
				{
					break;
				}
			}
			{
				Lock lk(m_isMasterStartedMutex);
				m_isMasterStarted = true;
				m_isMasterStartedCondition.notify_all();
			}
			{
				Lock lk(m_workerThreadMutex);
				if (m_workerThread != mapperCount)
				{
					// Wait until all worker thread started.
					m_allWorkerThreadNotStartedCondition.wait(lk);
				}
			}
		}
		// When all tasks has not been finished, block until next
		//	round of task processing. If converged or maximum iteration
		//	time has reached, break to exit.
		while (true)
		{
			// Sleep here until all worker threads have finished work of a round.
			{
				Lock lk(m_taskCounterMutex);
				if (m_taskCounter < blockThreshold)
				{
					m_allMapperNotFinishedCondition.wait(lk);
				}
				// When wake up, reset the counter.
				m_taskCounter = 0;
			}
			// Fold every mapper's per-thread partial sum into the node
			// total; done single-threaded here because all workers are
			// blocked once the task counter reached blockThreshold.
			if (m_iterationCounter == 0)
			{
				for (size_t i = 0, c = m_vecInitMapper.size(); i < c; ++i)
				{
					std::transform(m_vecPartialSum.begin(),
							m_vecPartialSum.end(),
							m_vecInitMapper[i].GetPartialSum().begin(),
							m_vecPartialSum.begin(), std::plus<Matrix>());
				}
			}
			else
			{
				for (size_t i = 0, c = m_vecMapper.size(); i < c; ++i)
				{
					std::transform(m_vecPartialSum.begin(),
							m_vecPartialSum.end(),
							m_vecMapper[i].GetPartialSum().begin(),
							m_vecPartialSum.begin(), std::plus<Matrix>());
				}
			}
			//			mpi::timer tmr1;
			m_world.barrier();
			// Send partial sum to reducer.
			mpi::reduce(m_world, m_vecPartialSum, std::plus<MV>(), 0);
			//			std::cout << "Reducing used: " << tmr1.elapsed() << std::endl;
			//			mpi::timer tmr2;
			m_world.barrier();
			m_world.recv(0, msg_param_tag, m_vecParam);
			++m_iterationCounter;
			//			std::cout << "Receiving new parameter used: " << tmr2.elapsed()
			//					<< std::endl;
			if (terminate())
			{
				// If exit condition satisfied, wake up all sleeping work threads to exit
				//	 and exit.
				m_emptyTaskCondition.notify_all();
				break;
			}
			else
			{
				resetPartialSums();
				m_emptyTaskCondition.notify_all();
			}
		}
	}

	bool terminate()
	{
		if ((m_vecParam[0](0, 0) - m_oldLikelihood) / m_config.GetTotalPoints()
				<= m_config.GetConvergeDelta() || m_iterationCounter
				> m_config.GetMaxIterCount())
		{
			m_terminate = true;
		}
		else
		{
			m_oldLikelihood = m_vecParam[0](0, 0);
		}
		return m_terminate;
	}
	inline void resetPartialSums()
	{
		foreach(Matrix &m, m_vecPartialSum)
					{
						m.clear();
					}
	}
	/**
	 * Allocates storage for the parameter vector — slot 0 holds the
	 * likelihood, then clusterNum pi matrices, clusterNum mean matrices
	 * and clusterNum covariance matrices — and, when a pre-training file
	 * is configured, seeds the parameters from it.
	 */
	inline void initializeParameters()
	{
		size_t clusterNum = m_config.GetClusterNum();
		size_t valueElemCount = m_config.GetValueElemCount();
		m_vecParam[0] = Matrix(1, 1); // For likelihood.
		for (size_t c = 1; c <= clusterNum; ++c)
		{
			m_vecParam[c] = Matrix(1, 1); // For pi, partial pi.
			m_vecParam[c + clusterNum] = Matrix(1, valueElemCount); // For mu, partial mean sum.
			m_vecParam[c + 2 * clusterNum] = Matrix(valueElemCount,
					valueElemCount); // For gamma, x^T * x partial sum.
		}
		std::string preTrainFile = m_config.GetPreTrainingFile();
		if (!preTrainFile.empty())
		{
			fs::path preTrainFilePath(preTrainFile);
			fcReadPreTrainingFile(preTrainFilePath);
			std::cout << "Read training parameters: " << std::endl;
			PrintMatrixVector(m_vecParam);
			std::cout << std::endl;
		}
	}

	/**
	 * Allocates zero-initialized storage for the intermediate partial
	 * sums, mirroring the parameter vector layout.
	 */
	inline void initializePartialSums()
	{
		size_t clusterNum = m_config.GetClusterNum();
		size_t valueElemCount = m_config.GetValueElemCount();
		// Slot 0 accumulates the likelihood partial sum.
		m_vecPartialSum[0] = Matrix(1, 1, V2());
		for (size_t c = 1; c <= clusterNum; ++c)
		{
			m_vecPartialSum[c] = Matrix(1, 1, V2()); // gamma
			m_vecPartialSum[c + clusterNum] = Matrix(1, valueElemCount, V2()); // mean
			m_vecPartialSum[c + 2 * clusterNum] = Matrix(valueElemCount,
					valueElemCount, V2()); // x^T * x
		}
	}
};
}

#endif /* MEMPAGEMASTER_H_ */
