//#ifdef WIN32
#include "stdafx.h"
//#endif
#include <math.h>
#include "MeltingParallel.h"
 
using namespace Melting;

// Shared payload array indexed by the lock-free-queue enqueue test
// (TEST_TYPE_LOCKFREEQUEUE_ENQUEUE); presumably allocated/filled by the
// driver code outside this chunk — not visible here.
INT32* gInts;

//********************************************************************************************
// Test-selection switches: each macro below is a 0/1 toggle; the benchmark driver
// compiles/runs exactly the tests whose flag is set to 1.
#define TEST_RUNFOREVER													1

//********************************************************************************************
#define TEST_BASIC_THREAD_PRINT											0

//********************************************************************************************
// CONCURRENT ALLOC (IMMEDIATE FREE)- START FRESH ----------------------------------------------------------------------
#define TEST_BASIC_THREAD_CONCURRENT_FRESH_DEFAULT_ALLOC_FREE_IMMEDIATE					0
#define TEST_BASIC_THREAD_CONCURRENT_FRESH_NAIVEMEMORYPOOL_ALLOC_FREE_IMMEDIATE			0
#define TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_MEMORYPOOL_ALLOC_FREE_IMMEDIATE			0
#define TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_HBFIMEMORYPOOL_ALLOC_FREE_IMMEDIATE		0
#define TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCKFREEMEMORYPOOL_ALLOC_FREE_IMMEDIATE		0
// CONCURRENT ALLOC (IMMEDIATE FREE)- START POLLUTED ----------------------------------------------------------------------
// "POLLUTED" variants pre-fragment the pool before measuring (see PolluteEvenIndices()).
#define TEST_BASIC_THREAD_CONCURRENT_POLLUTED_DEFAULT_ALLOC_FREE_IMMEDIATE				0	
#define TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE_IMMEDIATE	0
#define TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_MEMORYPOOL_ALLOC_FREE_IMMEDIATE		0
#define TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_HBFIMEMORYPOOL_ALLOC_FREE_IMMEDIATE	0
#define TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCKFREEMEMORYPOOL_ALLOC_FREE_IMMEDIATE	0


// CONCURRENT ALLOC - START FRESH ----------------------------------------------------------------------
// These variants batch all allocations first, then all frees (not immediate free).
#define TEST_BASIC_THREAD_CONCURRENT_FRESH_DEFAULT_ALLOC_FREE							0
#define TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE				0
#define TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_MEMORYPOOL_ALLOC_FREE					0
#define TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_HBFIMEMORYPOOL_ALLOC_FREE				0
#define TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCKFREEMEMORYPOOL_ALLOC_FREE				0 
// CONCURRENT ALLOC - START POLLUTED ----------------------------------------------------------------------
#define TEST_BASIC_THREAD_CONCURRENT_POLLUTED_DEFAULT_ALLOC_FREE						0	
#define TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE			0	
#define TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_MEMORYPOOL_ALLOC_FREE				0		
#define TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_HBFIMEMORYPOOL_ALLOC_FREE			0		
#define TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCKFREEMEMORYPOOL_ALLOC_FREE				0	


//********************************************************************************************
// SINGLE THREAD ALLOC - START EMPTY -------------------------------------------------------------------
#define TEST_SINGLETHREAD_FRESH_DEFAULT_ALLOC_FREE						0
#define TEST_SINGLETHREAD_FRESH_NAIVEMEMORYPOOL_ALLOC_FREE				0
#define TEST_SINGLETHREAD_FRESH_MEMORYPOOL_ALLOC_FREE					0
#define TEST_SINGLETHREAD_FRESH_HBFIMEMORYPOOL_ALLOC_FREE				0
#define TEST_SINGLETHREAD_FRESH_LOCKFREEMEMORYPOOL_ALLOC_FREE			0
// SINGLE THREAD ALLOC - START POLLUTED -----------------------------------------------------------------
#define TEST_SINGLETHREAD_POLLUTED_DEFAULT_ALLOC_FREE					0
#define TEST_SINGLETHREAD_POLLUTED_NAIVEMEMORYPOOL_ALLOC_FREE			0
#define TEST_SINGLETHREAD_POLLUTED_MEMORYPOOL_ALLOC_FREE				0
#define TEST_SINGLETHREAD_POLLUTED_HBFIMEMORYPOOL_ALLOC_FREE			0
#define TEST_SINGLETHREAD_POLLUTED_LOCKFREEMEMORYPOOL_ALLOC_FREE		0
 
//********************************************************************************************
// CONCURRENT INCREMENT - BASIC THREAD ---------------------------------------------------
#define TEST_BASIC_THREAD_CONCURRENT_ADD_TLS_ATOMIC						0
#define TEST_BASIC_THREAD_CONCURRENT_ADD_ATOMIC							0
#define TEST_BASIC_THREAD_CONCURRENT_ADD_SPINLOCK						0
#define TEST_BASIC_THREAD_CONCURRENT_ADD_RWLOCK							0
#define TEST_BASIC_THREAD_CONCURRENT_ADD_MUTEX							0

// CONCURRENT INCREMENT - WORKER THREAD ---------------------------------------------------
#define TEST_WORKERTHREAD_CONCURRENT_ADD_ATOMIC					0

// CONCURRENT INCREMENT - TASK MANAGER ---------------------------------------------------
#define TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_TLS_ATOMIC			1
#define TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_ATOMIC				0
#define TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_SPINLOCK		0
#define TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_RWLOCK			0
#define TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_MUTEX			0

//********************************************************************************************
// ...... 
#define TEST_TASK_MANAGER_SCHEDULING_AND_MUTEX_AND_LOCKFREEQUEUE		0
#define	TEST_WORKERTHREAD_AND_MUTEX_AND_LOCKFREEQUEUE					0

//********************************************************************************************
// PROFILE TASK MANAGER
#define TEST_PROFILING_TASK_MANAGER					0

//********************************************************************************************
// Output verbosity switches.
#define	PRINT_EXECUTION_INFO		0
#define PRINT_RESULTS_AS_DUMBLINES	0
#define PRINT_RESULTS_AS_HTML		0

// Number of operations per thread for every benchmark; also sizes the lock-free queue.
//#define MAX_TEST_FACTOR		10000
#define MAX_TEST_FACTOR		100000
//#define MAX_TEST_FACTOR		1000000
//#define MAX_TEST_FACTOR		10000000			// CANNOT RUN, TOO BIG ! Memory is not enough on my current Hardware, and Memory consumption is probably not optimized
#define MAX_QUEUE_SIZE		MAX_TEST_FACTOR

// When FORCE_THREAD_COUNT is 1, WORKER_THREAD_COUNT overrides the detected core count.
#define FORCE_THREAD_COUNT  0
#define WORKER_THREAD_COUNT 3 // when forced !



// Iteration index printed in each test banner; presumably advanced by the outer
// TEST_RUNFOREVER loop — that loop is not visible in this chunk.
int iLoopCount = 0;

//********************************************************************************************
// CHECK_ERROR( expr, fmt, ... ) — verifies that expr holds; on failure logs an
// error via LOG (printf-style varargs) and breaks into the debugger.
// Wrapped in do { } while(0) so the macro expands to exactly one statement:
// the original bare `if` was a dangling-else / stray-semicolon hazard when the
// macro was used inside an un-braced if/else.
#define CHECK_ERROR( expr, ... ) \
	do { \
		if( !(expr) ) {  \
			LOG( "ERROR :" );\
			LOG( __VA_ARGS__ );\
			__debugbreak();\
		} \
	} while( 0 )

namespace Test
{ 
	using namespace Parallel;

	volatile int	iValue;		// scratch value; unused in this chunk

	int iSleepTime = 0;			// artificial per-operation delay passed to Parallel::Sleep; 0 disables it

	
	TLS<INT32>		tlsSafeCnt;						// per-thread increment tally for the TLS-based counter tests
	TLS< std::vector< UBYTE* >* >	tlsAllocs;		// per-thread stack of outstanding allocations for the split ALLOC/FREE tests

	int				iUnsafeCnt = 0;					// plain counter — only correct when guarded by one of the locks below
	AtomicCounter	iConcurrentCnt(0);				// atomic counter used by the lock-free increment / TLS-gather tests

    static SpinLock             sSpinLock;
    static Mutex				sMutex;
    static RWLock				sRWLock;
    // -1 is the "empty" sentinel Dequeue returns; capacity is rounded up to a power of two.
    static LockFreeQueue< INT32, -1 > sQueue( GetNextPowerOfTwo( MAX_QUEUE_SIZE ) );

    AtomicCounter iCnt;
    std::list< INT32 > list;		// every value successfully dequeued (guarded by sMutex)
    std::list< INT32 > listPush;	// indices successfully enqueued (guarded by sMutex)
    std::list< INT32 > listPop;		// values successfully dequeued (guarded by sMutex)

	// Selects which micro-benchmark operation TestFunction performs.
	// INVARIANT: for every *_ALLOC / *_FREE pair the FREE enumerator must stay
	// exactly ALLOC + 1 — ConcurrentMemoryAllocFree relies on
	// "miTestType + 1 => FREE" to run the matching free pass.
	enum TestType
	{	TEST_TYPE_DO_NOTHING					= 0

	,	TEST_TYPE_INCREMENT_ATOMIC			
	,	TEST_TYPE_INCREMENT_ATOMIC_TLS_BEGIN
	,	TEST_TYPE_INCREMENT_ATOMIC_TLS
	,	TEST_TYPE_INCREMENT_ATOMIC_TLS_END
	,	TEST_TYPE_INCREMENT_LOCK_MUTEX
	,	TEST_TYPE_INCREMENT_LOCK_SPINLOCK
	,	TEST_TYPE_INCREMENT_LOCK_RWLOCK

	,	TEST_TYPE_DEFAULT_ALLOC_FREE
	,	TEST_TYPE_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE
	,	TEST_TYPE_LOCK_MEMORYPOOL_ALLOC_FREE
	,	TEST_TYPE_LOCK_HBFIMEMORYPOOL_ALLOC_FREE
	,	TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC_FREE

	// Split alloc/free variants: keep each FREE immediately after its ALLOC (see invariant above).
	,	TEST_TYPE_DEFAULT_ALLOC
	,	TEST_TYPE_DEFAULT_FREE
	,	TEST_TYPE_LOCK_NAIVEMEMORYPOOL_ALLOC
	,	TEST_TYPE_LOCK_NAIVEMEMORYPOOL_FREE
	,	TEST_TYPE_LOCK_MEMORYPOOL_ALLOC
	,	TEST_TYPE_LOCK_MEMORYPOOL_FREE
	,	TEST_TYPE_LOCK_HBFIMEMORYPOOL_ALLOC
	,	TEST_TYPE_LOCK_HBFIMEMORYPOOL_FREE
	,	TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC
	,	TEST_TYPE_LOCKFREEMEMORYPOOL_FREE


	,	TEST_TYPE_LOCKFREEQUEUE_ENQUEUE
	,	TEST_TYPE_LOCKFREEQUEUE_DEQUEUE
	};

	// Central dispatcher: performs one unit of benchmark work selected by _iTestType.
	// _iValue is overloaded per test type: for the pool tests it carries a pool
	// pointer cast to UINT32, for the queue tests an index into gInts; otherwise unused.
	// NOTE(review): round-tripping a pointer through UINT32 only works on 32-bit
	// builds — confirm before targeting 64-bit.
	void TestFunction( UINT32 _iTestType, UINT32 _iValue )
	{
		
        switch( _iTestType )
        {
		case TEST_TYPE_DO_NOTHING:
			Parallel::Sleep( iSleepTime );
			break;

        case TEST_TYPE_INCREMENT_ATOMIC :
			iConcurrentCnt.Increment(); 
			break;

        case TEST_TYPE_INCREMENT_ATOMIC_TLS_BEGIN :
			{
				if( PRINT_EXECUTION_INFO )
				{
					LOG( "TLS_BEGIN by Thread %d\n", Parallel::Thread::GetCurrentThreadID() );
				} 

				// Reset this thread's private tally.
				tlsSafeCnt.Set( 0 );
			}
			break;

        case TEST_TYPE_INCREMENT_ATOMIC_TLS :
			{
				if( PRINT_EXECUTION_INFO )
				{
					LOG( "TLS_INC by Thread %d\n", Parallel::Thread::GetCurrentThreadID() );
				} 

				// Bump the thread-local counter; no synchronization needed.
				tlsSafeCnt.Set( 1 + tlsSafeCnt.Get() );
			}
			break;
			
        case TEST_TYPE_INCREMENT_ATOMIC_TLS_END :
			{
				if( PRINT_EXECUTION_INFO )
				{
					// NOTE(review): calls Win32 GetCurrentThreadId() directly,
					// unlike the Parallel::Thread wrapper used elsewhere.
					LOG( "TLS_END by Thread %d\n", GetCurrentThreadId() );//Parallel::Thread::GetCurrentThreadHandle() );
				}

				//INT32* pCnt = tlsSafeCnt.Get();
				//{ 
				//	GEN_ASSERT( nullptr != pCnt , "TLS DOES not work properly >.<' \n" );
				//}

				//// gather
				//iConcurrentCnt.Add( (*pCnt) );

				//// reset tls
				//delete pCnt;
				//tlsSafeCnt.Set( nullptr );
				
				// gather: fold this thread's private tally into the shared atomic counter
				iConcurrentCnt.Add( tlsSafeCnt.Get() );
			}
			break;
			

        case TEST_TYPE_INCREMENT_LOCK_MUTEX: 
			sMutex.Lock();
			iUnsafeCnt++;
			sMutex.Unlock(); 
			break;

        case TEST_TYPE_INCREMENT_LOCK_SPINLOCK: 
			sSpinLock.Lock();
			iUnsafeCnt++;
			sSpinLock.Unlock(); 
			break;

        case TEST_TYPE_INCREMENT_LOCK_RWLOCK: 
			sRWLock.LockWrite();
			iUnsafeCnt++;
			sRWLock.UnlockWrite(); 
			break;
			
		// Baseline: global heap new/delete, freed immediately.
        case TEST_TYPE_DEFAULT_ALLOC_FREE:  
		{
			UBYTE* p = new UBYTE[12];
			Parallel::Sleep( iSleepTime  );
			delete [] p;
		}
			break;

		// Mutex-guarded pool variants: the pool itself is not thread-safe,
		// so every Allocate/Free is wrapped in sMutex.
		case TEST_TYPE_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE:
		{
			NaiveMemoryPool* pPool = (NaiveMemoryPool*) _iValue;			

			sMutex.Lock();
			UBYTE* pMemory = pPool->Allocate();						
			sMutex.Unlock();

			Parallel::Sleep( iSleepTime );

			sMutex.Lock();
			pPool->Free( pMemory );
			sMutex.Unlock();
		}
			break;

		case TEST_TYPE_LOCK_MEMORYPOOL_ALLOC_FREE:
		{
			MemoryPool* pPool = (MemoryPool*) _iValue;			

			sMutex.Lock();
			UBYTE* pMemory = pPool->Allocate();						
			sMutex.Unlock();

			Parallel::Sleep( iSleepTime  );

			sMutex.Lock();
			pPool->Free( pMemory );
			sMutex.Unlock();
		}
			break;

		case TEST_TYPE_LOCK_HBFIMEMORYPOOL_ALLOC_FREE:
		{
			HBFIMemoryPool* pPool = (HBFIMemoryPool*) _iValue;			

			sMutex.Lock();
			UBYTE* pMemory = pPool->Allocate();						
			sMutex.Unlock();

			Parallel::Sleep( iSleepTime  );

			sMutex.Lock();
			pPool->Free( pMemory );
			sMutex.Unlock();
		}
			break;

		// Lock-free pool: no external locking needed.
        case TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC_FREE:  
		{
			LockFreeMemoryPool* pPool = (LockFreeMemoryPool*) _iValue;			

			UBYTE* pMemory = pPool->Allocate();			

			Parallel::Sleep( iSleepTime  );

			pPool->Free( pMemory );
		}
			break;		
			
		// Split variants: each ALLOC pushes onto this thread's tlsAllocs stack;
		// the matching FREE pops from it (LIFO order).
        case TEST_TYPE_DEFAULT_ALLOC :  
		{
			UBYTE* p = new UBYTE[12];

			// save p
			tlsAllocs.Get()->push_back( (UBYTE*)p );
			Parallel::Sleep( iSleepTime  );
		}
			break;
  
        case TEST_TYPE_DEFAULT_FREE:  
		{
			// retrieve P
			std::vector< UBYTE* >* pAllocs = tlsAllocs.Get();
			UBYTE* pMemory = pAllocs->back();
			pAllocs->pop_back();

			delete [] pMemory;
		}
			break;

		case TEST_TYPE_LOCK_NAIVEMEMORYPOOL_ALLOC :
		{
			NaiveMemoryPool* pPool = (NaiveMemoryPool*) _iValue;			

			sMutex.Lock();
			UBYTE* pMemory = pPool->Allocate();						
			sMutex.Unlock();

			Parallel::Sleep( iSleepTime  );

			// save p
			tlsAllocs.Get()->push_back( pMemory );
		}
			break;

		case TEST_TYPE_LOCK_NAIVEMEMORYPOOL_FREE:
		{
			NaiveMemoryPool* pPool = (NaiveMemoryPool*) _iValue;			

			// retrieve P
			std::vector< UBYTE* >* pAllocs = tlsAllocs.Get();
			UBYTE* pMemory = pAllocs->back();
			pAllocs->pop_back(); 

			sMutex.Lock();
			pPool->Free( pMemory );
			sMutex.Unlock();
		}
			break; 

			
		case TEST_TYPE_LOCK_MEMORYPOOL_ALLOC :
		{
			MemoryPool* pPool = (MemoryPool*) _iValue;			

			sMutex.Lock();
			UBYTE* pMemory = pPool->Allocate();						
			sMutex.Unlock();

			Parallel::Sleep( iSleepTime  );

			// save p
			tlsAllocs.Get()->push_back( pMemory );
		}
			break;

		case TEST_TYPE_LOCK_MEMORYPOOL_FREE:
		{
			MemoryPool* pPool = (MemoryPool*) _iValue;			

			// retrieve P
			std::vector< UBYTE* >* pAllocs = tlsAllocs.Get();
			UBYTE* pMemory = pAllocs->back();
			pAllocs->pop_back(); 

			sMutex.Lock();
			pPool->Free( pMemory );
			sMutex.Unlock();
		}
			break; 

			
		case TEST_TYPE_LOCK_HBFIMEMORYPOOL_ALLOC :
		{
			HBFIMemoryPool* pPool = (HBFIMemoryPool*) _iValue;			

			sMutex.Lock();
			UBYTE* pMemory = pPool->Allocate();						
			sMutex.Unlock();

			Parallel::Sleep( iSleepTime  );

			// save p
			tlsAllocs.Get()->push_back( pMemory );
		}
			break;

		case TEST_TYPE_LOCK_HBFIMEMORYPOOL_FREE:
		{
			HBFIMemoryPool* pPool = (HBFIMemoryPool*) _iValue;			

			// retrieve P
			std::vector< UBYTE* >* pAllocs = tlsAllocs.Get();
			UBYTE* pMemory = pAllocs->back();
			pAllocs->pop_back(); 

			sMutex.Lock();
			pPool->Free( pMemory );
			sMutex.Unlock();
		}
			break; 
        case TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC :  
		{
			LockFreeMemoryPool* pPool = (LockFreeMemoryPool*) _iValue;			

			UBYTE* pMemory = pPool->Allocate();		
			Parallel::Sleep( iSleepTime  );	
 
			// save p
			tlsAllocs.Get()->push_back( pMemory );
		}
			break;		

        case TEST_TYPE_LOCKFREEMEMORYPOOL_FREE:  
		{
			LockFreeMemoryPool* pPool = (LockFreeMemoryPool*) _iValue;			
 
			// retrieve P
			std::vector< UBYTE* >* pAllocs = tlsAllocs.Get();
			UBYTE* pMemory = pAllocs->back();
			pAllocs->pop_back();  

			pPool->Free( pMemory );
		}
			break;	

		// Queue tests: spin (with yield) until the enqueue/dequeue succeeds,
		// recording successful operations under sMutex for later validation.
        case TEST_TYPE_LOCKFREEQUEUE_ENQUEUE :
        { 
			do
			{
				bool bSuccess = sQueue.Enqueue( gInts[ _iValue ] );
				if( bSuccess )
				{
					sMutex.Lock();
					listPush.push_back( _iValue );
					sMutex.Unlock();
					break;
				}

				if( PRINT_EXECUTION_INFO )
				{
 					printf(" Could not ENQUEUE at %d\n", _iValue );
				}
				Parallel::YieldProcessor();
			}
			while( true );
        }
        break;
        case TEST_TYPE_LOCKFREEQUEUE_DEQUEUE :
            {
                INT32 iData;
				
				do
				{
					iData = sQueue.Dequeue();

					// -1 is the queue's "empty" sentinel (see sQueue declaration).
					if( -1 != iData )
					{ 
						sMutex.Lock();
						list.push_back( iData );
						listPop.push_back( iData );
						sMutex.Unlock();
						break;
					}
					if( PRINT_EXECUTION_INFO )
					{ 
 						printf(" Could not DEQUEUE at %d\n", _iValue );
					} 

					Parallel::YieldProcessor();
				}
				while( true );
            }
        break;
        }
	}
  


	
	
	// Work item that replays TestFunction( miType, miValue ) on a worker thread.
	struct TestWork : public Work 
    { 
        void Execute( void )
        {			
			TestFunction( miType, miValue ); 
        }
		
		// Post-execution hook: returns this object's own storage to the pool
		// whose pointer is smuggled through miValue. Only valid when the
		// TestWork instance was itself allocated from that LockFreeMemoryPool.
		void OnExecuted( void )
		{
			LockFreeMemoryPool* pPool = (LockFreeMemoryPool*) miValue;
			pPool->Free( (UBYTE*)this );
		}

        TestWork( UINT32 _iType = 0, UINT32 _iValue = 0)
        :   miType ( _iType )
        ,   miValue( _iValue )
        {
        }
        UINT32 miType;		// TestType to execute
        UINT32 miValue;		// payload (often a pool pointer cast to UINT32)
    }; 

	// Task-function adapter: unpacks the { type, value } pair that BuildTask
	// packed into the task payload and forwards it to TestFunction, recording
	// the call's duration with the profiler.
	void ProcessTask( const Parallel::TaskFuncParams& _rParams )
	{
		//-------------------------------------------------
		// When you want to record a time measure [ from any thread ]
		PROFILING_BEGINRECORDTIME( id, "ProcessTask" );

		{
			// Local names mirror the TestWork members; slot 0 = type, slot 1 = value.
			UINT32 miType	= _rParams.mData8[ 0 ];
			UINT32 miValue	= _rParams.mData8[ 1 ];

			TestFunction( miType, miValue );		
		}

		PROFILING_ENDRECORDTIME( id );
	}

	// Fills _rTask so that ProcessTask later replays TestFunction( _iType, _iValue ):
	// the dispatcher goes in mFunc, its two arguments in payload slots 0 and 1.
	void BuildTask( UINT32 _iType, UINT32 _iValue, Parallel::Task& _rTask )
	{
		_rTask.mFunc = Test::ProcessTask;

		_rTask.mParams.mData8[ 0 ] = _iType;
		_rTask.mParams.mData8[ 1 ] = _iValue;
	} 
	
	// Task function that merges the executing thread's TLS profiling stats into
	// the global set; scheduled once per worker so every thread's numbers are
	// gathered before reporting. _rParams is unused.
	void ProcessTaskMergeProfilingStats( const Parallel::TaskFuncParams& _rParams )
	{
		//-------------------------------------------------
		// When you want to process the stats (e.g. print) ,
		// or before leaving your sub-thread :
		//  first merge them [ from every thread (sub + main) ]
		Profiling::MergeTLSStats();
	}

	// Configures _rTask to run ProcessTaskMergeProfilingStats; no payload needed.
	void BuildTaskMergeProfilingStats( Parallel::Task& _rTask )
	{
		_rTask.mFunc = Test::ProcessTaskMergeProfilingStats;
	}



	//===================================================================================================
	// Thread entry point for the basic "hello" test: prints this thread's ID plus
	// the string carried in the owning Thread's user data, and demonstrates the
	// full per-thread profiling lifecycle (init TLS -> record -> merge -> release).
	Parallel::ThreadFunctionReturn ConcurrentPrint( void* _pData ) 
	{
		//-------------------------------------------------
		// In each [ sub-thread ], call once 
		Profiling::InitializeTLS(); 

		//-------------------------------------------------
		// When you want to record a time measure [ from any thread ]
		Profiling::StatsID id = Profiling::BeginRecordTime( "Name of the Measure" );
		{
			// The code you want to profile 
			Parallel::Thread* pThread = static_cast<Parallel::Thread*>(_pData);
			printf("Hello from thread %d said %s \n", Parallel::Thread::GetCurrentThreadID(), (const char*) pThread->GetData() );
		}
		Profiling::EndRecordTime( id );
		
		//-------------------------------------------------
		// When you want to process the stats (e.g. print) ,
		// or before leaving your sub-thread :
		//  first merge them [ from every thread (sub + main) ]
		Profiling::MergeTLSStats();

		//-------------------------------------------------
		// Before leaving any [ sub-thread ]
		Profiling::ReleaseTLS();
		
		return (Parallel::ThreadFunctionReturn)0;
	}
	
	//===================================================================================================
	// Thread entry point: replays TestFunction MAX_TEST_FACTOR times using the
	// { test type, value } pair carried in the owning Thread's user data.
	Parallel::ThreadFunctionReturn ConcurrentIncrement( void* _pData )
	{
		// Layout of the user-data blob attached to the thread.
		struct DataPair
		{
			UINT32		miTestType;
			UINT32		miValue;
		};

		Parallel::Thread* pOwner = static_cast<Parallel::Thread*>( _pData );
		const DataPair* pPair = (DataPair*) pOwner->GetData();

		INT32 iRemaining = MAX_TEST_FACTOR;
		while( iRemaining-- > 0 )
		{
			TestFunction( pPair->miTestType, pPair->miValue );
		}

		return (Parallel::ThreadFunctionReturn)0;
	}

	//===================================================================================================
	// Thread entry point for the TLS variant: zeroes this thread's local counter,
	// replays the requested test MAX_TEST_FACTOR times, then folds the local
	// tally into the shared atomic counter.
	Parallel::ThreadFunctionReturn ConcurrentIncrementWithTLS( void* _pData )
	{
		// Layout of the user-data blob attached to the thread.
		struct DataPair
		{
			UINT32		miTestType;
			UINT32		miValue;
		};

		Parallel::Thread* pOwner = static_cast<Parallel::Thread*>( _pData );
		const DataPair* pPair = (DataPair*) pOwner->GetData();

		// Begin TLS: reset this thread's private counter.
		Test::TestFunction( TEST_TYPE_INCREMENT_ATOMIC_TLS_BEGIN, 0 );

		INT32 iRemaining = MAX_TEST_FACTOR;
		while( iRemaining-- > 0 )
		{
			TestFunction( pPair->miTestType, pPair->miValue );
		}

		// End TLS: publish the private tally into iConcurrentCnt.
		Test::TestFunction( TEST_TYPE_INCREMENT_ATOMIC_TLS_END, 0 );

		return (Parallel::ThreadFunctionReturn)0;
	}

	//===================================================================================================
	// Thread entry point for the allocator benchmarks. For *_ALLOC_FREE types it
	// simply loops TestFunction; for split *_ALLOC types it runs a full alloc
	// pass followed by a full free pass, using a thread-local vector (via
	// tlsAllocs) to remember the outstanding pointers between the two passes.
	Parallel::ThreadFunctionReturn ConcurrentMemoryAllocFree( void* _pData )
	{
		Parallel::Thread* pThread = static_cast<Parallel::Thread*>(_pData);

		// Layout of the user-data blob attached to the thread.
		struct DataPair
		{
			UINT32		miTestType;
			UINT32		miValue;
		};
		DataPair* pData = (DataPair*) pThread->GetData(); 

		std::vector< UBYTE* > vAllocs;

		switch( pData->miTestType )
		{ 
		case TEST_TYPE_DEFAULT_ALLOC_FREE:
		case TEST_TYPE_LOCK_MEMORYPOOL_ALLOC_FREE:
		case TEST_TYPE_LOCK_HBFIMEMORYPOOL_ALLOC_FREE:
		case TEST_TYPE_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE:
		case TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC_FREE:
			
			for( INT32 i = 0; i < MAX_TEST_FACTOR; i ++ )
			{			
				TestFunction( pData->miTestType, pData->miValue );
			}
			break;

		case TEST_TYPE_DEFAULT_ALLOC:
		case TEST_TYPE_LOCK_NAIVEMEMORYPOOL_ALLOC:
		case TEST_TYPE_LOCK_HBFIMEMORYPOOL_ALLOC:
		case TEST_TYPE_LOCK_MEMORYPOOL_ALLOC:
		case TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC:

		//case TEST_TYPE_DEFAULT_FREE:
		//case TEST_TYPE_LOCK_MEMORYPOOL_FREE:
		//case TEST_TYPE_LOCKFREEMEMORYPOOL_FREE:

			// PERF HIT >.< 
			vAllocs.reserve( MAX_TEST_FACTOR );

			// Expose this thread's alloc stack to TestFunction via TLS.
			Test::tlsAllocs.Set( & vAllocs );

			for( INT32 i = 0; i < MAX_TEST_FACTOR; i ++ )
			{			
				// ALLOC
				TestFunction( pData->miTestType, pData->miValue );
			}
			for( INT32 i = 0; i < MAX_TEST_FACTOR; i ++ )
			{			
				// ..._ALLOC + 1 => ..._FREE  (relies on the TestType enum ordering)
				TestFunction( pData->miTestType + 1 , pData->miValue );
			}
			break;
		default: 
			assert( false && "test not properly configured\n" ); // should not happen
		}



		return (Parallel::ThreadFunctionReturn)0;
	}

} 




////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


// One benchmark outcome: elapsed time plus a display label for the report.
struct BenchTestResult
{
	float			mfDuration;		// elapsed time of the run
	const char*		mstrName;		// test label; not owned, must outlive the result
	
	BenchTestResult( float _fDuration, const char* _strName )
	:	mfDuration( _fDuration )
	,	mstrName( _strName )
	{
	}
};

// Ordering predicate: ranks results from fastest to slowest.
bool SortFunc( const BenchTestResult& d1, const BenchTestResult& d2 )
{
	return d2.mfDuration > d1.mfDuration;
}


int		NB_THREADS;		// worker-thread count driving the benchmarks (set elsewhere; FORCE_THREAD_COUNT may override — not visible here)
INT32	MAX;			// NOTE(review): written/read outside this chunk — purpose not visible here

Parallel::TaskManager*			pTaskManager = nullptr;		// global task manager used by the TaskManager-based tests
std::vector< BenchTestResult >	vResults;					// timings collected across tests; presumably sorted with SortFunc for reporting
	

//===================================================================================================
// Returns how many tasks a TaskManager run schedules for the given test type:
// MAX_TEST_FACTOR per thread, plus one bookkeeping task for the increment
// tests. Unknown types yield 0.
int GetNbTasks( UINT32 _iTestType )
{
	const int iNbIncrement = MAX_TEST_FACTOR * NB_THREADS;

	switch( _iTestType )
	{
	case Test::TEST_TYPE_INCREMENT_ATOMIC :
//	case Test::TEST_TYPE_INCREMENT_ATOMIC_TLS :
	case Test::TEST_TYPE_INCREMENT_LOCK_MUTEX :
	case Test::TEST_TYPE_INCREMENT_LOCK_SPINLOCK :
	case Test::TEST_TYPE_INCREMENT_LOCK_RWLOCK :
		// one extra task on top of the increments themselves
		return iNbIncrement + 1;

	case Test::TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC_FREE:
		return iNbIncrement;
	}

	return 0;
}

//===================================================================================================
// Validates the shared counters after a run: _iNbIncrement is the number of
// increments that should have happened. Returns non-zero on success and also
// fires CHECK_ERROR (log + debug break) on mismatch. Test types with nothing
// to verify (and unknown types) succeed trivially.
int CheckResult( UINT32 _iTestType, UINT32 _iNbIncrement )
{ 
	// Check results ........................................................................
	// NOTE(review): despite the name, iExpected holds the ACTUAL observed count;
	// _iNbIncrement is the expected value. The log message below has them in the
	// right places, only the variable name is misleading.
	int iExpected = 0;
	
	switch( _iTestType )
	{
	case Test::TEST_TYPE_INCREMENT_ATOMIC :		
	case Test::TEST_TYPE_INCREMENT_ATOMIC_TLS :		
		iExpected = Test::iConcurrentCnt.Get();
		// Check results ........................................................................
		CHECK_ERROR( _iNbIncrement == iExpected, 
			" Unexpected result - the concurrent counter is %d when it should be %d\n", iExpected, _iNbIncrement );  
		return _iNbIncrement == iExpected;

		break;
	case Test::TEST_TYPE_INCREMENT_LOCK_MUTEX :
	case Test::TEST_TYPE_INCREMENT_LOCK_SPINLOCK :
	case Test::TEST_TYPE_INCREMENT_LOCK_RWLOCK :
		iExpected = Test::iUnsafeCnt;
		// Check results ........................................................................
		CHECK_ERROR( _iNbIncrement == iExpected, 
			" Unexpected result - the concurrent counter is %d when it should be %d\n", iExpected, _iNbIncrement ); 
		return _iNbIncrement == iExpected; 

		break;
	case Test::TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC_FREE:
		// No counter to check for the pool test.
		break;
	} 
	
	return true;
}


//===================================================================================================
// Runs one benchmark on _iNbThreads raw Parallel::Thread objects: resets the
// shared counters, starts every thread on _pFunc with a shared { type, value }
// data pair, waits for completion while timing the whole run, optionally saves
// the timing into vResults, and finally validates via CheckResult.
// NOTE(review): the banner always advertises "AtomicCounter:: Increment" even
// for non-increment test types.
void LaunchTestWithBasicThreads( const char* _strTestName, int _iNbThreads, Parallel::ThreadFunctionPtr _pFunc, UINT32 _iTestType, UINT32 _iValue, bool _bSaveResult )
{
	MSG("=============================================================================\n");			
	MSG("=> Iteration %d\n", iLoopCount ); 
	MSG(" %s \n\n", _strTestName );
	MSG(" - functions : \n");
	MSG(".Thread:: Create, Run, Wait, Destroy\n");
	MSG(".AtomicCounter:: Increment\n");
	MSG("-----------------------------------------------------------------------------\n");

	// Reset the counters the workers will hammer.
	Test::iUnsafeCnt = 0;
	Test::iConcurrentCnt.Set( 0 );		

	int iNbIncrement = MAX_TEST_FACTOR * _iNbThreads;
	int iNbTasks = iNbIncrement;
	
	MSG("-- working on %d concurrent Tasks -- \n", iNbTasks ); 


	// Shared payload handed to every thread (same layout as in the thread functions).
	struct DataPair
	{
		UINT32		miTestType;
		UINT32		miValue;
	};
	DataPair data;
	data.miTestType = _iTestType;
	data.miValue	= _iValue;

	std::vector<Parallel::Thread*> apThreads;

	// Thread creation is deliberately outside the measured section.
	for( int i = 0; i < _iNbThreads; i++ )
	{
		apThreads.push_back( new Parallel::Thread() );
	}
	MEASURE_START(test);
	{
		for( int i = 0; i < _iNbThreads; i++ )
		{
			apThreads[ i ]->Run( _pFunc, (void*)&data );
		}
		for( int i = 0; i < _iNbThreads; i++ )
		{
			apThreads[ i ]->Wait(); 
		}
	}
	MEASURE_STOP_LOG( test, "Time for Adding AND Processing Tasks  : ", 0 );
	
	for( int i = 0; i < _iNbThreads; i++ )
	{ 
		delete apThreads[ i ];
	}
	apThreads.clear();
	
	if( _bSaveResult )
	{
		vResults.push_back( BenchTestResult( GET_MEASURE(test), _strTestName ) );
	} 
 
	// Check results ........................................................................
	CheckResult( _iTestType, iNbIncrement );  
}

//===================================================================================================
// Runs one benchmark through the global TaskManager: schedules
// MAX_TEST_FACTOR * NB_THREADS tasks of the given type as one group, waits for
// the group, optionally records the timing in vResults, then validates the
// counters via CheckResult.
// NOTE(review): _iNbThreads is unused here — the manager owns its worker count
// and the task count is derived from the NB_THREADS global; confirm intent.
void LaunchTestWithTaskManager( const char* _strTestName, int _iNbThreads, UINT32 _iTestType, UINT32 _iValue, bool _bSaveResult )
{
	// Describes the primitive under test for the banner. Initialized to a safe
	// default: the original left this uninitialized, which was undefined
	// behavior in MSG("%s", ...) for any test type not listed below.
	const char* strFunctionTested = "";

	switch( _iTestType )
	{
	case Test::TEST_TYPE_INCREMENT_ATOMIC :	
		strFunctionTested = ".AtomicCounter:: Increment\n";
		break;
	case Test::TEST_TYPE_INCREMENT_ATOMIC_TLS :	
		strFunctionTested = ".AtomicCounter:: Increment\n TLS:: Get, Set\n";
		break;
	case Test::TEST_TYPE_INCREMENT_LOCK_MUTEX :
		strFunctionTested = ".Mutex:: Lock, Unlock\n";
		break;
	case Test::TEST_TYPE_INCREMENT_LOCK_SPINLOCK :
		strFunctionTested = ".Spin:: Lock, Unlock\n";
		break;
	case Test::TEST_TYPE_INCREMENT_LOCK_RWLOCK :
		strFunctionTested = ".RWLock:: Lock, Unlock\n";
		break; 
	}

	MSG( "=============================================================================\n" );
	MSG( " %s \n\n", _strTestName );
	MSG("=> Iteration %d\n", iLoopCount ); 
	MSG( " - functions : \n" );
	MSG( ".TaskManager:: BeginAddGroupTask, AddTask, EndAddGroupTask, WaitOnTask\n" );
	MSG( "%s", strFunctionTested );	
	MSG("-----------------------------------------------------------------------------\n");

	using namespace Parallel;

	// Reset the counters the tasks will hammer.
	Test::iUnsafeCnt = 0;
	Test::iConcurrentCnt.Set( 0 );		


	int iNbIncrement = MAX_TEST_FACTOR * NB_THREADS;
	int iNbTasks = iNbIncrement + 1;	// +1 for the group task itself
	 
	
	MSG("-- working on %d concurrent Tasks -- \n", iNbTasks );

	pTaskManager->HardReset();

	MEASURE_START(test);
	{	
		TaskID iGroupTaskID = pTaskManager->BeginAddGroupTask();

		for( int i = 0; i < NB_THREADS; i++ )
		{
			for( int k = 0; k < MAX_TEST_FACTOR ; k++ )
			{
				Task task;

				Test::BuildTask( _iTestType, _iValue, task );

				pTaskManager->AddTask( &task, iGroupTaskID );
			}
		}

		pTaskManager->EndAddGroupTask( iGroupTaskID );
		
		pTaskManager->WaitOnTask( iGroupTaskID );
	}
	MEASURE_STOP_LOG( test, "Time for Adding AND Processing Tasks  : ", 0 );

	
	if( _bSaveResult )
	{
		vResults.push_back( BenchTestResult( GET_MEASURE(test), _strTestName ) );
	} 
 
	// Check results ........................................................................
	CheckResult( _iTestType, iNbIncrement );
}

//===================================================================================================
// Selects which allocator implementation TestSingleThreadMemoryAllocFree benchmarks.
enum MemoryManagementType
{
	TEST_MEM_NAIVEMEMPOOL = 0
,	TEST_MEM_MEMPOOL 
,	TEST_MEM_HBFIMEMPOOL
,	TEST_MEM_LOCKFREEMEMPOOL 
,	TEST_MEM_DEFAULT 		// plain new/delete baseline
};

//===================================================================================================
// Single-threaded allocator benchmark: allocates MAX_TEST_FACTOR blocks, then
// frees them in LIFO order, timing both passes together. _type selects the
// allocator; _bPolluteAllocs pre-fragments it first; _bSaveResult records the
// timing in vResults. Afterwards the pool's own counters are checked against
// the expected totals.
void TestSingleThreadMemoryAllocFree( const char* _strTestName, MemoryManagementType _type, bool _bPolluteAllocs, bool _bSaveResult )
{
	// Initialized to safe defaults: the original left these three uninitialized,
	// which was undefined behavior (banner %s and the final CHECK_ERRORs) for an
	// out-of-range _type.
	const char* strClassTested = "Unknown";

	switch( _type )
	{
	case TEST_MEM_NAIVEMEMPOOL :
		strClassTested = "NaiveMemoryPool";
		break;
	case TEST_MEM_MEMPOOL :
		strClassTested = "MemoryPool";
		break;
	case TEST_MEM_HBFIMEMPOOL :
		strClassTested = "HBFIMemoryPool";
		break;		
	case TEST_MEM_LOCKFREEMEMPOOL :
		strClassTested = "LockFreeMemoryPool";
		break;
	case TEST_MEM_DEFAULT :
		strClassTested = "Default";
		break; 
	}
	MSG( "=============================================================================\n" );
	MSG( " %s \n\n", _strTestName );
	MSG("=> Iteration %d\n", iLoopCount ); 
	MSG( " - functions : \n" );
	MSG( ".%s:: Allocate, Free\n", strClassTested );
	MSG("-----------------------------------------------------------------------------\n");

	int iTotalAllocCount = 0;
	int iCurrentAllocCount = 0;


// Shared body for all pool types: init the pool, optionally pollute it, then
// run the timed alloc pass followed by the LIFO free pass and grab the
// pool's bookkeeping counters.
#define POOL_ALLOC_FREE_TEST(_poolClass)	{ _poolClass		pool; \
											pool.Initialize( sizeof(Test::TestWork), MAX_TEST_FACTOR * 2, 0 ) ;			\
											if(_bPolluteAllocs) {pool.PolluteEvenIndices();}							\
											std::vector< UBYTE* > vAllocs;												\
											vAllocs.reserve( MAX_TEST_FACTOR );														\
											MEASURE_START(test);															\
											{																			\
												for( int i = 0; i < MAX_TEST_FACTOR; i++ )											\
												{																		\
													UBYTE* pAlloc = pool.Allocate();									\
													vAllocs.push_back( pAlloc );										\
												}																		\
																														\
												for( int i = 0; i < MAX_TEST_FACTOR; i++ )											\
												{																		\
													UBYTE* pAlloc = vAllocs.back();										\
													vAllocs.pop_back();													\
													pool.Free( pAlloc );												\
												}																		\
											}																			\
											MEASURE_STOP_LOG( test, "Time consumed for allocating and freeing : ", 0 );								\
											if( _bSaveResult )	{ vResults.push_back( BenchTestResult( GET_MEASURE(test), _strTestName ) );}		\
											iTotalAllocCount = pool.GetTotalAllocationsCount();													\
											iCurrentAllocCount = pool.GetAllocationsCount(); }
	
	switch( _type )
	{
	case TEST_MEM_NAIVEMEMPOOL :
		POOL_ALLOC_FREE_TEST(NaiveMemoryPool);
		break;
	case TEST_MEM_MEMPOOL :
		POOL_ALLOC_FREE_TEST(MemoryPool); 
		break;
	case TEST_MEM_HBFIMEMPOOL :
		POOL_ALLOC_FREE_TEST(HBFIMemoryPool); 
		break;		
	case TEST_MEM_LOCKFREEMEMPOOL :
		POOL_ALLOC_FREE_TEST(LockFreeMemoryPool); 
		break;
	case TEST_MEM_DEFAULT :
		{
			// Baseline: global new/delete. Pollution here means keeping a batch
			// of extra live objects around during the measured passes.
			std::vector< Test::TestWork* > vPollutedAllocs;

			if(_bPolluteAllocs) 
			{
				vPollutedAllocs.reserve( MAX_TEST_FACTOR );

				for( int i = 0; i < MAX_TEST_FACTOR; i++ )											
				{																		
					Test::TestWork* pAlloc = new Test::TestWork();
					vPollutedAllocs.push_back( pAlloc );										
				}
			}							
			std::vector< Test::TestWork* > vAllocs;
			vAllocs.reserve( MAX_TEST_FACTOR );														
			MEASURE_START(test);															
			{																			
				for( int i = 0; i < MAX_TEST_FACTOR; i++ )											
				{																	
					Test::TestWork* pAlloc = new Test::TestWork();						
					vAllocs.push_back( pAlloc );										
				}																		
																						
				for( int i = 0; i < MAX_TEST_FACTOR; i++ )											
				{												
					Test::TestWork* pAlloc = vAllocs.back();										
					vAllocs.pop_back();	
					delete pAlloc;												
				}																		
			}																	
			MEASURE_STOP_LOG( test,"Time consumed for allocating and freeing : ", 0 );	
			if(_bPolluteAllocs) 
			{
				for( int i = 0; i < MAX_TEST_FACTOR; i++ )											
				{																			
					Test::TestWork* pAlloc = vPollutedAllocs.back();										
					vPollutedAllocs.pop_back();	
					delete pAlloc;
				}
			}									
			if( _bSaveResult )	
			{ 
				vResults.push_back( BenchTestResult( GET_MEASURE(test), _strTestName ) );
			}		
			// new/delete has no bookkeeping; synthesize counts matching the expectations below.
			iTotalAllocCount	= (_bPolluteAllocs) ? MAX_TEST_FACTOR + MAX_TEST_FACTOR: MAX_TEST_FACTOR;
			iCurrentAllocCount	= (_bPolluteAllocs) ? MAX_TEST_FACTOR : 0;
		}
		break; 
	}

// Scope the helper to this function (the original commented-out #undef
// referenced the wrong macro name, MEM_ALLOC_FREE_TEST).
#undef POOL_ALLOC_FREE_TEST
																				
	int iExpectedTotalAllocCount	= (_bPolluteAllocs)? MAX_TEST_FACTOR + MAX_TEST_FACTOR : MAX_TEST_FACTOR ;		
	int iExpectedCurrentAllocCount	= (_bPolluteAllocs)? MAX_TEST_FACTOR : 0;

	CHECK_ERROR( iExpectedTotalAllocCount == iTotalAllocCount,	
		"Did not properly proceed to every requested Allocate()!\n" );			
	CHECK_ERROR( iExpectedCurrentAllocCount	== iCurrentAllocCount,		
		"Did not properly proceed to every requested Free() !\n" ); 
}


//===================================================================================================
//===================================================================================================
//===================================================================================================
//=================================================================================================== 

// SMALL TEST ! Abstraction methods
////////////////////////////////////////
class IManager
{
public:

	// Test payload entry point, implemented by the concrete manager.
	virtual void Do( void ) = 0;

	// Virtual destructor so Destroy() can delete through the interface pointer.
	virtual ~IManager( void ){}

	// Factory / disposal pair hiding the concrete implementation type from callers.
	static IManager*	Create( void );
	static void			Destroy( IManager* );
};

class Manager : public IManager
{
public:
	Manager(){}
	~Manager(){}

	inline void Do( void )
	{
		Test::iConcurrentCnt.Add( 1 );
	}
};

IManager*	IManager::Create( void )
{
	// The concrete Manager type is only known inside this factory ;
	// callers only ever see the IManager interface.
	IManager* pNewManager = new Manager();
	return pNewManager;
}
void		IManager::Destroy( IManager* _pManager )
{
	// Disposal through the interface : IManager declares a virtual destructor,
	// so deleting via the base pointer is well defined.
	if( _pManager != nullptr )
	{
		delete _pManager;
	}
}

////////////////////////////////////////
class PManagerImpl
{
public:
	
	// Same test payload as the other manager variants : a single atomic
	// increment. Reached through PManager, so each call pays one pointer
	// indirection (the cost being measured in main()).
	void Do()
	{
		Test::iConcurrentCnt.Add( 1 );
	}
};

class PManager
{
public:

	PManager( void )
	{
		mpImpl = new PManagerImpl();
	}
	~PManager( void )
	{
		delete mpImpl;
	}

	void Do( void )
	{
		mpImpl->Do();
	}

private:
	PManagerImpl* mpImpl;
};

////////////////////////////////////////
class	TManagerImpl
{
public:

	// Test payload for the template-based manager : a single atomic increment.
	// The implementation type is resolved at compile time, so this call can be
	// inlined with no virtual dispatch and no pointer indirection.
	inline void Do()
	{
		Test::iConcurrentCnt.Add( 1 );
	}
};

// Policy-based manager : the implementation is held by value and its type is
// fixed at compile time, so Do() resolves statically (no vtable lookup).
template< class Impl >
class TManager
{
public:

	// Forwards straight to the implementation object.
	void Do()
	{
		mImplementation.Do();
	}

private:
	Impl	mImplementation;	// held by value, default-constructed
};
////////////////////////////////////////
// Free-function baseline for the abstraction-overhead comparison in main() :
// the same atomic increment with no class machinery around it.
void Do( void )
{
	Test::iConcurrentCnt.Add( 1 );
}



int main(int argc, char* argv[])
{
	////////////////////////////////////////////

	while( false )
	{

	{
		Test::iConcurrentCnt.Set(0);

		MEASURE_START(test);

		for( int i = 0; i < MAX_TEST_FACTOR * 100; i++ )
		{
			Do();
		}

		MEASURE_STOP_LOG(test, "::Do() : ", 0 );
	}
		
	{
		Test::iConcurrentCnt.Set(0);

		PManager manager;

		MEASURE_START(test);

		for( int i = 0; i < MAX_TEST_FACTOR * 100; i++ )
		{
			manager.Do();
		}

		MEASURE_STOP_LOG(test, "PManager::Do() : ", 0 );
	}

	{
		Test::iConcurrentCnt.Set(0);

		IManager* pManager = IManager::Create();		

		MEASURE_START(test);

		for( int i = 0; i < MAX_TEST_FACTOR * 100; i++ )
		{
			pManager->Do();
		}
		MEASURE_STOP_LOG(test, "IManager::Do() :", 0 );

		IManager::Destroy( pManager );
	}
	{
		Test::iConcurrentCnt.Set(0);

		Manager* pManager = (Manager*)IManager::Create();		

		MEASURE_START(test);

		for( int i = 0; i < MAX_TEST_FACTOR * 100; i++ )
		{
			pManager->Do();
		}
		MEASURE_STOP_LOG(test, "IManagerImpl::Do() :", 0 );

		IManager::Destroy( pManager );
	}
	{
		Test::iConcurrentCnt.Set(0);

		TManager<TManagerImpl> manager;		

		MEASURE_START(test);

		for( int i = 0; i < MAX_TEST_FACTOR * 100; i++ )
		{
			manager.Do();
		}
		MEASURE_STOP_LOG(test, "TManager::Do() :", 0 );
	}

	

	LOG("OK...\n");
	}












	////////////////////////////////////////////
	Parallel::AtomicCounter iTest ( 0xFFFFFFFF );

	 
	iTest.Add( 100 );

	UINT32 iTTT = iTest.Get();

    using namespace Parallel;
    using namespace Test;

	UnitTest_FindLSB();
	UnitTest_FindMSB();

	Test::iSleepTime = 0;

	if( FORCE_THREAD_COUNT )
	{
		NB_THREADS = WORKER_THREAD_COUNT;
	}
	else
	{
		NB_THREADS = Parallel::GetNbCores() - 1;
	}

	MAX = MAX_TEST_FACTOR * NB_THREADS; 



	pTaskManager = nullptr;

	gInts = nullptr;

	if( TEST_TASK_MANAGER_SCHEDULING_AND_MUTEX_AND_LOCKFREEQUEUE || TEST_WORKERTHREAD_AND_MUTEX_AND_LOCKFREEQUEUE )
	{	
		gInts = new INT32[ MAX ];

		for( int i = 0 ; i < MAX; i ++ )
		{
			gInts[ i ] = i;
		}
	}



//    INIT_MEASURE_SYSTEM();	
	MSG("================================================================================\n");	
	MSG(" System has %d CPU cores \n\n", Parallel::GetNbCores() );

	if(	TEST_BASIC_THREAD_PRINT )
	do
    {
		Profiling::Initialize( 100, 8 );

	//	LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_PRINT", NB_THREADS, & Test::ConcurrentPrint, (void*) astrNames[ i ], 0, false );
		MSG("================================================================================\n");	
		MSG(" TEST_BASIC_THREAD_PRINT - Basic threaded 'printf'\n\n" );
		MSG("=> Iteration %d\n", iLoopCount ); 
		MSG(" - functions : \n");
		MSG(".Thread:: Create, Run, Wait, Destroy\n");
		MSG("--------------------------------------------------------------------------------\n");

		std::vector<Thread*> apThreads;
		MB_ASSERT( NB_THREADS <= 100, "Cannot run this test with more than 100 threads\n" );

		char astrNames[100][16];

        for(int i = 0; i < NB_THREADS; i++)
        {
			sprintf( astrNames[ i ], "Thread %d", i );
		}

        for(int i = 0; i < NB_THREADS; i++)
        {
            apThreads.push_back( new Thread() );

			apThreads[ i ]->Run( & Test::ConcurrentPrint, (void*) astrNames[ i ] );
        }
        for(int i = 0; i < NB_THREADS; i++)
        {
            apThreads[ i ]->Wait();
            delete apThreads[ i ];
        }
		apThreads.clear();
		
		//  then sort them , 
		//	and finally print them
		Profiling::SortStatsByStartTime();
		Profiling::PrintStats();

		// Clear them when you're done processing
		Profiling::ClearStats();

		Profiling::Release();
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 
	// #endif // TEST_BASIC_THREAD_PRINT
  
  
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/* Verifies a pool's allocation counters after a test run.
   When the run was polluted, each thread performed twice the allocations and
   left half of them live ; otherwise everything must have been freed.
   Note : uses the _bPolluted PARAMETER (the previous version hard-coded the
   local name 'bPollute', silently depending on such a local existing at every
   expansion site). Expects locals 'pool' in scope at the expansion site. */
#define CHECK_MEM_ALLOC_COUNTS(_nbThreads,_nbAllocFreePerThread,_bPolluted)																\
		int iExpectedTotalAllocCount	= (_bPolluted)? (_nbAllocFreePerThread) + (_nbAllocFreePerThread) : (_nbAllocFreePerThread) ;	\
		int iExpectedCurrentAllocCount	= (_bPolluted)? (_nbAllocFreePerThread) : 0;													\
																																		\
		CHECK_ERROR( (_nbThreads) * iExpectedTotalAllocCount == pool.GetTotalAllocationsCount(),										\
			"Did not properly proceed to every requested Allocate()!\n" );																\
		CHECK_ERROR( (_nbThreads) * iExpectedCurrentAllocCount	== pool.GetAllocationsCount(),											\
			"Did not properly proceed to every requested Free() !\n" );											


	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// MULTI THREAD - ALLOC FREE ALLOC FREE ALLOC FREE ALLOC FREE ... - FRESH START

	if( TEST_BASIC_THREAD_CONCURRENT_FRESH_DEFAULT_ALLOC_FREE_IMMEDIATE )
	do
	{ 	
		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_FRESH_DEFAULT_ALLOC_FREE_IMMEDIATE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_DEFAULT_ALLOC_FREE, 0, true );
	} // #endif 
	while( TEST_RUNFOREVER && ++iLoopCount ); 


	if( TEST_BASIC_THREAD_CONCURRENT_FRESH_NAIVEMEMORYPOOL_ALLOC_FREE_IMMEDIATE )
	do
	{		
		bool bPollute = false;

		NaiveMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), MAX / 3, 0 ) ;

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_FRESH_NAIVEMEMORYPOOL_ALLOC_FREE_IMMEDIATE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 
	
	if( TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_MEMORYPOOL_ALLOC_FREE_IMMEDIATE )
	do
	{		
		bool bPollute = false;

		MemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), MAX / 3, 0 ) ;

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_MEMORYPOOL_ALLOC_FREE_IMMEDIATE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_MEMORYPOOL_ALLOC_FREE, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_HBFIMEMORYPOOL_ALLOC_FREE_IMMEDIATE )
	do
	{		
		bool bPollute = false;

		HBFIMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), MAX / 3, 0 ) ;

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_HBFIMEMORYPOOL_ALLOC_FREE_IMMEDIATE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_HBFIMEMORYPOOL_ALLOC_FREE, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 


	if( TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCKFREEMEMORYPOOL_ALLOC_FREE_IMMEDIATE )
	do
	{ 	
		bool bPollute = false;

		LockFreeMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), MAX / 3, 0 ) ;

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCKFREEMEMORYPOOL_ALLOC_FREE_IMMEDIATE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC_FREE, (UINT32) &pool, true );
 
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}	
	while( TEST_RUNFOREVER && ++iLoopCount ); 
 
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// MULTI THREAD - ALLOC FREE ALLOC FREE ALLOC FREE ALLOC FREE ... - POLLUTED
	if( TEST_BASIC_THREAD_CONCURRENT_POLLUTED_DEFAULT_ALLOC_FREE_IMMEDIATE )
	do
	{ 	
		bool bPollute = true;
		
		std::vector< Test::TestWork* > vPollutedAllocs;

		if(bPollute) 
		{
			vPollutedAllocs.reserve( MAX );

			for( int i = 0; i < MAX; i++ )											
			{																		
				Test::TestWork* pAlloc = new Test::TestWork();
				vPollutedAllocs.push_back( pAlloc );										
			}
		}							 

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_POLLUTED_DEFAULT_ALLOC_FREE_IMMEDIATE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_DEFAULT_ALLOC_FREE, 0, true );

		if(bPollute) 
		{
			for( int i = 0; i < MAX; i++ )											
			{																			
				Test::TestWork* pAlloc = vPollutedAllocs.back();										
				vPollutedAllocs.pop_back();	
				delete pAlloc;
			}
		}
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE_IMMEDIATE )
	do
	{		
		bool bPollute = true;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + MAX_TEST_FACTOR) * (NB_THREADS + 1);
		NaiveMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;
		pool.PolluteEvenIndices();

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE_IMMEDIATE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 


	if( TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_MEMORYPOOL_ALLOC_FREE_IMMEDIATE )
	do
	{		
		bool bPollute = true;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + MAX_TEST_FACTOR) * (NB_THREADS + 1);
		MemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;
		pool.PolluteEvenIndices();

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_MEMORYPOOL_ALLOC_FREE_IMMEDIATE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_MEMORYPOOL_ALLOC_FREE, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_HBFIMEMORYPOOL_ALLOC_FREE_IMMEDIATE )
	do
	{		
		bool bPollute = true;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + MAX_TEST_FACTOR) * (NB_THREADS + 1);
		HBFIMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;
		pool.PolluteEvenIndices();

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_HBFIMEMORYPOOL_ALLOC_FREE_IMMEDIATE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_HBFIMEMORYPOOL_ALLOC_FREE, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCKFREEMEMORYPOOL_ALLOC_FREE_IMMEDIATE )
	do
	{ 	
		bool bPollute = true;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + MAX_TEST_FACTOR) * (NB_THREADS + 1);			
		LockFreeMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;
		pool.PolluteEvenIndices();

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCKFREEMEMORYPOOL_ALLOC_FREE_IMMEDIATE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC_FREE, (UINT32) &pool, true );
				
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
 
	}	 
	while( TEST_RUNFOREVER && ++iLoopCount ); 
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// MULTI THREAD - ALLOC ALLOC ALLOC ... FREE FREE FREE FREE - FRESH START
	
	if( TEST_BASIC_THREAD_CONCURRENT_FRESH_DEFAULT_ALLOC_FREE )
	do
	{ 	
		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_FRESH_DEFAULT_ALLOC_FREE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_DEFAULT_ALLOC, 0, true );
	} // #endif 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE )
	do
	{		
		bool bPollute = false;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + 0) * (NB_THREADS + 1);
		NaiveMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_NAIVEMEMORYPOOL_ALLOC, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_MEMORYPOOL_ALLOC_FREE )
	do
	{		
		bool bPollute = false;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + 0) * (NB_THREADS + 1);
		MemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_MEMORYPOOL_ALLOC_FREE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_MEMORYPOOL_ALLOC, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_HBFIMEMORYPOOL_ALLOC_FREE )
	do
	{		
		bool bPollute = false;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + 0) * (NB_THREADS + 1);
		HBFIMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCK_HBFIMEMORYPOOL_ALLOC_FREE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_HBFIMEMORYPOOL_ALLOC_FREE, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 
	
	if( TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCKFREEMEMORYPOOL_ALLOC_FREE )
	do
	{ 	
		bool bPollute = false;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + 0) * (NB_THREADS + 1);
		
		LockFreeMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_FRESH_LOCKFREEMEMORYPOOL_ALLOC_FREE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC, (UINT32) &pool, true );
 
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}	
	while( TEST_RUNFOREVER && ++iLoopCount ); 
 
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	//								MULTI THREAD - ALLOC ALLOC ALLOC ... FREE FREE FREE FREE - POLLUTED 
	if( TEST_BASIC_THREAD_CONCURRENT_POLLUTED_DEFAULT_ALLOC_FREE )
	do
	{ 	
		bool bPollute = true;
		
		std::vector< Test::TestWork* > vPollutedAllocs;

		if(bPollute) 
		{
			vPollutedAllocs.reserve( MAX );

			for( int i = 0; i < MAX; i++ )											
			{																		
				Test::TestWork* pAlloc = new Test::TestWork();
				vPollutedAllocs.push_back( pAlloc );										
			}
		}							 

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_POLLUTED_DEFAULT_ALLOC_FREE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_DEFAULT_ALLOC, 0, true );

		if(bPollute) 
		{
			for( int i = 0; i < MAX; i++ )											
			{																			
				Test::TestWork* pAlloc = vPollutedAllocs.back();										
				vPollutedAllocs.pop_back();	
				delete pAlloc;
			}
		}			

	} // #endif 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE )
	do
	{		
		bool bPollute = true;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + MAX_TEST_FACTOR) * (NB_THREADS + 1);
		NaiveMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;
		pool.PolluteEvenIndices();

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_NAIVEMEMORYPOOL_ALLOC_FREE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_NAIVEMEMORYPOOL_ALLOC, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_MEMORYPOOL_ALLOC_FREE )
	do
	{		
		bool bPollute = true;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + MAX_TEST_FACTOR) * (NB_THREADS + 1);
		MemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;
		pool.PolluteEvenIndices();

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_MEMORYPOOL_ALLOC_FREE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_MEMORYPOOL_ALLOC, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_HBFIMEMORYPOOL_ALLOC_FREE )
	do
	{		
		bool bPollute = true;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + MAX_TEST_FACTOR) * (NB_THREADS + 1);
		HBFIMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;
		pool.PolluteEvenIndices();

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCK_HBFIMEMORYPOOL_ALLOC_FREE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCK_HBFIMEMORYPOOL_ALLOC, (UINT32) &pool, true );
		
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCKFREEMEMORYPOOL_ALLOC_FREE )
	do
	{ 	
		bool bPollute = true;

		UINT32 iMaxAlloc = (MAX_TEST_FACTOR + MAX_TEST_FACTOR) * (NB_THREADS + 1);
		
		LockFreeMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), iMaxAlloc, 0 ) ;
		pool.PolluteEvenIndices();

		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_POLLUTED_LOCKFREEMEMORYPOOL_ALLOC_FREE", NB_THREADS + 1, & Test::ConcurrentMemoryAllocFree, Test::TEST_TYPE_LOCKFREEMEMORYPOOL_ALLOC, (UINT32) &pool, true );
 
		CHECK_MEM_ALLOC_COUNTS((NB_THREADS + 1), MAX_TEST_FACTOR, bPollute);
	}	
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	//														TEST_SINGLETHREAD_
	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	  
	if( TEST_SINGLETHREAD_FRESH_NAIVEMEMORYPOOL_ALLOC_FREE )
	do
	{
		bool bPollute = false;
		TestSingleThreadMemoryAllocFree( "TEST_SINGLETHREAD_FRESH_NAIVEMEMORYPOOL_ALLOC_FREE", TEST_MEM_NAIVEMEMPOOL, bPollute, true );
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_SINGLETHREAD_FRESH_MEMORYPOOL_ALLOC_FREE )
	do
	{
		bool bPollute = false;
		TestSingleThreadMemoryAllocFree( "TEST_SINGLETHREAD_FRESH_MEMORYPOOL_ALLOC_FREE", TEST_MEM_MEMPOOL, bPollute, true );
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_SINGLETHREAD_FRESH_HBFIMEMORYPOOL_ALLOC_FREE )
	do
	{
		bool bPollute = false;
		TestSingleThreadMemoryAllocFree( "TEST_SINGLETHREAD_FRESH_HBFIMEMORYPOOL_ALLOC_FREE", TEST_MEM_HBFIMEMPOOL, bPollute, true );
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_SINGLETHREAD_FRESH_LOCKFREEMEMORYPOOL_ALLOC_FREE )
	do
	{
		bool bPollute = false;
		TestSingleThreadMemoryAllocFree( "TEST_SINGLETHREAD_FRESH_LOCKFREEMEMORYPOOL_ALLOC_FREE", TEST_MEM_LOCKFREEMEMPOOL, bPollute, true );
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 
		 
	if( TEST_SINGLETHREAD_POLLUTED_NAIVEMEMORYPOOL_ALLOC_FREE )
	do
	{
		bool bPollute = true;
		TestSingleThreadMemoryAllocFree( "TEST_SINGLETHREAD_POLLUTED_NAIVEMEMORYPOOL_ALLOC_FREE", TEST_MEM_NAIVEMEMPOOL, bPollute, true );
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_SINGLETHREAD_POLLUTED_MEMORYPOOL_ALLOC_FREE )
	do
	{
		bool bPollute = true;
		TestSingleThreadMemoryAllocFree( "TEST_SINGLETHREAD_POLLUTED_MEMORYPOOL_ALLOC_FREE", TEST_MEM_MEMPOOL, bPollute, true );
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_SINGLETHREAD_POLLUTED_HBFIMEMORYPOOL_ALLOC_FREE )
	do
	{
		bool bPollute = true;
		TestSingleThreadMemoryAllocFree( "TEST_SINGLETHREAD_POLLUTED_HBFIMEMORYPOOL_ALLOC_FREE", TEST_MEM_HBFIMEMPOOL, bPollute, true );
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_SINGLETHREAD_POLLUTED_LOCKFREEMEMORYPOOL_ALLOC_FREE )
	do
	{
		bool bPollute = true;
		TestSingleThreadMemoryAllocFree( "TEST_SINGLETHREAD_POLLUTED_LOCKFREEMEMORYPOOL_ALLOC_FREE", TEST_MEM_LOCKFREEMEMPOOL, bPollute, true );
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_SINGLETHREAD_FRESH_DEFAULT_ALLOC_FREE )
	do
	{
		bool bPollute = false;
		TestSingleThreadMemoryAllocFree( "TEST_SINGLETHREAD_FRESH_DEFAULT_ALLOC_FREE", TEST_MEM_DEFAULT, bPollute, true );
	}
	while( TEST_RUNFOREVER && ++iLoopCount ); 
		
	if( TEST_SINGLETHREAD_POLLUTED_DEFAULT_ALLOC_FREE )
	do
	{
		bool bPollute = true;
		TestSingleThreadMemoryAllocFree( "TEST_SINGLETHREAD_POLLUTED_DEFAULT_ALLOC_FREE", TEST_MEM_DEFAULT, bPollute, true );
	}  
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_ADD_TLS_ATOMIC )
	do
	{		
		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_ADD_TLS_ATOMIC", NB_THREADS + 1, & Test::ConcurrentIncrementWithTLS, Test::TEST_TYPE_INCREMENT_ATOMIC_TLS, 0, true );
	}  	
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_ADD_ATOMIC )
	do
	{	
		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_ADD_ATOMIC", NB_THREADS + 1, & Test::ConcurrentIncrement, Test::TEST_TYPE_INCREMENT_ATOMIC, 0, true );
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_ADD_SPINLOCK )
	do
	{	
		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_ADD_SPINLOCK", NB_THREADS + 1, & Test::ConcurrentIncrement, Test::TEST_TYPE_INCREMENT_LOCK_SPINLOCK, 0, true );
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_ADD_RWLOCK )
	do
	{	
		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_ADD_RWLOCK", NB_THREADS + 1, & Test::ConcurrentIncrement, Test::TEST_TYPE_INCREMENT_LOCK_RWLOCK, 0, true );
	}  
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	if( TEST_BASIC_THREAD_CONCURRENT_ADD_MUTEX )
	do
	{	
		LaunchTestWithBasicThreads( "TEST_BASIC_THREAD_CONCURRENT_ADD_MUTEX", NB_THREADS + 1, & Test::ConcurrentIncrement, Test::TEST_TYPE_INCREMENT_LOCK_MUTEX, 0, true );
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 
 
	if( TEST_WORKERTHREAD_CONCURRENT_ADD_ATOMIC )
	do
	{ 		
		LockFreeMemoryPool pool;
		pool.Initialize( sizeof(Test::TestWork), MAX, 0 ) ;

		MSG("================================================================================\n");			 
		MSG(" TEST_WORKERTHREAD_CONCURRENT_ADD_ATOMIC - WorkerThread, AtomicCounter \n\n" );
		MSG("=> Iteration %d\n", iLoopCount ); 
		MSG(" involved functions : \n");
		MSG(".WorkerThread::AddWork\n");
		MSG(".WorkerThread::Run\n");
		MSG(".WorkerThread::Wait\n"); 
		MSG("--------------------------------------------------------------------------------\n");

		Test::iConcurrentCnt.Set( 0 );
 
		int iNbTasks = MAX_TEST_FACTOR * NB_THREADS;
		MSG("-- working on %d concurrent Tasks -- \n", iNbTasks );

		int iMaxTestFactor = GetNextPowerOfTwo( MAX_TEST_FACTOR ) ;

		std::vector<Parallel::WorkerThread*> apThreads; 
		for(int i = 0; i < NB_THREADS; i++)
		{
			apThreads.push_back( new WorkerThread( iMaxTestFactor ) ); 
		}

//		MEASURE_START();
		for(int i = 0; i < NB_THREADS; i++)
		{
			for( int k = 0; k < MAX_TEST_FACTOR ; k++ )
			{
//				Test::TestWork* pTask = new Test::TestWork( Test::TEST_TYPE_INCREMENT_ATOMIC, 0 );

				UBYTE* pBlock = pool.Allocate();
				Test::TestWork* pTask = new( pBlock ) Test::TestWork( Test::TEST_TYPE_INCREMENT_ATOMIC, (UINT32) &pool );

				apThreads[ i ]->AddWork( pTask );
			}
		}

		MEASURE_START(test);
		for(int i = 0; i < NB_THREADS; i++)
		{
			apThreads[ i ]->Run( WorkerThread::RUN_ONCE );
		}
		for(int i = 0; i < NB_THREADS; i++)
		{
			apThreads[ i ]->Wait();
		}
		MEASURE_STOP_LOG(test, "Time for Adding AND Processing Tasks  : ", 0 );
			
		vResults.push_back( BenchTestResult( GET_MEASURE(test), "TEST_WORKERTHREAD_CONCURRENT_ADD_ATOMIC") );

		for(int i = 0; i < NB_THREADS; i++)
		{
			delete apThreads[ i ];
		}
		apThreads.clear(); 

		// Check results ........................................................................
		CHECK_ERROR( iNbTasks == Test::iConcurrentCnt.Get(), 
			" Unexpected result - the iConcurrentCnt is %d when it should be %d\n", Test::iConcurrentCnt.Get(), iNbTasks );  
	} 
	while( TEST_RUNFOREVER && ++iLoopCount ); 

	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	//														TEST_TASK_MANAGER_
	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Shared TaskManager setup: built once here, reused by every TaskManager-based
	// test below, and deleted during the final teardown at the end of the run.
	if( TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_MUTEX		
	||	TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_SPINLOCK	
	||	TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_RWLOCK		
	||	TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_ATOMIC			
	||	TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_TLS_ATOMIC		
	||	TEST_TASK_MANAGER_SCHEDULING_AND_MUTEX_AND_LOCKFREEQUEUE	
	||	TEST_PROFILING_TASK_MANAGER ) 
	{
		// Setup the TaskManager !!! ----------------------------------------------

		// Task Queues infos ---------------------------------
		// Layout: one queue per worker thread (indices [0, NB_THREADS)), plus
		// one shared queue at index NB_THREADS with undefined affinity.
		// NOTE(review): an earlier layout used NB_THREADS + 2 queues (see the
		// commented-out lines below); the code is now consistent at NB_THREADS + 1.
		std::vector< Parallel::TaskQueueInfo > aInfos;
//		aInfos.reserve( NB_THREADS + 2 );
		aInfos.reserve( NB_THREADS + 1 );
		
		Parallel::TaskQueueInfo emptyInfo;
		// fill with default-constructed infos; fields are overwritten below
//		for( UINT32 i = 0; i < NB_THREADS + 2; i++ )
		for( UINT32 i = 0; i < NB_THREADS + 1; i++ )
		{
			aInfos.push_back( emptyInfo );
		}
		
		// cap each thread-specific queue at the next power of two >= 100 tasks
		const UINT32 iMaxTaskForSpecificAffinity = GetNextPowerOfTwo( 100 );

		UINT32 iMaxTasks = 0;
		// thread specific queues : queue i only serves the worker whose bit is set
		// in the affinity mask (1 << i) -- assumes NB_THREADS <= 31 so the shift fits
//		for( UINT32 i = 0; i < NB_THREADS + 1; i++ )
		for( UINT32 i = 0; i < NB_THREADS; i++ )
		{
			aInfos[ i ].miAffinityMask = 1 << i;
			aInfos[ i ].miPriority = 1;
			aInfos[ i ].miMaxTasks = iMaxTaskForSpecificAffinity;

			iMaxTasks += aInfos[ i ].miMaxTasks;
		}
		// global shared queue : any worker may pick tasks from it
		//aInfos[ NB_THREADS + 1 ].miAffinityMask = Parallel::TaskManager::UNDEFINED_AFFINITY;
		//aInfos[ NB_THREADS + 1 ].miPriority = 1;
		//aInfos[ NB_THREADS + 1 ].miMaxTasks = GetNextPowerOfTwo( MAX );
		//iMaxTasks += aInfos[ NB_THREADS + 1 ].miMaxTasks;
		
		aInfos[ NB_THREADS ].miAffinityMask = Parallel::TaskManager::UNDEFINED_AFFINITY;
		aInfos[ NB_THREADS ].miPriority = 1;
		aInfos[ NB_THREADS ].miMaxTasks = GetNextPowerOfTwo( MAX );		
		iMaxTasks += aInfos[ NB_THREADS ].miMaxTasks;

		// NB_THREADS workers, NB_THREADS + 1 queues described by aInfos
//		pTaskManager = new TaskManager( NB_THREADS, iMaxTasks, 1, 1, NB_THREADS + 2, &aInfos[0] );
		pTaskManager = new TaskManager( NB_THREADS, iMaxTasks, 1, 1, NB_THREADS + 1, &aInfos[0] );
		pTaskManager->Run();
	} 
	// 
	
	// Atomic-increment scheduling benchmark; repeats while TEST_RUNFOREVER holds.
	if( TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_ATOMIC )
	{
		for( bool bKeepRunning = true; bKeepRunning; )
		{
			LaunchTestWithTaskManager( "TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_ATOMIC", NB_THREADS, Test::TEST_TYPE_INCREMENT_ATOMIC, 0, true );
			bKeepRunning = ( TEST_RUNFOREVER && ++iLoopCount );
		}
	}
	// #endif // TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_ATOMIC
  

	// TLS + atomic increment benchmark: per-thread TLS counters are initialized by
	// affinity-pinned BEGIN tasks, incremented by a batch of free-floating tasks,
	// then flushed into the shared atomic counter by affinity-pinned END tasks.
	// The main thread mirrors the BEGIN/END work manually since it also helps on wait.
	if(	TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_TLS_ATOMIC )
	do
	{ 					
		MSG("================================================================================\n");			
		MSG(" TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_TLS_ATOMIC \n\n" );
		MSG("=> Iteration %d\n", iLoopCount ); 
		MSG(" - functions : \n");
		MSG(".TaskManager:: BeginAddGroupTask, AddTask, EndAddGroupTask, WaitOnTask\n");
		MSG(".TLS:: Get, Set\n");
		MSG("--------------------------------------------------------------------------------\n");

		// shared atomic result; checked against iNbIncrement at the end
		Test::iConcurrentCnt.Set( 0 ); 

		int iNbIncrement = MAX_TEST_FACTOR * NB_THREADS;
		// total task count, itemized to match the graph built below
		int iNbTasks =	1				// Group
						+ 1				// Group TLS BEGIN
						+ NB_THREADS	// TLS BEGIN
						+ 1				// Group Increment 
						+ iNbIncrement	// Increment
						+ NB_THREADS	// TLS END
						;

		bool bMainThreadHelpsOnWait = true;

		int iNbThreadUsed = NB_THREADS;
		iNbThreadUsed += (bMainThreadHelpsOnWait)? 1 : 0;

		MSG("-- working on %d concurrent Tasks -- \n", iNbTasks );

		//TaskManager* pTaskManager = new TaskManager( NB_THREADS, iNbTasks , 1, 1 );
		//pTaskManager->Run();
		pTaskManager->HardReset();
		{ 
			// let the workers settle after the hard reset before measuring
			Parallel::Sleep(1000);

			// go go go !
			MEASURE_START(test);

			TaskID iGroupTaskID = pTaskManager->BeginAddGroupTask();
			pTaskManager->SetDebugName( iGroupTaskID , "TEST" );
			{
				// TASK - PREPARE THREAD GROUP - on Specific Affinity ---------------------------------------------------------	
				// One BEGIN task pinned to each worker (affinity 1 << i), presumably
				// resetting that worker's TLS counter -- TODO confirm in BuildTask.
				TaskID iStartTaskID;
				{
					iStartTaskID = pTaskManager->BeginAddGroupTask( nullptr, iGroupTaskID );
					pTaskManager->SetDebugName( iStartTaskID , "GROUP_TEST_TYPE_INCREMENT_ATOMIC_TLS_BEGIN" );
					{
						for( int i = 0; i < NB_THREADS; i++ )
						{
							UINT32 iSpecificAffinity = 1 << i;

							Task taskStart;
							Test::BuildTask( Test::TEST_TYPE_INCREMENT_ATOMIC_TLS_BEGIN, 0, taskStart );

							TaskID iTaskID = pTaskManager->AddTask( &taskStart
																	, iStartTaskID
																	, Parallel::TaskManager::NO_TASK
																	, TaskManager::UNDEFINED_PRIORITY
																	, iSpecificAffinity );

							pTaskManager->SetDebugName( iTaskID , "TEST_TYPE_INCREMENT_ATOMIC_TLS_BEGIN" );
						}
					}
					pTaskManager->EndAddGroupTask( iStartTaskID );	

					// init from the main thread as well
					Test::tlsSafeCnt.Set( 0 );
				}

				// TASK - LAUNCH TASKS ! --------------------------------------------------------- 
				// The increment batch depends on iStartTaskID, so it only runs once
				// every worker's TLS is initialized.
				TaskID iBatchTaskID;
				{
					iBatchTaskID = pTaskManager->BeginAddGroupTask( nullptr, iGroupTaskID, iStartTaskID );
					pTaskManager->SetDebugName( iBatchTaskID , "GROUP_TEST_TYPE_INCREMENT_ATOMIC_TLS" );
					{
						for( int k = 0; k < NB_THREADS * MAX_TEST_FACTOR ; k++ )
						{
							Task task;
							Test::BuildTask( Test::TEST_TYPE_INCREMENT_ATOMIC_TLS, 0, task );

							TaskID iTaskID = pTaskManager->AddTask( &task
																	, iBatchTaskID );
							
							pTaskManager->SetDebugName( iTaskID , "TEST_TYPE_INCREMENT_ATOMIC_TLS" );
						}
					}
					pTaskManager->EndAddGroupTask( iBatchTaskID );	 
				}

				// TASK - FINISH THREAD GROUP - on Specific Affinity ---------------------------------------------------------
						// + 1 for the main thread !
				// One END task pinned per worker, dependent on the whole batch; the
				// main thread's "+ 1" share is done manually after WaitOnTask below.
				for( int i = 0; i < NB_THREADS; i++ )
				{
					UINT32 iSpecificAffinity = 1 << i;
				
					Task taskEnd;
					Test::BuildTask( Test::TEST_TYPE_INCREMENT_ATOMIC_TLS_END, 0, taskEnd );

					TaskID iFinishTaskID = pTaskManager->AddTask( &taskEnd
																, iGroupTaskID
																, iBatchTaskID
																, TaskManager::UNDEFINED_PRIORITY
																, iSpecificAffinity );

					pTaskManager->SetDebugName( iFinishTaskID , "TEST_TYPE_INCREMENT_ATOMIC_TLS_END" );
				}
			}
			pTaskManager->EndAddGroupTask( iGroupTaskID );
			 
			pTaskManager->WaitOnTask( iGroupTaskID, bMainThreadHelpsOnWait );

			// gather from the main thread as well
			Test::iConcurrentCnt.Add( Test::tlsSafeCnt.Get() );
			Test::tlsSafeCnt.Set( 0 );

			MEASURE_STOP_LOG( test,"Time for Adding AND Processing Tasks  : ", 0 );

			MSG( ".System w/ %d CPU cores and using %d threads \n", Parallel::GetNbCores(), pTaskManager->GetNbWorkers() + 1 );
			MSG( ".Average work load per thread : %d tasks \n", iNbTasks / iNbThreadUsed );
			MSG( ".Average duration per task : %f \n", GET_MEASURE(test) / iNbTasks  );

			vResults.push_back( BenchTestResult( GET_MEASURE(test), "TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_TLS_ATOMIC") );
		}

		//delete pTaskManager;

		// Check results ........................................................................
		// every increment task must have landed exactly once in the atomic counter
		CHECK_ERROR( iNbIncrement == Test::iConcurrentCnt.Get(), 
			" Unexpected result - the iConcurrentCnt is %d when it should be %d\n", Test::iConcurrentCnt.Get(), iNbIncrement );  
	}
	while( TEST_RUNFOREVER && ++iLoopCount );
	// #endif // TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_TLS_ATOMIC
  
	

	// Mutex-increment scheduling benchmark; repeats while TEST_RUNFOREVER is set.
	if( TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_MUTEX )
	{
		bool bContinue = true;
		while( bContinue )
		{
			LaunchTestWithTaskManager( "TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_MUTEX", NB_THREADS, Test::TEST_TYPE_INCREMENT_LOCK_MUTEX, 0, true );
			bContinue = ( TEST_RUNFOREVER && ++iLoopCount );
		}
	}
	//#endif // TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_MUTEX
  
	// Spinlock-increment scheduling benchmark; repeats while TEST_RUNFOREVER is set.
	if( TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_SPINLOCK )
	{
		for( bool bRunAgain = true; bRunAgain; )
		{
			LaunchTestWithTaskManager( "TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_SPINLOCK", NB_THREADS, Test::TEST_TYPE_INCREMENT_LOCK_SPINLOCK, 0, true );
			bRunAgain = ( TEST_RUNFOREVER && ++iLoopCount )? true : false;
		}
	}
	// endif // TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_SPINLOCK
 
	// Reader/writer-lock increment scheduling benchmark; repeats while TEST_RUNFOREVER is set.
	if( TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_RWLOCK )
	{
		for( ;; )
		{
			LaunchTestWithTaskManager( "TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_RWLOCK", NB_THREADS, Test::TEST_TYPE_INCREMENT_LOCK_RWLOCK, 0, true ); 
			if( !( TEST_RUNFOREVER && ++iLoopCount ) )
			{
				break;
			}
		}
	}
	
	// endif TEST_TASK_MANAGER_SCHEDULING_CONCURRENT_ADD_LOCK_RWLOCK
  
	// LockFreeQueue stress through the TaskManager: interleaved enqueue/dequeue
	// tasks are scheduled, then the result lists are validated for uniqueness
	// and for exact enqueue/dequeue pairing.
	// FIX: std::list::size() returns size_t; passing it straight through "..." to a
	// "%d" conversion is undefined behavior on 64-bit targets -- args now cast to int.
	if( TEST_TASK_MANAGER_SCHEDULING_AND_MUTEX_AND_LOCKFREEQUEUE )
	do
	{ 		
		MSG("================================================================================\n");			
		MSG(" TEST_TASK_MANAGER_SCHEDULING_AND_MUTEX_AND_LOCKFREEQUEUE - TaskManager, LockFreeQueue and Mutex - Add \n\n" );
		MSG("=> Iteration %d\n", iLoopCount ); 
		MSG(" involved functions : \n");
		MSG(".TaskManager::BeginAddGroupTask\n");
		MSG(".TaskManager::AddTask\n");
		MSG(".TaskManager::EndAddGroupTask\n");
		MSG(".TaskManager::WaitOnTask\n");
		MSG("--------------------------------------------------------------------------------\n");

		// reset the shared result lists and counter written by the queue tasks
		list.clear();
		listPush.clear();
		listPop.clear();
		iValue = 0;

		pTaskManager->HardReset();
		{
			MEASURE_START(test);

			TaskID iGroupTaskID = pTaskManager->BeginAddGroupTask();

			for( int i = 0; i < NB_THREADS; i++ )
			{
				for( int k = 0; k < sQueue.GetCapacity() ; k++ )
				{
					// odd k dequeues, even k enqueues, so pushes and pops interleave
					Test::TestType type = ( k % 2 )? TEST_TYPE_LOCKFREEQUEUE_DEQUEUE : TEST_TYPE_LOCKFREEQUEUE_ENQUEUE;

					Task task;

					// each enqueue/dequeue pair shares the value (k + i * capacity) / 2
					Test::BuildTask( type, (k + i * sQueue.GetCapacity()) / 2 , task );

					pTaskManager->AddTask( &task, iGroupTaskID );
				}
			}

			pTaskManager->EndAddGroupTask( iGroupTaskID );
			
			pTaskManager->WaitOnTask( iGroupTaskID );
			MEASURE_STOP_LOG(test,  "Time for Adding AND Processing Tasks  : ", 0 );
			
			vResults.push_back( BenchTestResult( GET_MEASURE(test), "TEST_TASK_MANAGER_SCHEDULING_AND_MUTEX_AND_LOCKFREEQUEUE") );
		}

		// Check results ........................................................................
		// after sorting, every dequeued value must be unique
		list.sort();
		INT32 iPrev = -2;	// sentinel that cannot match any produced value
		INT32 iError = 0;
		INT32 i = 0;

		for( std::list< INT32 >::iterator it = list.begin(); it != list.end(); it++ )
		{
			if( iPrev == (*it) )
			{
				iError++;
				printf( "Error at %d: value %d is not unique\n", i, iPrev );
			}

			iPrev = (*it);
			i++;
		}
		printf(" %d error(s) on %d entries\n", iError, (int)list.size() );

		CHECK_ERROR( 0 == sQueue.GetSize(), 
			" Remaining integers in the queue = %d\n", sQueue.GetSize() ); 
		CHECK_ERROR( 0 == *list.begin(), 
			" First entry should be %d and is %d\n", 0, *list.begin());
		CHECK_ERROR( ((NB_THREADS * sQueue.GetCapacity() ) / 2 - 1) == *(list.rbegin()), 
			" Last entry should be %d and is %d\n", (NB_THREADS * sQueue.GetCapacity() ) / 2 - 1, *(list.rbegin()));

		CHECK_ERROR( listPush.size() == listPop.size(), 
			" Enqueued = %d; Dequeued = %d\n", (int)listPush.size(), (int)listPop.size() );
 
		// remove every pop that has a matching push; anything left over is an error.
		// NOTE(review): O(n^2) scan -- fine at test sizes, revisit if capacity grows.
		for( std::list< INT32 >::iterator itPush = listPush.begin(); itPush != listPush.end(); itPush++ )
		{
			for( std::list< INT32 >::iterator itPop = listPop.begin(); itPop != listPop.end(); itPop++ )
			{
				if( (*itPush) == (*itPop) )
				{
					// erase invalidates itPop; the immediate break keeps this safe
					listPop.erase( itPop );
					break;
				}
			}
		}
		CHECK_ERROR( listPush.size() == ( NB_THREADS * sQueue.GetCapacity() / 2 ) && 0 == listPop.size(), 
			" After cleaning Enqueued = %d; Dequeued = %d\n", (int)listPush.size(), (int)listPop.size() );
 
		// dump any unmatched dequeued values
		for( std::list< INT32 >::iterator itPop = listPop.begin(); itPop != listPop.end(); itPop++ )
		{
			printf( " value %d \n", *itPop);
		}   
	} 
	while( TEST_RUNFOREVER && ++iLoopCount );
	// endif TEST_TASK_MANAGER_SCHEDULING_AND_MUTEX_AND_LOCKFREEQUEUE


	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Same LockFreeQueue stress as the TaskManager variant, but driven by plain
	// WorkerThread objects: all work is queued up front, then the threads are
	// started and joined around the measured section.
	// FIX: std::list::size() returns size_t; passing it through "..." to a "%d"
	// conversion is undefined behavior on 64-bit targets -- args now cast to int.
	if( TEST_WORKERTHREAD_AND_MUTEX_AND_LOCKFREEQUEUE )
	do
	{    
		MSG("================================================================================\n");			
		MSG(" TEST_WORKERTHREAD_AND_MUTEX_AND_LOCKFREEQUEUE - WorkerThread, LockFreeQueue and Mutex - Add \n\n" );
		MSG("=> Iteration %d\n", iLoopCount ); 
		MSG(" involved functions : \n");
		MSG(".WorkerThread::AddWork\n");
		MSG(".WorkerThread::Run\n");
		MSG(".WorkerThread::Wait\n"); 
		MSG("--------------------------------------------------------------------------------\n");

		// reset the shared result lists and counter written by the queue work items
		list.clear();
		listPush.clear();
		listPop.clear();
		iValue = 0;
		
		// build the threads and pre-fill their work queues before starting the clock
		std::vector<Parallel::WorkerThread*> apThreads; 
		for(int i = 0; i < NB_THREADS; i++)
		{
			apThreads.push_back( new WorkerThread( sQueue.GetCapacity() ) );
			for( int k = 0; k < sQueue.GetCapacity() ; k++ )
			{
				// odd k dequeues, even k enqueues, so pushes and pops interleave
				Test::TestType type = ( k % 2 )? TEST_TYPE_LOCKFREEQUEUE_DEQUEUE : TEST_TYPE_LOCKFREEQUEUE_ENQUEUE;

				apThreads[ i ]->AddWork( new Test::TestWork( type , (k + i * sQueue.GetCapacity()) / 2 ) );
			}
		}
		// NDAL : This test, on Windows OS, in RELEASE, will take about 24.5s if WorkerThread is compiled without #define WORKER_THREAD_IMPL_LOCKFREE
		//        and only 19.5s if compiled with #define WORKER_THREAD_IMPL_LOCKFREE   !!!!!!!!!!
		MEASURE_START(test);
		for(int i = 0; i < NB_THREADS; i++)
		{
			apThreads[ i ]->Run();
		}
		for(int i = 0; i < NB_THREADS; i++)
		{
			apThreads[ i ]->Wait();
		}
		MEASURE_STOP_LOG( test,"Time consumed", 0 );
			
		vResults.push_back( BenchTestResult( GET_MEASURE(test), "TEST_WORKERTHREAD_AND_MUTEX_AND_LOCKFREEQUEUE") );

		for(int i = 0; i < NB_THREADS; i++)
		{
			delete apThreads[ i ];
		}
		apThreads.clear();

		// Check results: after sorting, every dequeued value must be unique
		list.sort();
		INT32 iPrev = -2;	// sentinel that cannot match any produced value
		INT32 iError = 0;
		INT32 i = 0;

		for( std::list< INT32 >::iterator it = list.begin(); it != list.end(); it++ )
		{
			if( iPrev == (*it) )
			{
				iError++;
				printf( "Error at %d: value %d is not unique\n", i, iPrev );
			}

			iPrev = (*it);
			i++;
		}
		printf(" %d error(s) on %d entries\n", iError, (int)list.size() );
		printf("Queue size = %d\n", sQueue.GetSize() );

		printf( " first entry should be %d and is %d\n", 0, *list.begin());
		printf( " last entry should be %d and is %d\n", (NB_THREADS * sQueue.GetCapacity() ) / 2 - 1, *(list.rbegin()));
		printf(" Enqueued = %d; Dequeued = %d\n", (int)listPush.size(), (int)listPop.size() );
		// remove every pop that has a matching push; anything left over is an error.
		// NOTE(review): O(n^2) scan -- fine at test sizes, revisit if capacity grows.
		for( std::list< INT32 >::iterator itPush = listPush.begin(); itPush != listPush.end(); itPush++ )
		{
			for( std::list< INT32 >::iterator itPop = listPop.begin(); itPop != listPop.end(); itPop++ )
			{
				if( (*itPush) == (*itPop) )
				{
					// erase invalidates itPop; the immediate break keeps this safe
					listPop.erase( itPop );
					break;
				}
			}
		}
		printf(" After cleaning Enqueued = %d; Dequeued = %d\n", (int)listPush.size(), (int)listPop.size() );
		for( std::list< INT32 >::iterator itPop = listPop.begin(); itPop != listPop.end(); itPop++ )
		{
			printf( " value %d \n", *itPop);
		}
	} 
	while( TEST_RUNFOREVER && ++iLoopCount );	
	// #endif	// TEST_WORKERTHREAD_AND_MUTEX_AND_LOCKFREEQUEUE
 
	// Profiling benchmark: schedules a batch of no-op tasks, merges per-thread
	// profiling stats via affinity-pinned merge tasks, then reports the total
	// recorded duration per thread and the time spent just adding the tasks.
	if( TEST_PROFILING_TASK_MANAGER )
	do
	{
		// disable the artificial per-task sleep for this test; restored below
		UINT32 iSavedSleepTime = Test::iSleepTime;

		Test::iSleepTime = 0;//15;

		pTaskManager->HardReset();

		float fAddTime = 0;

		MB_ASSERT( ENABLE_PROFILING, "You forgot to compile the MeltingParallel lib with ENABLE_PROFILING set to 1\n" );

		Profiling::StatsID statsID = Profiling::BeginRecordTime( "TEST_PROFILING_TASK_MANAGER" );
		{ 
			MEASURE_START(test);

			// TASK - LAUNCH TASKS ! --------------------------------------------------------- 
			TaskID iBatchTaskID;
			{
				iBatchTaskID = pTaskManager->BeginAddGroupTask();
//					pTaskManager->SetDebugName( iBatchTaskID , "GROUP_TEST_TYPE_INCREMENT_ATOMIC_TLS" );
				{
					for( int k = 0; k < NB_THREADS * MAX_TEST_FACTOR ; k++ )
					{
						Task task;
						Test::BuildTask( Test::TEST_TYPE_DO_NOTHING, 0, task );

						TaskID iTaskID = pTaskManager->AddTask( &task
																, iBatchTaskID );
						
//							pTaskManager->SetDebugName( iTaskID , "TEST_TYPE_INCREMENT_ATOMIC_TLS" );
					}
				}
				pTaskManager->EndAddGroupTask( iBatchTaskID );	 
			}

			// TASK - FINISH THREAD GROUP - on Specific Affinity ---------------------------------------------------------
					// + 1 for the main thread !
			// One merge task pinned per worker (affinity 1 << i), all dependent on
			// the batch; the main thread merges manually via MergeTLSStats() below.
			TaskID iMergeTaskID = pTaskManager->BeginAddGroupTask( nullptr, TaskManager::NO_TASK, iBatchTaskID );
			for( int i = 0; i < NB_THREADS; i++ )
			{
				UINT32 iSpecificAffinity = 1 << i;
			
				Task taskMerge;
				Test::BuildTaskMergeProfilingStats( taskMerge );

				TaskID iFinishTaskID = pTaskManager->AddTask( &taskMerge
															, iMergeTaskID
															, TaskManager::NO_TASK
															, TaskManager::UNDEFINED_PRIORITY
															, iSpecificAffinity );

//					pTaskManager->SetDebugName( iFinishTaskID , "TEST_TYPE_INCREMENT_ATOMIC_TLS_END" );
			} 
			pTaskManager->EndAddGroupTask( iMergeTaskID );
			
			// measured so far: only the cost of ADDING the tasks, not running them
			MEASURE_STOP(test);

			fAddTime = GET_MEASURE(test);

			// in that case, the main thread cannot help the workers, because there is no way to use its TLS at the moment
			pTaskManager->WaitOnTask( iMergeTaskID, true );

			
			Test::iSleepTime = iSavedSleepTime;
		}
		Profiling::EndRecordTime( statsID );
		Profiling::MergeTLSStats(); // merge stats from main thread as well
		
		Profiling::SortStatsByThread();

		Profiling::StatsBuffer stats = Profiling::GetStats();

		// accumulate one total duration per distinct thread id; stats are sorted
		// by thread, so a change of mThreadID starts a new bucket
		std::vector< LARGE_INTEGER >	vDurations;
		vDurations.reserve( 1 + NB_THREADS );

		// NOTE(review): -1 sentinel assumes no real thread id equals (ThreadID)-1 -- confirm
		Parallel::ThreadID lastThreadID = -1;

		LARGE_INTEGER fMaxDuration;
		fMaxDuration.QuadPart = 0;


		for( UINT32 i = 0; i < stats.size(); i++ )
		{
			if( lastThreadID != stats[ i ].mThreadID )
			{
				lastThreadID = stats[ i ].mThreadID;

				LARGE_INTEGER duration;
				duration.QuadPart = 0;
				vDurations.push_back( duration );
			}

			LARGE_INTEGER duration;
			duration.QuadPart = ( stats[ i ].mEndTime.QuadPart - stats[ i ].mStartTime.QuadPart );

			// NOTE(review): lowercase max is presumably the Windows.h macro -- confirm
			fMaxDuration.QuadPart = max ( duration.QuadPart, fMaxDuration.QuadPart );

			vDurations[ vDurations.size() - 1 ].QuadPart += duration.QuadPart;
		} 

		// avoid making it toooo big
		Profiling::ClearStats();

		MSG( "\n" );
		MSG( "\n" );
		for( UINT32 i = 0; i < vDurations.size(); i++ )
		{
			// convert QPC ticks to seconds using the system frequency
			MSG( "Duration %d : %f \n", i, ( vDurations[ i ].QuadPart ) / (double) Profiling::GetSystemFrequency().QuadPart );
		}
		MSG( "\n" );
		MSG(" Add Time : %f\n", fAddTime );
		MSG( "\n" );
	} 
	while( TEST_RUNFOREVER && ++iLoopCount );
	// #endif	// TEST_PROFILING_TASK_MANAGER
	


	// Teardown: release the shared TaskManager and the global integer buffer.
	// FIX: loop indices over vResults now use size_t (no signed/unsigned mix with
	// vector::size()); removed the unused local 'char c' left from an old scanf call.
	if( nullptr != pTaskManager )
	{
		delete pTaskManager;
		pTaskManager = nullptr;
	}
	
	if( nullptr != gInts )
	{
		delete [] gInts;
		gInts = nullptr;
	}


	MSG( "\n#######################################################################\n" ); 

	MSG( " Infos summary : \n\n" );

	MSG( "	System w/ %d CPU cores \n\n", Parallel::GetNbCores() );
	MSG( "	Nb 'tasks' : %d \n", MAX );
	MSG( "	Nb (worker) threads : %d \n", NB_THREADS + 1 );
	MSG( "	Average work load per thread : %d tasks \n", MAX / NB_THREADS );

	MSG( "\n=======================================================================\n" );

	// order the benchmark results per SortFunc before printing the summary
	std::sort( vResults.begin(), vResults.end(), SortFunc );

	MSG( " Results summary : \n\n" );
	for( size_t i = 0; i < vResults.size(); i++ )
	{
		MSG( ".%s : %f s \n    (i.e. %f per task)\n", vResults[ i ].mstrName, vResults[ i ].mfDuration, vResults[ i ].mfDuration / MAX  );
	}
	MSG( "\n" );
	
	// optional machine-friendly dumps of the same results
	if( PRINT_RESULTS_AS_DUMBLINES )
	{ 
		MSG( "For SpreadSheets:\n" );
		for( size_t i = 0; i < vResults.size(); i++ )
		{
			MSG( ".%s\n%f\n%f\n\n", vResults[ i ].mstrName, vResults[ i ].mfDuration, vResults[ i ].mfDuration / MAX  );
		}
		MSG( "\n" );
	} 

	if( PRINT_RESULTS_AS_HTML )
	{
		MSG( "<table border= \"1\" width=\"500\">\n" );
		MSG( "<tr><td>Test</td><td>Total time (s)</td><td>Time per task (s)</td></tr>\n" );
		for( size_t i = 0; i < vResults.size(); i++ )
		{
			MSG( "<tr><td>%s</td><td>%f</td><td>%f</td></tr>\n", vResults[ i ].mstrName, vResults[ i ].mfDuration, vResults[ i ].mfDuration / MAX  );
		}
		MSG( "</table>\n" );
	}
 

	MSG( "\n#######################################################################\n" );


	// keep the console window open until the user presses a key
	MSG( "Press a key to exit. ");
	getchar();

    return 0;
}
