//	--------------------------------------------------------------------
//	Copyright(C) 2006,2007 Zhao Yukun. All rights reserved.
//	This file is a part of the Xeres system.
//	Xeres 2007
//	Contact : xeres.engine@gmail.com
//	$(reservedInfo)
//	$(reservedDeclaration)
//	--------------------------------------------------------------------
///	@file	<mspace.cpp>
///	@path	~/src/lib/memory/
///	@date	2007/11/18
///	@desc	'mspace' memory manager.

#include "config/config.h"

#include "lib/memory/mspace.h"

// when the memory manager is disabled (e.g. while debugging), normalize
// XERES_CONFIG_USE_MSPACE to a plain 0 so later #if tests are uniform
#if ( !XERES_CONFIG_USE_MSPACE )
#	pragma message("Xeres : Turning off mspace memory manager.")
#	undef	XERES_CONFIG_USE_MSPACE
#	define XERES_CONFIG_USE_MSPACE 0
#endif

#if (XERES_CONFIG_USE_MSPACE)
#	pragma message("Xeres : Using mspace memory manager." )
#endif

#if XERES_CONFIG_USE_MSPACE
#define NO_MSP_RUN( X )
#define	MSP_RUN( X )		X
#else
#define	NO_MSP_RUN( X )		X
#define MSP_RUN( X )
#endif

namespace xeres {

#ifndef _DOC_GEN

	/*!
		\struct Field
		\brief Bookkeeping header written in front of every allocation.

		Carries a magic tag used to recognise and validate blocks handed
		back to free()/realloc(), plus one byte telling whether the block
		came from the aligned or the unaligned allocator.  When the mspace
		manager is compiled out (NO_MSP_RUN active) the header also
		records the block size, since the CRT cannot be queried for it.

		NOTE(review): info_aligned overlays the 4th byte of info_tag.  On
		a little-endian target that is the int's most-significant byte,
		which MASK (0xffffff00) keeps — so verify() would fail once
		set_aligned() has been called.  The constants appear to assume
		big-endian byte order; confirm against the intended target.
	*/
	struct Field
	{
		// magic number: the three heading bytes must match TAG under MASK
		enum {
			TAG		= 0xef396700 ,
			MASK	= 0xffffff00
		};

		// markers stored in info_aligned
		enum {
			ADD_UNALIGNED = 'u' ,
			ADD_ALIGNED = 'a' ,
		};

		union
		{
			struct {
				byte info_heading[3];
				byte info_aligned;
			};
			int info_tag;
		};

		NO_MSP_RUN( uint32 size );

		Field( void )
		{
			// write the tag first, then clear the overlapping marker byte
			info_tag = TAG;
			info_aligned = 0;
			NO_MSP_RUN( size = 0xffffffff );
		}

		// record which allocator produced this block
		void set_aligned( bool aligned ) {
			info_aligned = aligned ? ADD_ALIGNED : ADD_UNALIGNED;
		}

		// record the block size (CRT build only); must be set exactly once
		void set_size( size_t sz ) {
			NO_MSP_RUN( assert( size == 0xffffffff && ( sz < 0xffffffff ) ) );
			NO_MSP_RUN( size = (uint32)sz );
		}

		// a header is valid when the tag matches and the marker byte holds
		// one of the two known allocator markers
		bool verify( void ) const {
			const bool tag_ok = ( ( info_tag & MASK ) == TAG );
			const bool kind_ok =
				( info_aligned == ADD_UNALIGNED ) || ( info_aligned == ADD_ALIGNED );
			return tag_ok && kind_ok;
		}

		// true for blocks from the aligned allocator
		bool aligned( void ) const {
			return info_aligned == ADD_ALIGNED;
		}

		// true for blocks from the unaligned allocator
		bool unaligned( void ) const {
			return info_aligned == ADD_UNALIGNED;
		}
	};

	// mspace implementation: declarations and configuration for the
	// dlmalloc-derived allocator pulled in via mspace.inl below
	namespace
	{

// lock type used by the mspace code; with USE_LOCKS == 0 the shims
// below are never exercised, so the type can be an opaque void
#define MLOCK_T				void

		// no-op lock shims (locking is compiled out, see USE_LOCKS)
		static inline void win32_init_lock( MLOCK_T * ) {
		}
		static inline int win32_acquire_lock( MLOCK_T * ) {
			return 0;
		}
		static inline void win32_release_lock( MLOCK_T * ) {
		}

		// opaque handle to one allocation space
		typedef void * __mspace;

		// callback signature for walking live blocks (see walkthough)
		typedef bool (*mspace_walkfunc_t)( void * ptr , size_t blocksize , void * data );

// forward declarations for the API implemented in mspace.inl
// (skipped under the Intel compiler — presumably it rejects the
// redeclarations; confirm)
#ifndef __ICL
		__mspace create_mspace(size_t capacity, int locked);
		size_t destroy_mspace(__mspace msp);
		__mspace create_mspace_with_base(void* base, size_t capacity, int locked);
		//
		void* mspace_malloc(__mspace msp, size_t bytes);
		void mspace_free(__mspace msp, void* mem);
		void* mspace_realloc(__mspace msp, void* mem, size_t newsize);
		void* mspace_calloc(__mspace msp, size_t n_elements, size_t elem_size);
		void* mspace_memalign(__mspace msp, size_t alignment, size_t bytes);
		void** mspace_independent_calloc(__mspace msp, size_t n_elements,
			size_t elem_size, void* chunks[]);
		void** mspace_independent_comalloc(__mspace msp, size_t n_elements,
			size_t sizes[], void* chunks[]);
		//
		size_t mspace_footprint(__mspace msp);
		struct mallinfo mspace_mallinfo(__mspace msp);
		void mspace_malloc_stats(__mspace msp , size_t& maxfp , size_t& fp , size_t& used);
		void mspace_malloc_workthough(__mspace msp , mspace_walkfunc_t  fnc , void * data );
		int mspace_trim(__mspace msp, size_t pad);
		int mspace_mallopt(int, int);
#endif

// configuration for mspace.inl: single-threaded, mspaces only,
// 16-byte base alignment
#define USE_LOCKS			0
#define MSPACES				1
#define MALLOC_ALIGNMENT	(16)
#define	ONLY_MSPACES		1

// release builds drop internal integrity checks; debug builds enable
// the allocator's own DEBUG checking
#ifndef _DEBUG
#	define	INSECURE		1
#else
#	define	DEBUG			1
#endif

// silence MSVC noise from the included implementation:
// C4127 constant conditional, C4702 unreachable code, C4267 size_t narrowing
#pragma warning( disable:4127 ; disable:4702 ; disable:4267 )

#include "mspace.inl"
	}

	/// Allocate 'bytes' from the given space; falls back to CRT malloc
	/// when the mspace manager is compiled out.
	static inline void * x_malloc(__mspace msp, size_t bytes)
	{
#if ( XERES_CONFIG_USE_MSPACE )
		return mspace_malloc( msp , bytes );
#else
		(msp);	// reference the unused parameter BEFORE returning; the
				// original 'return malloc(bytes); (msp);' left this
				// statement unreachable, so it suppressed nothing
		return malloc( bytes );
#endif
	}
	/// Release a block obtained from x_malloc/x_realloc.
	static inline void x_free(__mspace msp, void* mem)
	{
#if ( XERES_CONFIG_USE_MSPACE )
		mspace_free( msp , mem );
#else
		(msp);	// keep the unused-parameter suppression reachable
				// (it originally sat after 'return', dead code)
		free( mem );
#endif
	}
	/// Resize a block obtained from x_malloc; falls back to CRT realloc
	/// when the mspace manager is compiled out.
	static inline void * x_realloc(__mspace msp, void* mem, size_t newsize)
	{
#if ( XERES_CONFIG_USE_MSPACE )
		return mspace_realloc( msp , mem , newsize );
#else
		(msp);	// reference the unused parameter BEFORE returning; the
				// original placed this after 'return', where it was
				// unreachable and suppressed nothing
		return realloc( mem , newsize );
#endif
	}
	/// Allocate 'bytes' with the requested alignment; falls back to the
	/// MSVC CRT _aligned_malloc when the mspace manager is compiled out.
	static inline void * x_malloc_aligned( __mspace msp, size_t alignment, size_t bytes )
	{
#if ( XERES_CONFIG_USE_MSPACE )
		return mspace_memalign( msp , alignment , bytes );
#else
		(msp);	// reference the unused parameter BEFORE returning; the
				// original placed this after 'return', where it was
				// unreachable and suppressed nothing
		return _aligned_malloc( bytes , alignment );
#endif
	}
	/// Release a block obtained from x_malloc_aligned/x_realloc_aligned.
	static inline void x_free_aligned( __mspace msp, void *  mem )
	{
#if ( XERES_CONFIG_USE_MSPACE )
		mspace_free( msp , mem );
#else
		(msp);	// unused in the CRT path
		_aligned_free( mem );
#endif
	}
	/// Resize an aligned block.  NOTE(review): in the mspace path the
	/// 'alignment' argument is not passed on — mspace_realloc relies on
	/// the space's MALLOC_ALIGNMENT (16); confirm this matches ALIGN_SIZE.
	static inline void * x_realloc_aligned( __mspace msp , void * mem , size_t newsize , size_t alignment )
	{
#if ( XERES_CONFIG_USE_MSPACE )
		(alignment);	// reference the unused parameter BEFORE returning;
						// originally placed after 'return' — unreachable
		return mspace_realloc( msp , mem , newsize );
#else
		(msp);			// same fix for the CRT path
		return _aligned_realloc( mem , newsize , alignment );
#endif
	}

#endif /*_DOC_GEN*/

	// ctor: create the underlying allocation space with the requested
	// initial capacity, unlocked.  When the manager is compiled out the
	// macro vanishes, m_mspace stays NULL and the CRT allocator is used.
	mspace::mspace( size_t capacity )
		: m_mspace( NULL )
	{
		MSP_RUN( m_mspace = create_mspace( capacity , false ) );
	}
	// dtor: walk and release any blocks still alive (leaks at this point),
	// then destroy the underlying space itself
	mspace::~mspace( void )
	{
		walkthough();
		MSP_RUN( destroy_mspace( m_mspace ) );
	}

#if ( XERES_CONFIG_USE_MSPACE )
#define	MEM_OVERHEAD			( (size_t)(chunk2mem(0)) )
#else
#define	MEM_OVERHEAD			( 0 )
#endif

#define	UNALIGNED_MEMBOOK		( MEM_OVERHEAD + sizeof(Field) )
#define	ALIGNED_MEMBOOK			( MEM_OVERHEAD + ALIGN_SIZE )

	// allocate an unaligned block: a Field header is written immediately
	// before the returned pointer so free()/realloc() can later recognise
	// and validate the block
	void * mspace::malloc( size_t n )
	{
		Field field;
		field.set_aligned( false );
		// real size: requested payload plus the bookkeeping header
		size_t realsize = n + sizeof(Field);
		byte * ptr = static_cast<byte*>( x_malloc( m_mspace , realsize ) );
		// NOTE(review): no NULL check — an allocation failure would be
		// dereferenced below; confirm callers rely on never seeing NULL
		void * outptr = ptr + sizeof(Field);
		// add pointer information
		Field * f = (Field*)(ptr);
		*f = field;
		NO_MSP_RUN( f->set_size( realsize ) );
		// mspace build: query the actual chunk size and sanity-check it
		MSP_RUN( realsize = chunksize(mem2chunk(ptr)) );
		MSP_RUN( assert( cinuse( mem2chunk( ptr ) ) ) );
		// track mmapped (large) chunks so walkthough() can visit them; in
		// the CRT build the macro-guarded 'if' vanishes and the braced
		// insert below runs for EVERY block (walkthough then frees them all)
		MSP_RUN( if( is_mmapped(mem2chunk(ptr) ) ) )
		{
			m_largeSpace.insert( ptr );
		}
		assert( realsize > UNALIGNED_MEMBOOK );
		return outptr;
	}
	// allocate an aligned block: the header occupies a full ALIGN_SIZE
	// prefix (zeroed first) so the returned payload keeps the alignment
	void * mspace::malloc_aligned( size_t n )
	{
		Field field;
		field.set_aligned( true );
		// real size: payload plus aligned header, rounded up to ALIGN_SIZE
		size_t realsize = ALIGNOF( n + ALIGN_SIZE , ALIGN_SIZE );
		byte * ptr = static_cast<byte*>(x_malloc_aligned( m_mspace , ALIGN_SIZE , realsize ));
		memset( ptr , 0 , ALIGN_SIZE );
		void * outptr = ptr + ALIGN_SIZE;
		// add pointer information (Field sits at the start of the prefix)
		Field * f = (Field*)(ptr);
		*f = field;
		// NOTE(review): writes size directly, bypassing set_size() and the
		// sanity assert the unaligned path gets through it
		NO_MSP_RUN( f->size = realsize );
		MSP_RUN( realsize = chunksize(mem2chunk(ptr)) );
		MSP_RUN( assert( cinuse( mem2chunk( ptr ) ) ) );
		// trace large memory block; in the CRT build the macro-guarded 'if'
		// vanishes and every block is tracked (see walkthough)
		MSP_RUN( if( is_mmapped(mem2chunk(ptr) ) ) )
		{
			m_largeSpace.insert( ptr );
		}
		assert( realsize > ALIGNED_MEMBOOK );
		return outptr;
	}
	// realloc: identify the block's layout by its header, then resize it
	// while preserving that header in front of the (possibly moved) block.
	// If the unaligned header does not verify, the block is assumed (and
	// asserted) to be an aligned one.
	void * mspace::realloc( void * p , size_t n )
	{
		byte * ptr = static_cast<byte*>(p);
		// unaligned layout: header sits sizeof(Field) before the payload
		Field * field = (Field*)( ptr - sizeof(Field) );
		Field fsave;
		if( field->verify() && field->unaligned() )
		{
			// save the header — x_realloc may move or clobber the block
			fsave = *field;
			// NOTE(review): oldsize is computed but never read
			MSP_RUN( size_t oldsize = chunksize( mem2chunk( field ) ) );
			NO_MSP_RUN( size_t oldsize = field->size );
			// realsize: new payload plus the header
			size_t realsize = n + sizeof(Field);
			// stop tracking the old block; in the CRT build the
			// macro-guarded 'if' vanishes and the erase runs for any block
			MSP_RUN( if( is_mmapped(mem2chunk(field) ) ) )
			{
				m_largeSpace.erase( field );
			}
			// reallocate (field is the raw base of the allocation)
			ptr = static_cast<byte*>( x_realloc( m_mspace , field , realsize ) );
			void * outptr = ptr + sizeof(Field);
			// restore pointer information at the new location
			Field * f = (Field*)ptr;
			*f = fsave;
			NO_MSP_RUN( f->size = realsize );
			MSP_RUN( realsize = chunksize( mem2chunk(ptr) ) );
			MSP_RUN( assert( cinuse( mem2chunk( ptr ) ) ) );
			// resume tracking at the new address
			MSP_RUN( if( is_mmapped(mem2chunk(ptr) ) ) )
			{
				m_largeSpace.insert( ptr );
			}
			assert( realsize > UNALIGNED_MEMBOOK );
			return outptr;
		}
		else
		{
			// aligned layout: header occupies an ALIGN_SIZE prefix
			field = (Field*)( ptr - ALIGN_SIZE );
			assert( field->verify() && field->aligned() );

			fsave = *field;
			// NOTE(review): oldsize is computed but never read
			MSP_RUN( size_t oldsize = chunksize( mem2chunk( field ) ) );
			NO_MSP_RUN( size_t oldsize = field->size );
			// realsize: payload plus aligned header, rounded to ALIGN_SIZE
			size_t realsize = ALIGNOF( n + ALIGN_SIZE , ALIGN_SIZE );
			// stop tracking the old block (see note above)
			MSP_RUN( if( is_mmapped(mem2chunk(field) ) ) )
			{
				m_largeSpace.erase( field );
			}
			// reallocate, re-zero the header prefix, restore the header
			ptr = static_cast<byte*>( x_realloc_aligned( m_mspace , field , realsize , ALIGN_SIZE ) );
			memset( ptr , 0 , ALIGN_SIZE );
			void * outptr = ptr + ALIGN_SIZE;
			// add pointer information
			Field * f = (Field*)(ptr);
			*f = fsave;
			NO_MSP_RUN( f->size = realsize );
			MSP_RUN( realsize = chunksize( mem2chunk(ptr) ) );
			MSP_RUN( assert( cinuse( mem2chunk( ptr ) ) ) );
			// resume tracking at the new address
			MSP_RUN( if( is_mmapped(mem2chunk(ptr) ) ) )
			{
				m_largeSpace.insert( ptr );
			}
			assert( realsize > ALIGNED_MEMBOOK );
			return outptr;
		}
	}
	// free: decide whether 'p' came from the aligned or unaligned
	// allocator by the header stored in front of it, then release the raw
	// base pointer through the matching path
	void mspace::free( void * p )
	{
		byte * ptr = static_cast<byte*>( p );
		// first try the unaligned layout: header sizeof(Field) back
		Field * field = (Field*)( ptr - sizeof(Field) );
		// NOTE(review): fsave is declared but never used in free()
		Field fsave;
		if( field->verify() && field->unaligned() )
		{
			MSP_RUN( assert( cinuse( mem2chunk( field ) ) ) );
			// NOTE(review): old_size is computed but never read
			MSP_RUN( size_t old_size = chunksize( mem2chunk(field) ) );
			NO_MSP_RUN( size_t old_size = field->size );
			// stop tracking; in the CRT build the macro-guarded 'if'
			// vanishes and the erase runs unconditionally, matching the
			// unconditional insert in malloc()
			MSP_RUN( if( is_mmapped(mem2chunk(field) ) ) )
			{
				assert( m_largeSpace.find( field ) != m_largeSpace.end() );
				m_largeSpace.erase( field );
			}
			x_free( m_mspace , field );
		}
		else
		{
			// otherwise it must be the aligned layout: header ALIGN_SIZE
			// back (asserted — an invalid pointer also lands here)
			field = (Field*)( ptr - ALIGN_SIZE );
			assert( field->verify() && field->aligned() );

			MSP_RUN( assert( cinuse( mem2chunk( field ) ) ) );
			// NOTE(review): old_size is computed but never read
			MSP_RUN( size_t old_size = chunksize( mem2chunk(field) ) );
			NO_MSP_RUN( size_t old_size = field->size );
			// stop tracking (see note above)
			MSP_RUN( if( is_mmapped(mem2chunk(field) ) ) )
			{
				assert( m_largeSpace.find( field ) != m_largeSpace.end() );
				m_largeSpace.erase( field );
			}
			x_free_aligned( m_mspace , field );
		}
	}

	// context handed through the C-style walker callbacks during
	// walkthough(): just the owning mspace instance
	struct walker_data
	{
		class mspace * msp;
	};

	// Walk every surviving allocation and release it (called from the
	// destructor, so anything visited here leaked).
	void mspace::walkthough( void )
	{
		walker_data wdata = { this };
		// mspace build: visit all in-heap chunks via the implementation
		MSP_RUN( mspace_malloc_workthough( m_mspace , internal_walker , &(wdata) ) );
		// then the separately-tracked set: mmapped (large) chunks in the
		// mspace build, or every allocation in the CRT build
		std::set< void * >::iterator it , end;
		for( it = m_largeSpace.begin() , end = m_largeSpace.end() ; it != end ; ++it )
		{
			void * ptr = *it;
			// size is only known from the chunk in the mspace build; the
			// CRT build passes 0 and internal_walker reads the header size
			MSP_RUN( size_t old_size = chunksize(mem2chunk(ptr)) );
			NO_MSP_RUN( size_t old_size = 0 );
			internal_walker( ptr , old_size , &(wdata) );
			// in the mspace build release_walk does not free (its frees
			// are NO_MSP_RUN), so the block is freed here instead
			MSP_RUN( x_free( m_mspace , ptr ) );
		}
	}

	// Walker callback: 'ptr' is the raw base of a block, where the Field
	// header lives for both aligned and unaligned allocations.
	bool mspace::internal_walker( void * ptr , size_t blocksize , void * data )
	{
		Field * field = ((Field*)ptr);
		if( !field->verify() )
		{
			// mspace build: a chunk without our header is not one of ours
			// — pass NULL so release_walk skips it
			MSP_RUN( field = NULL );
		}
		// CRT build: take the size recorded in the header.
		// NOTE(review): assert(field) is trivially true here (field==ptr);
		// presumably assert(field->verify()) was intended — confirm
		NO_MSP_RUN( assert(field) );
		NO_MSP_RUN( blocksize = field->size );
		return release_walk( ptr , field , blocksize , data );
	}

	// Release one surviving allocation during teardown.  'field' is NULL
	// when the header failed verification (mspace build only — such chunks
	// are skipped here and freed by walkthough itself); in the CRT build a
	// valid header is required and the block is freed through the path
	// matching its aligned/unaligned marker.
	inline bool mspace::release_walk( void * ptr , Field * field , size_t size , void * data )
	{
		// reference the parameters that are unused in one or the other
		// build BEFORE any return: the original placed '(ptr);(data);'
		// after 'return true;', where they were unreachable and silenced
		// nothing ('size' had no suppressor at all)
		(ptr);(size);(data);
		NO_MSP_RUN( walker_data * wdata =  (walker_data*)data );
		MSP_RUN( if( field ) )
		{
			if( field->unaligned() )
			{
				NO_MSP_RUN( x_free( wdata->msp->m_mspace , field ) );
			}
			else
			{
				NO_MSP_RUN( x_free_aligned( wdata->msp->m_mspace , field ) );
			}
			NO_MSP_RUN( return true );
		}
		MSP_RUN( else )
		{
			NO_MSP_RUN( assert( false ) );
			NO_MSP_RUN( x_free( wdata->msp->m_mspace , field ) );
		}
		return true;
	}

} // namespace xeres
