#ifndef __GLF_MEM_TLSFALLOCATOR_H_INCLUDED__
#define __GLF_MEM_TLSFALLOCATOR_H_INCLUDED__

#include <glf/core/bits.h>

#include <algorithm> // std::find, std::max
#include <cstddef>   // size_t
#include <cstring>   // memset
#include <list>
#include <map>

namespace glf {

// Two Level Segregated Fit Allocator.
// 
// The TLSF data structure can be represented as a two-dimensional array. The first dimension 
// splits free blocks in size-ranges a power of two apart from each other, so that first-
// level index i refers to free blocks of sizes in the range [2^i,2^(i+1)[. The second 
// dimension splits each first-level range linearly in a number of ranges of an equal width. 
// The number of such ranges, 2^L, should not exceed the number of bits of the underlying 
// architecture, so that a one-word bitmap can represent the availability of free blocks in 
// all the ranges. 
//
// This implementation uses L = 5.
//
// This implementation has the particularity of storing its internal information outside
// of the allocator arena. While this makes the implementation slower, it was required for 
// my current usage (the arena is represented by a file). It could easily be modified to 
// support a bool template argument for a fully optimized version which uses the arena to 
// store internal information.
//
// http://wks.gii.upv.es/tlsf/files/MRBC_2008.pdf
class TlsfAllocator
{
private:

	// Internal node describing one block of the arena. Nodes form a doubly
	// linked list in address order (mPrev/mNext) so that adjacent free blocks
	// can be coalesced on Free().
	struct NodeType
	{
		size_t mSize;    // Size of the block in bytes.
		NodeType* mNext; // Next block in address order (NULL at the arena end).
		NodeType* mPrev; // Previous block in address order (NULL at the arena start).
		bool mUsed;      // True while the block is handed out to the user.
		void* mPtr;      // Start address of the block inside the arena.

		// NOTE: initializers listed in declaration order (members are always
		// initialized in that order anyway; this silences -Wreorder).
		NodeType()
			: mSize(0)
			, mNext(NULL)
			, mPrev(NULL)
			, mUsed(false)
			, mPtr(NULL)
		{
		}

		NodeType(void* ptr, size_t size)
			: mSize(size)
			, mNext(NULL)
			, mPrev(NULL)
			, mUsed(false)
			, mPtr(ptr)
		{
		}
	};

	typedef std::list<NodeType*> ListType;
	typedef std::map<void*, NodeType*> PoolType;

	// L = 5 since I use 32 bits variables, but this could be deduced from template 
	// argument in case we need to use 16/64 bits variables.
	static const unsigned long L  = 5u;
	static const unsigned long TL = 32u; // 2^L
	
	unsigned long mFlBitmap;      // Bit i set <=> some bit of mSlBitmaps[i] is set.
	unsigned long mSlBitmaps[TL]; // Bit j of entry i set <=> mList[i][j] is non empty.

	ListType mList[TL][TL]; // Free lists, indexed by [first level][second level].
	PoolType mPool;         // Every block (used and free), keyed by start address.

	// Find the first level and second level value for inserting a specific block size. 
	//
	// *** NOTE: Difference between MappingInsert and MappingSearch:
	//
	//           The second level for block size [64,128) gives values as follow for each 
	//           index: 64, 66, 68.... A block inserted under the 64 index must be
	//           of size of at least 64, so 64 and 65 block sizes go under this index.
	//
	//           When allocating (searching) a block of size 65, the block must guarantee
	//           a size of at least 65. Even if we inserted a 65 size block at index 0,
	//           the only way to guarantee a size of 65 is to use a free block of size 66
	//           at index 1 (or above).
	inline void MappingInsert(unsigned long size, unsigned long* fl, unsigned long* sl)
	{
		GLF_ASSERT(size >= TL);

		*fl = Fls(size);
		*sl = (size >> (*fl - L)) - TL;

		// fl indexes mSlBitmaps/mList: sizes >= 2^TL are not supported.
		GLF_ASSERT(*fl < TL);
	}

	// Find the first level and second level value for searching a block that is
	// guaranteed to hold at least the given size.
	//
	// *** NOTE: See MappingInsert's note.
	inline void MappingSearch(unsigned long size, unsigned long& fl, unsigned long& sl)
	{
		GLF_ASSERT(size >= TL);

		// Round up to the next second-level boundary so any block found at
		// (fl, sl) is big enough. Unsigned literal: shifting a signed int can
		// overflow (undefined behaviour).
		size = size + (1u << (Fls(size) - L)) - 1;
		fl = Fls(size);
		sl = (size >> (fl - L)) - TL;
	}

	// Insert a free block of memory into the data structure.
	inline void InsertBlock(NodeType* node)
	{
		// Based on size, get the first and second mapping level
		unsigned long fl, sl;
		MappingInsert(node->mSize, &fl, &sl);

		// Set the bitmaps. Use unsigned shifts: 1 << 31 on a signed int is
		// undefined behaviour.
		mFlBitmap |= 1u << fl;
		mSlBitmaps[fl] |= 1u << sl;

		// Insert the node in the free list
		ListType& list = mList[fl][sl];
		GLF_ASSERT(std::find(list.begin(), list.end(), node) == list.end());
		list.push_front(node);

		// Make sure everything is okay !
		GLF_ASSERT(SanityCheck());
	}

	// Remove a free block of memory from the data structure. (fl, sl) must be
	// the mapping levels of node->mSize (checked in debug builds).
	inline void RemoveBlock(NodeType* node, unsigned long fl, unsigned long sl)
	{
#ifdef GLF_DEBUG
		unsigned long flCheck, slCheck;
		MappingInsert(node->mSize, &flCheck, &slCheck);

		GLF_ASSERT(fl == flCheck && sl == slCheck);
#endif

		// Remove the node from the free list
		ListType& list = mList[fl][sl];
		GLF_ASSERT(std::find(list.begin(), list.end(), node) != list.end());
		list.remove(node);

		// Clear the bitmap bits when the list (and then the whole first level)
		// becomes empty.
		if(list.empty())
		{
			mSlBitmaps[fl] &= ~(1u << sl);
			if(mSlBitmaps[fl] == 0)
			{
				mFlBitmap &= ~(1u << fl);
			}
		}

		// Make sure everything is okay !
		GLF_ASSERT(SanityCheck());
	}

	// Merge node with its address-previous block when that block is free.
	// Returns the surviving node (prev if merged, node otherwise).
	inline NodeType* MergePrev(NodeType* node)
	{
		NodeType* prev = node->mPrev;
		if(prev && !prev->mUsed)
		{
			unsigned long fl, sl;
			MappingInsert(prev->mSize, &fl, &sl);

			RemoveBlock(prev, fl, sl);

			// Grow prev over node and unlink node from the address list.
			prev->mSize += node->mSize;
			prev->mNext = node->mNext;
			if(node->mNext) 
			{
				node->mNext->mPrev = prev;
			}

			mPool.erase(node->mPtr);
			delete node;
			node = prev;
		}

		return node;
	}

	// Merge node with its address-next block when that block is free.
	inline void MergeNext(NodeType* node)
	{
		// merge next
		NodeType* next = node->mNext;
		if(next && !next->mUsed)
		{
			unsigned long fl, sl;
			MappingInsert(next->mSize, &fl, &sl);

			RemoveBlock(next, fl, sl);

			// Grow node over next and unlink next from the address list.
			node->mSize += next->mSize;
			node->mNext = next->mNext;
			if(next->mNext)
			{
				next->mNext->mPrev = node;
			}

			mPool.erase(next->mPtr);
			delete next;
		}
	}

#ifdef GLF_DEBUG
	// Do as many checks as possible to make sure everything is okay (useful for debugging).
	bool SanityCheck()
	{
		// Check that every (fl, sl) pair flagged in the bitmaps has a non
		// empty free list.
		for(unsigned long flCheck = 0; flCheck < TL; ++flCheck)
		{
			for(unsigned long slCheck = 0; slCheck < TL; ++slCheck)
			{
				if((mFlBitmap & (1u << flCheck)) && (mSlBitmaps[flCheck] & (1u << slCheck)))
				{
					if(mList[flCheck][slCheck].empty())
					{
						GLF_ASSERT(false);
					}
				}
			}
		}

		// Check block list
		size_t count = 0; 
		for(PoolType::iterator i = mPool.begin(); i != mPool.end(); ++i)
		{
			// Is this the start of an arena ?
			if(i->second->mPrev == NULL)
			{
				++count;

				// Get new list head
				NodeType* k = i->second;

				// Check edge
				GLF_ASSERT(k->mPrev == NULL);

				// Count list
				NodeType* before = k;
				while(k->mNext)
				{
					++count;
					k = k->mNext;

					// Corrupt list
					GLF_ASSERT(before == k->mPrev);
					before = k;
				
					// Check if two free blocks are adjacent.
					// This can momentarily happen when merging blocks... find better check !
					//GLF_ASSERT(before->mUsed || i->mUsed);
				}

			}
		}
		GLF_ASSERT(mPool.size() == count);

		return true;
	}
#endif

public:
	
	// Initializes the allocator object.
	TlsfAllocator()
		: mFlBitmap(0)
	{
		memset(mSlBitmaps, 0, TL * sizeof(unsigned long));
	}

	// Releases all resources used by the allocator object. Every arena must
	// have been removed (all allocations freed) before destruction.
	~TlsfAllocator()
	{
		// Make sure there is no leak...
		GLF_ASSERT(mFlBitmap == 0);
		GLF_ASSERT(mPool.empty());

		for(unsigned long fl = 0; fl < TL; ++fl)
		{
			GLF_ASSERT(mSlBitmaps[fl] == 0);

			for(unsigned long sl = 0; sl < TL; ++sl)
			{
				GLF_ASSERT(mList[fl][sl].empty());
			}
		}
	}

	// Insert an arena (an unstructured blob of memory) of at least TL bytes.
	void InsertArena(void* ptr, size_t size)
	{
		GLF_ASSERT(size >= TL);

		NodeType* node = new NodeType(ptr, size);
		mPool.insert(PoolType::value_type(ptr, node));
		InsertBlock(node);

		GLF_ASSERT(SanityCheck());
	}

	// Remove an arena. The arena must currently be one single free block,
	// i.e. everything allocated from it has been freed (and merged back).
	void RemoveArena(void* ptr)
	{
		// find() instead of operator[]: operator[] would default-insert a NULL
		// entry for an unknown pointer in release builds.
		PoolType::iterator found = mPool.find(ptr);
		GLF_ASSERT(found != mPool.end());
		if(found == mPool.end())
		{
			return;
		}

		NodeType* node = found->second;

		// Based on size, get the first and second mapping level
		unsigned long fl, sl;
		MappingInsert(node->mSize, &fl, &sl);

		RemoveBlock(node, fl, sl);
		mPool.erase(found);
		delete node;

		GLF_ASSERT(SanityCheck());
	}
		
	// Memory allocation. Returns NULL when no free block is big enough.
	void* Alloc(size_t size)
	{
		size = std::max(size, (size_t)TL);  // Size must be at least TL
		size = ((size + 3) & ~3);   // Size must be a multiple of 4 
		
		//
		// First, find a suitable block
		//

		// Get the minimum fl and sl value.
		unsigned long fl, sl;
		MappingSearch(size, fl, sl);

		// check if the fl value is okay, and if so, make sure the sl value is okay or bigger
		unsigned long bitmap = mSlBitmaps[fl] & (0xFFFFFFFFu << sl);
		if(bitmap)
		{
			sl = Ffs(bitmap);
		}
		else
		{
			// Otherwise, find the next first level holding a free block. Guard
			// the shift: shifting a 32 bits value by 32 (fl == 31) is
			// undefined behaviour.
			bitmap = (fl + 1 < TL) ? (mFlBitmap & (0xFFFFFFFFu << (fl + 1))) : 0;
			if(bitmap)
			{
				fl = Ffs(bitmap);
				sl = Ffs(mSlBitmaps[fl]);
			}
			else
			{
				// Out of memory.
				return NULL;
			}
		}

		//
		// Grab the block
		//
		
		NodeType* node = mList[fl][sl].front();
		RemoveBlock(node, fl, sl);
		node->mUsed = true;
		char* block = (char*)node->mPtr;

		//
		// Is the block big enough so that it can be split ?
		//
		
		size_t remaining = node->mSize - size;
		if(remaining >= TL)
		{
			// Shrink the allocated block and create a new free block right
			// after it, linked into the address-ordered list.
			node->mSize = size;
			NodeType* nextNode = new NodeType(block + size, remaining);
		
			nextNode->mNext = node->mNext;
			nextNode->mPrev = node;

			if(node->mNext)
			{
				node->mNext->mPrev = nextNode;
			}
			
			node->mNext = nextNode;
				
			mPool.insert(PoolType::value_type(nextNode->mPtr, nextNode));
			InsertBlock(nextNode);
		}

		GLF_ASSERT(SanityCheck());

		return block;
	}

	// Memory deallocation. Free(NULL) is a no-op.
	void Free(void* p)
	{
		if(p == NULL)
		{
			return;
		}

		// find() instead of operator[]: operator[] would default-insert a NULL
		// entry for an unknown pointer in release builds.
		PoolType::iterator found = mPool.find(p);
		GLF_ASSERT(found != mPool.end());
		if(found == mPool.end())
		{
			return;
		}

		NodeType* node = found->second;
		node->mUsed = false;

		// Coalesce with free neighbours... (note: MergePrev may delete node
		// and invalidate 'found'; neither is used afterwards)
		node = MergePrev(node);
		MergeNext(node);

		// ... and hand the merged block back to the free lists.
		InsertBlock(node);

		GLF_ASSERT(SanityCheck());
	}
	
};

} // end namespace glf

#endif // __GLF_MEM_TLSFALLOCATOR_H_INCLUDED__
