//==============================================================================
// Copyright (c) 2008-2013 Niall Ryan. All Rights Reserved.
//==============================================================================

#ifndef HYDRA_HASHMAPS_REFINABLEHASHMAP_H
#define HYDRA_HASHMAPS_REFINABLEHASHMAP_H 1

#include "hydra/allocators/DelayedFreeAllocator.h"
#include "hydra/atomic/Atomic.h"
#include "hydra/hashutil.h"
#include "hydra/locks/TTASLock.h"

#include <cassert>
#include <vector>

namespace Hydra
{
	/**
	 * Hash map where the buckets are protected by a set of striped locks. Both the array of buckets and the array of locks will grow
	 * as the load factor increases.
	 *
	 * Bucket and lock counts are kept as powers of two so that a hash value can be mapped to a slot
	 * with a mask (hash & (count-1)). Resizes are signalled through m_isResizing; readers of the lock
	 * array revalidate after locking (see acquireLock). The lock array is freed through the Allocator
	 * (delayed-free, per the header name) so a thread that read a stale m_locks pointer is presumably
	 * still safe to dereference it — TODO confirm against DelayedFreeAllocator's contract.
	 */
	template<typename K, typename T, typename Hash=HashUtil::HashFunction<K>, typename Pred=HashUtil::EqualTo<K>, typename Lock=TTASLock, typename Allocator=DelayedFreeAllocator>
	class RefinableHashMap
	{
	public:
        static_assert(!Allocator::kAbaVulnerable, "Allocator must not be vulnerable to the ABA problem");

		/// numBuckets and numLocks must both be powers of two, with numLocks <= numBuckets (asserted).
		RefinableHashMap(unsigned int numBuckets=16, unsigned int numLocks=4);
		~RefinableHashMap();
		/// Adds (key, value); returns false (leaving the map unchanged) if key is already present.
		bool insert(const K& key, const T& value);
		/// Removes key; returns false if key was not present.
		bool erase(const K& key);
		/// Returns true if key is present.
		bool contains(const K& key);
		/// Copies the value for key into *value and returns true; returns false if key is absent.
		bool find(const K& key, T* value);
		/// Threshold of size/buckets above which insert() triggers a resize (default 0.75).
		void setMaxLoadFactor(float maxLoadFactor) { m_maxLoadFactor = maxLoadFactor; }
		float getLoadFactor() const     { return static_cast<float>(m_size) / static_cast<float>(m_numBuckets); }
		unsigned int getSize() const { return m_size; }
		bool isEmpty() const         { return (m_size == 0); }

	private:
		//doubles both the bucket and lock arrays; only one thread resizes at a time
		void grow();
		//returns the striped lock for hashValue, already locked and validated against a concurrent resize
		Lock* acquireLock(unsigned int hashValue);

		//key/value entry stored in a bucket
		struct Pair
		{
			Pair(const K& key, const T& value) : m_key(key), m_value(value) { }
			K m_key;
			T m_value;
		};
		typedef std::vector<Pair> Bucket;

		Allocator m_alloc;
		Atomic<bool> m_isResizing;   //set by the single resizing thread; acquireLock spins on it
		Atomic<unsigned int> m_size; //element count, updated outside the bucket locks
		float m_maxLoadFactor;

		Atomic<unsigned int> m_numLocks; //only ever grows; written after m_locks (see grow)
		Atomic<Lock*> m_locks;

		unsigned int m_numBuckets; //only mutated while all locks are quiesced during a resize
		Bucket* m_buckets;
	};

	//==================================================================================================
	//==================================================================================================
	//==================================================================================================

	/**
	 * Constructs an empty map with the given bucket and lock stripe counts.
	 * Both counts must be non-zero powers of two (slot selection masks with count-1,
	 * so a count of zero would produce an all-ones mask and index out of bounds),
	 * and there must be no more locks than buckets.
	 */
	template<typename K, typename T, typename Hash, typename Pred, typename Lock, typename Allocator>
	inline RefinableHashMap<K, T, Hash, Pred, Lock, Allocator>::RefinableHashMap(unsigned int numBuckets, unsigned int numLocks)
	{
		//note: a plain (n&(n-1))==0 test also accepts 0, so reject that explicitly
		assert(numBuckets != 0 && (numBuckets&(numBuckets-1)) == 0);
		assert(numLocks != 0 && (numLocks&(numLocks-1)) == 0);
		assert(numLocks<=numBuckets);
		m_isResizing = false;
		m_size = 0;
		m_maxLoadFactor = 0.75f;
		m_numLocks = numLocks;
		m_locks = m_alloc.createArray<Lock>(m_numLocks);
		m_numBuckets = numBuckets;
		m_buckets = new Bucket[m_numBuckets];
	}

	/**
	 * Releases the lock array via the allocator and frees the buckets.
	 * Not safe to run concurrently with any other operation on the map.
	 */
	template<typename K, typename T, typename Hash, typename Pred, typename Lock, typename Allocator>
	inline RefinableHashMap<K, T, Hash, Pred, Lock, Allocator>::~RefinableHashMap()
	{
		m_alloc.destroyArray<Lock>(m_locks, m_numLocks);
		delete[] m_buckets;
	}

	/**
	 * Returns the striped lock covering hashValue, in the locked state.
	 * Protocol: wait out any resize, take the lock from the current lock array, then
	 * revalidate that no resize started (or swapped the array) in the meantime; if one
	 * did, release and retry. The caller must unlock the returned lock when done.
	 */
	template<typename K, typename T, typename Hash, typename Pred, typename Lock, typename Allocator>
	inline Lock* RefinableHashMap<K, T, Hash, Pred, Lock, Allocator>::acquireLock(unsigned int hashValue)
	{
		while( true )
		{
			//spin until no resize is in progress before touching the lock array
			while( m_isResizing )
			{
			}
			unsigned int oldNumLocks = m_numLocks; //read numLocks before lock pointer, since numLocks only increases we won't read past the array
			Lock* oldLocks = m_locks;
			Lock* lock = &oldLocks[hashValue & (oldNumLocks-1)];
			lock->lock();
			//revalidate: a resize may have begun, or completed and replaced the lock array,
			//between our reads above and acquiring the lock
			bool isResizing = m_isResizing;
			if( !isResizing && (m_locks == oldLocks) )
				return lock;
			lock->unlock();
		}
	}

	/**
	 * Inserts (key, value) if key is not already present.
	 * Returns true on insertion, false if the key already existed (map unchanged).
	 * May trigger a resize when the load factor exceeds the configured maximum.
	 */
	template<typename K, typename T, typename Hash, typename Pred, typename Lock, typename Allocator>
	inline bool RefinableHashMap<K, T, Hash, Pred, Lock, Allocator>::insert(const K& key, const T& value)
	{
		Hash hash;
		unsigned int hashValue = hash(key);
		Lock* lock = acquireLock(hashValue);
		Bucket& bucket = m_buckets[hashValue & (m_numBuckets-1)];
		Pred pred; //construct the predicate once, not per element
		//Bucket is a dependent type, so its iterator needs the typename disambiguator
		for( typename Bucket::const_iterator iter=bucket.begin(); iter!=bucket.end(); ++iter )
		{
			if( pred(iter->m_key, key) )
			{
				lock->unlock();
				return false; //duplicate key
			}
		}
		bucket.push_back(Pair(key, value));
		lock->unlock();
		++m_size; //atomic, updated outside the bucket lock
		if( getLoadFactor() > m_maxLoadFactor )
			grow();
		return true;
	}

	/**
	 * Removes key from the map. Returns true if an entry was removed, false if the
	 * key was not present. Uses swap-with-back removal, so ordering within a bucket
	 * is not preserved.
	 */
	template<typename K, typename T, typename Hash, typename Pred, typename Lock, typename Allocator>
	inline bool RefinableHashMap<K, T, Hash, Pred, Lock, Allocator>::erase(const K& key)
	{
		Hash hash;
		unsigned int hashValue = hash(key);
		Lock* lock = acquireLock(hashValue);
		Bucket& bucket = m_buckets[hashValue & (m_numBuckets-1)];
		Pred pred; //construct the predicate once, not per element
		//Bucket is a dependent type, so its iterator needs the typename disambiguator
		for( typename Bucket::iterator iter=bucket.begin(); iter!=bucket.end(); ++iter )
		{
			if( pred(iter->m_key, key) )
			{
				//overwrite the match with the last element and shrink - O(1) removal
				*iter = bucket.back();
				bucket.pop_back();
				lock->unlock();
				--m_size;
				return true;
			}
		}
		lock->unlock();
		return false;
	}

	/**
	 * Returns true if key is present in the map. The result is only a snapshot under
	 * concurrent mutation: the key may be inserted or erased immediately afterwards.
	 */
	template<typename K, typename T, typename Hash, typename Pred, typename Lock, typename Allocator>
	inline bool RefinableHashMap<K, T, Hash, Pred, Lock, Allocator>::contains(const K& key)
	{
		Hash hash;
		unsigned int hashValue = hash(key);
		Lock* lock = acquireLock(hashValue);
		Bucket& bucket = m_buckets[hashValue & (m_numBuckets-1)];
		Pred pred; //construct the predicate once, not per element
		//Bucket is a dependent type, so its iterator needs the typename disambiguator
		for( typename Bucket::const_iterator iter=bucket.begin(); iter!=bucket.end(); ++iter )
		{
			if( pred(iter->m_key, key) )
			{
				lock->unlock();
				return true;
			}
		}
		lock->unlock();
		return false;
	}

	/**
	 * Looks up key; on success copies the associated value into *value and returns true.
	 * Returns false (leaving *value untouched) if the key is absent. value must be non-null.
	 */
	template<typename K, typename T, typename Hash, typename Pred, typename Lock, typename Allocator>
	inline bool RefinableHashMap<K, T, Hash, Pred, Lock, Allocator>::find(const K& key, T* value)
	{
		Hash hash;
		unsigned int hashValue = hash(key);
		Lock* lock = acquireLock(hashValue);
		Bucket& bucket = m_buckets[hashValue & (m_numBuckets-1)];
		Pred pred; //construct the predicate once, not per element
		//Bucket is a dependent type, so its iterator needs the typename disambiguator
		for( typename Bucket::const_iterator iter=bucket.begin(); iter!=bucket.end(); ++iter )
		{
			if( pred(iter->m_key, key) )
			{
				*value = iter->m_value; //copy out while still holding the bucket lock
				lock->unlock();
				return true;
			}
		}
		lock->unlock();
		return false;
	}

	/**
	 * Doubles the bucket array (and the lock array) and rehashes every element.
	 * Only the thread that wins the m_isResizing flag performs the resize; a loser
	 * returns immediately. While m_isResizing is set, acquireLock spins, and the
	 * winner waits for all current lock holders to drain before touching the tables.
	 */
	template<typename K, typename T, typename Hash, typename Pred, typename Lock, typename Allocator>
	inline void RefinableHashMap<K, T, Hash, Pred, Lock, Allocator>::grow()
	{
		unsigned int oldNumBuckets = m_numBuckets; //snapshot before racing for the flag

		if( !m_isResizing.getAndSet(true) )
		{
			//abort if another thread has already resized
			if( oldNumBuckets != m_numBuckets )
			{
				m_isResizing = false;
				return;
			}

			//quiesce: wait for every stripe lock to be released; new acquirers spin on m_isResizing
			unsigned int numLocks = m_numLocks;
			Lock* locks = m_locks;
			for( unsigned int i=0; i<numLocks; ++i )
			{
				Lock& lock = locks[i];
				while( lock.isLocked() )
				{
				}
			}

			//create the new set of buckets, twice as many
			Bucket* oldBuckets = m_buckets;
			m_numBuckets *= 2;
			m_buckets = new Bucket[m_numBuckets];

			//rehash the items into the new buckets.
			// possible optimization - power of 2 bucket count means each bucket splits into 2 new buckets, could reduce copying
			Hash hash;
			for( unsigned int oldBucketIndex=0; oldBucketIndex<oldNumBuckets; ++oldBucketIndex )
			{
				Bucket& oldBucket = oldBuckets[oldBucketIndex];
				//Bucket is a dependent type, so its iterator needs the typename disambiguator
				for( typename Bucket::const_iterator iter=oldBucket.begin(); iter!=oldBucket.end(); ++iter )
				{
					unsigned int hashValue = hash(iter->m_key);
					Bucket& newBucket = m_buckets[hashValue & (m_numBuckets-1)];
					newBucket.push_back(*iter);
				}
			}

			delete[] oldBuckets;

			//free old locks and create new locks, double the size of the old array also
			m_alloc.destroyArray<Lock>(m_locks, m_numLocks);
			unsigned int newNumLocks = m_numLocks * 2;
			m_locks = m_alloc.createArray<Lock>(newNumLocks); //write m_locks before m_numLocks, avoids array bounds error in acquireLock
			m_numLocks = newNumLocks;

			m_isResizing = false;
		}
	}
}

#endif
