/*
 * $Id: HashTable.hpp 127 2011-09-21 06:45:52Z hyunghwan.chung $
 *
   Copyright 2005-2009 Chung, Hyung-Hwan.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
 */

#ifndef _XP_BAS_HASHTABLE_CLASS_
#define _XP_BAS_HASHTABLE_CLASS_

#include <xp/Hashable.hpp>
#include <xp/bas/Pair.hpp>
#include <xp/bas/LinkedList.hpp>
#include <xp/bas/assert.h>

XP_BEGIN_NAMESPACE2 (xp, bas)

// Default hashing functor for HashTable: delegates to the key type's
// own hashCode() method (see xp/Hashable.hpp).
template<typename T>
struct HashTableHasher
{
	size_t operator() (const T& v) const
	{
		const size_t code = v.hashCode();
		return code;
	}
};

// Default growth policy for HashTable: doubles small tables and grows
// progressively more conservatively as the bucket count increases
// (+100%, +50%, +25%, +12.5%, +6.25%).
struct HashTableResizer
{
	xp_size_t operator() (xp_size_t current) const
	{
		if (current < 5000)   return current + current;
		if (current < 50000)  return current + (current / 2);
		if (current < 100000) return current + (current / 4);
		if (current < 150000) return current + (current / 8);
		return current + (current / 16);
	}
};

//
// HashTable is a separate-chaining hash table mapping keys of type K to
// values of type V. Each bucket is a LinkedList of key/value Pairs.
//
//  - HASHER: function object returning an integral hash code for a key.
//  - RS:     function object computing the next bucket count when the
//            table must grow.
//
// The table rehashes once entry_count reaches
// bucket_size * load_factor / 100. Note that some members declared
// 'const' (e.g. the const operator[]) can still insert entries and
// trigger a rehash; the bookkeeping fields are 'mutable' to permit
// this. The class is therefore not safe for concurrent const readers.
//
template <typename K, typename V, typename HASHER = HashTableHasher<K>, typename RS = HashTableResizer>
class HashTable
{
public:
	typedef Pair<K,V>   Entry;
	typedef LinkedList<Entry> Bucket;
	typedef HashTable<K,V,HASHER,RS> ThisHashTable;

	// Creates a table with the given initial bucket count and a load
	// factor expressed as a percentage (75 means: grow at 75% full).
	HashTable (xp_size_t bucket_size = 10, xp_size_t load_factor = 75)
	{
		this->entry_count = 0;
		this->bucket_size = bucket_size;
		this->buckets     = new Bucket[bucket_size];
		this->load_factor = load_factor;
		this->threshold   = bucket_size * load_factor / 100;
	}

	// Copy constructor. Copies every entry of 'table', redistributing
	// the entries over this table's own (equally sized) bucket array.
	HashTable (const ThisHashTable& table)
	{
		this->entry_count = 0;
		this->bucket_size = table.bucket_size;
		this->buckets     = new Bucket[table.bucket_size];
		this->load_factor = table.load_factor;
		this->threshold   = table.bucket_size * table.load_factor / 100;

		for (xp_size_t i = 0; i < table.bucket_size; i++) 
		{
			Bucket& b = table.buckets[i];
			typename Bucket::Node* np;
			for (np = b.head(); np != XP_NULL; np = np->forward()) {
				Entry& e = np->value;
				xp_size_t hc = hasher(e.key) % this->bucket_size;
				this->buckets[hc].append (e);	
				this->entry_count++;
			}
		}

		// no rehash is needed here - the bucket count and the load
		// factor are copied from the source table, so the threshold
		// cannot be exceeded by copying its entries.
		//if (entry_count >= threshold) rehash ();
	}

	~HashTable ()
	{
		clear ();
		if (this->buckets != XP_NULL) delete[] this->buckets;
	}

	// Assignment. Drops the current entries, then copies every entry of
	// 'table' redistributed over this table's existing bucket array.
	// This table keeps its own bucket count and load factor.
	ThisHashTable& operator= (const ThisHashTable& table)
	{
		if (this != &table)
		{
			this->clear ();

			for (xp_size_t i = 0; i < table.bucket_size; i++) {
				Bucket& b = table.buckets[i];
				typename Bucket::Node* np;
				for (np = b.head(); np != XP_NULL; np = np->forward()) {
					Entry& e = np->value;
					xp_size_t hc = hasher(e.key) % bucket_size;
					buckets[hc].append (e);	
					entry_count++;
				}
			}

			if (entry_count >= threshold) this->rehash ();
		}
		return *this;
	}

	// Returns the number of key/value entries stored.
	xp_size_t size () const
	{
		return this->entry_count;
	}

	bool isEmpty () const
	{
		return this->entry_count == 0;
	}

	// Returns the number of buckets currently allocated.
	xp_size_t bucketSize () const
	{
		return this->bucket_size;
	}

	// Returns the bucket at the given index (index < bucketSize()).
	Bucket& bucket (xp_size_t index) const
	{
		xp_assert (index < this->bucket_size);
		return this->buckets[index];	
	}			

	// Returns the value mapped to 'key', inserting a default-constructed
	// value (and possibly rehashing) if the key is not present.
	V& operator[] (const K& key)
	{
		xp_size_t hc = hasher(key) % bucket_size;
	
		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) {
			Entry& e = np->value;
			if (key == e.key) return e.value;
		}

		if (entry_count >= threshold) {
			rehash ();
			// the bucket count changed; recompute the slot.
			hc = hasher(key) % bucket_size;
		}

		Entry& e2 = buckets[hc].append (Entry(key));
		entry_count++;
		return e2.value;
	}	

	// Const variant of operator[]. NOTE: despite being declared const,
	// a missing key is still inserted (the bookkeeping fields are
	// mutable), mirroring the non-const overload.
	const V& operator[] (const K& key) const
	{
		xp_size_t hc = hasher(key) % bucket_size;
	
		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) {
			Entry& e = np->value;
			if (key == e.key) return e.value;
		}

		if (entry_count >= threshold) {
			rehash ();
			hc = hasher(key) % bucket_size;
		}

		Entry& e2 = buckets[hc].append (Entry(key));
		entry_count++;
		return e2.value;
	}	

	//
	// NOTE: getConstWithCustomKey() and getWithCustomKey() would
	//       not need to have different names if compilers were smarter.
	//

	// Looks up an entry using an alternative key type MK hashed by
	// MHASHER. MK must be hash-compatible with K (equal keys must
	// produce equal hash codes) and comparable with K via operator==.
	// Returns XP_NULL if no matching entry exists.
	template <typename MK, typename MHASHER>
	const V* getConstWithCustomKey (const MK& key) const
	{
		MHASHER h;
		xp_size_t hc = h(key) % bucket_size;
	
		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) {
			const Entry& e = np->value;
			if (key == e.key) return &e.value;
		}

		return XP_NULL;
	}

	// Same as getConstWithCustomKey() but returns a modifiable pointer.
	template <typename MK, typename MHASHER>
	V* getWithCustomKey (const MK& key) const
	{
		MHASHER h;
		xp_size_t hc = h(key) % bucket_size;
	
		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) {
			Entry& e = np->value;
			if (key == e.key) return &e.value;
		}

		return XP_NULL;
	}

	// Returns a pointer to the value mapped to 'key', or XP_NULL if the
	// key is not present. Never inserts.
	V* get (const K& key)
	{
		xp_size_t hc = hasher(key) % bucket_size;
	
		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) {
			Entry& e = np->value;
			if (key == e.key) return &e.value;
		}

		return XP_NULL;
	}

	const V* get (const K& key) const
	{
		xp_size_t hc = hasher(key) % bucket_size;
	
		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) {
			const Entry& e = np->value;
			if (key == e.key) return &e.value;
		}

		return XP_NULL;
	}

	// Lookup variant that also reports the bucket index and list node
	// of the match, for use with remove(hash_code, node).
	V* get (const K& key, 
		xp_size_t* hash_code, typename Bucket::Node** node)
	{
		xp_size_t hc = hasher(key) % bucket_size;
	
		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) {
			Entry& e = np->value;
			if (key == e.key) {
				*hash_code = hc;
				*node = np;
				return &e.value;
			}
		}

		return XP_NULL;
	}

	const V* get (const K& key, 
		xp_size_t* hash_code, typename Bucket::Node** node) const
	{
		xp_size_t hc = hasher(key) % bucket_size;
	
		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) 
		{
			Entry& e = np->value;
			if (key == e.key) 
			{
				*hash_code = hc;
				*node = np;
				return &e.value;
			}
		}

		return XP_NULL;
	}

	// Inserts or overwrites the mapping for 'key'. Always returns 0.
	int put (const K& key, const V& value)
	{
		upsert (key, value);
		return 0;
	}

	// Inserts the mapping only if 'key' is not present.
	// Returns 0 on insertion, -1 if the key already exists.
	int putNew (const K& key, const V& value)
	{
		return (insertNew(key,value) == XP_NULL)? -1: 0;
	}

	// Returns the existing value for 'key', or inserts a
	// default-constructed value and returns that.
	V* insert (const K& key)
	{
		xp_size_t hc = hasher(key) % bucket_size;

		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) 
		{
			Entry& e = np->value;
			if (key == e.key) return &e.value;
		}

		if (entry_count >= threshold) 
		{
			rehash ();
			hc = hasher(key) % bucket_size;
		}

		Entry& e = buckets[hc].append (Entry(key));
		entry_count++;
		return &e.value;
	}

	// Returns the existing value for 'key' (left unmodified), or
	// inserts 'value' and returns the inserted copy.
	V* insert (const K& key, const V& value)
	{
		xp_size_t hc = hasher(key) % bucket_size;

		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) 
		{
			Entry& e = np->value;
			if (key == e.key) return &e.value;
		}

		if (entry_count >= threshold) 
		{
			rehash ();
			hc = hasher(key) % bucket_size;
		}

		Entry& e = buckets[hc].append (Entry(key,value));
		entry_count++;
		return &e.value;
	}

	// Inserts a default-constructed value for 'key'.
	// Returns XP_NULL if the key already exists.
	V* insertNew (const K& key)
	{
		xp_size_t hc = hasher(key) % bucket_size;

		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) 
		{
			Entry& e = np->value;
			if (key == e.key) return XP_NULL;
		}

		if (entry_count >= threshold) 
		{
			rehash ();
			hc = hasher(key) % bucket_size;
		}

		Entry& e = buckets[hc].append (Entry(key));
		entry_count++;
		return &e.value;
	}

	// Inserts 'value' for 'key'. Returns XP_NULL if the key already
	// exists (the existing value is left unmodified).
	V* insertNew (const K& key, const V& value)
	{
		xp_size_t hc = hasher(key) % bucket_size;

		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) 
		{
			Entry& e = np->value;
			if (key == e.key) return XP_NULL;
		}

		if (entry_count >= threshold) 
		{
			rehash ();
			hc = hasher(key) % bucket_size;
		}

		Entry& e = buckets[hc].append (Entry(key, value));
		entry_count++;
		return &e.value;
	}

	// Updates the value for an existing 'key', or inserts a new
	// mapping. Returns a pointer to the stored value.
	V* upsert (const K& key, const V& value)
	{
		xp_size_t hc = hasher(key) % bucket_size;

		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) 
		{
			Entry& e = np->value;
			if (key == e.key) 
			{
				e.value = value;
				return &e.value;
			}
		}

		if (entry_count >= threshold) 
		{
			rehash ();
			hc = hasher(key) % bucket_size;
		}

		Entry& e = buckets[hc].append (Entry(key,value));
		entry_count++;
		return &e.value;
	}

	// Removes the entry matching an alternative key type MK hashed by
	// MHASHER (see getConstWithCustomKey for the MK/MHASHER contract).
	// Returns 0 on removal, -1 if no entry matched.
	template <typename MK, typename MHASHER>
	int removeWithCustomKey (const MK& key)
	{
		MHASHER h;
		xp_size_t hc = h(key) % bucket_size;

		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) 
		{
			Entry& e = np->value;
			if (key == e.key) 
			{
				this->buckets[hc].remove (np);
				this->entry_count--;
				return 0;
			}
		}

		return -1;
	}

	// Removes the entry for 'key'. Returns 0 on removal, -1 if the key
	// is not present.
	int remove (const K& key)
	{
		xp_size_t hc = hasher(key) % bucket_size;

		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) 
		{
			Entry& e = np->value;
			if (key == e.key) 
			{
				this->buckets[hc].remove (np);
				this->entry_count--;
				return 0;
			}
		}

		return -1;
	}

	// Removes a specific node from a specific bucket, as reported by
	// the get(key, hash_code, node) overloads.
	//
	// WARNING: this method should be used with extra care. 'hc' and
	// 'np' must identify a live node in this table; they are not
	// validated here.
	int remove (xp_size_t hc, typename Bucket::Node* np)
	{
		this->buckets[hc].remove (np);
		this->entry_count--;
		return 0;
	}

	// Removes every entry whose value equals 'value'.
	// Returns the number of entries removed. Linear scan.
	xp_size_t removeValue (const V& value)
	{
		xp_size_t count = 0;

		for (xp_size_t i = 0; i < this->bucket_size; i++) 
		{
			typename Bucket::Node* np, * np2;
			np = buckets[i].head();
			while (np != XP_NULL) {
				Entry& e = np->value;
				// grab the successor before removing the node.
				np2 = np->forward ();
				if (value == e.value) {
					this->remove (i, np);
					count++;
				}
				np = np2;
			}
		}

		return count;
	}

	// Returns true if an entry exists for 'key'. Never inserts.
	bool containsKey (const K& key) const
	{
		xp_size_t hc = hasher(key) % bucket_size;
	
		typename Bucket::Node* np;
		for (np = buckets[hc].head(); np != XP_NULL; np = np->forward()) 
		{
			Entry& e = np->value;
			if (key == e.key) return true;
		}

		return false;
	}

	// Returns true if at least one entry holds the given value.
	// Unlike key lookup, this is a linear scan over all entries.
	// (The parameter type is V, matching removeValue(); it is compared
	// against entry values, not keys.)
	bool containsValue (const V& value) const
	{
		for (xp_size_t i = 0; i < bucket_size; i++) 
		{
			typename Bucket::Node* np;
			for (np = buckets[i].head(); 
			     np != XP_NULL; np = np->forward()) 
			{
				Entry& e = np->value;
				if (value == e.value) return true;
			}
		}

		return false;
	}

	// Removes all entries. If new_bucket_size is positive, the bucket
	// array is additionally reallocated to that size and the rehash
	// threshold is recomputed.
	void clear (int new_bucket_size = 0)
	{
		for (xp_size_t i = 0; i < bucket_size; i++) buckets[i].clear ();
		entry_count = 0;	

		if (new_bucket_size > 0)
		{
			// allocate first so that a failure leaves the old
			// (already emptied) bucket array intact.
			Bucket* tmp = new Bucket[new_bucket_size];
			bucket_size = new_bucket_size;
			threshold   = bucket_size * load_factor / 100;
			delete[] buckets;	
			buckets = tmp;
		}
	}

	typedef int (ThisHashTable::*TraverseCallback) 
		(const Entry& entry, void* user_data) const;

	// Invokes the member-function callback on every entry in bucket
	// order. Stops and returns -1 as soon as the callback returns -1;
	// returns 0 after a full traversal.
	int traverse (TraverseCallback callback, void* user_data) const
	{
		for (xp_size_t i = 0; i < this->bucket_size; i++) 
		{
			typename Bucket::Node* np;
			for (np = buckets[i].head(); 
			     np != XP_NULL; np = np->forward()) 
			{
				const Entry& e = np->value;
				if ((this->*callback)(e,user_data) == -1) return -1;
			}
		}

		return 0;
	}

	typedef int (*StaticTraverseCallback) 
		(ThisHashTable* table, Entry& entry, void* user_data);

	// Free-function variant of traverse(); entries may be modified by
	// the callback. Same early-out semantics as above.
	int traverse (StaticTraverseCallback callback, void* user_data) 
	{
		for (xp_size_t i = 0; i < this->bucket_size; i++) 
		{
			typename Bucket::Node* np;
			for (np = buckets[i].head(); 
			     np != XP_NULL; np = np->forward()) 
			{
				Entry& e = np->value;
				if (callback(this,e,user_data) == -1) return -1;
			}
		}

		return 0;
	}
	
protected:
	mutable xp_size_t  entry_count; // number of entries stored
	mutable xp_size_t  bucket_size; // number of buckets allocated
	mutable Bucket*    buckets;     // bucket array of size bucket_size
	mutable xp_size_t  threshold;   // rehash once entry_count reaches this
	xp_size_t load_factor;          // threshold percentage (e.g. 75)
	HASHER hasher;
	RS resizer;

	// Grows the bucket array to resizer(bucket_size) and redistributes
	// all entries. The existing list nodes are yielded from the old
	// buckets and relinked into the new ones, so no per-entry memory
	// allocation takes place and entry addresses remain stable across
	// a rehash. (If the bucket used a memory pool this would not work;
	// fortunately the hash table doesn't use one for a bucket.)
	//
	// NOTE: if the hasher throws part-way through, the nodes already
	// moved into new_buckets are destroyed with it, losing those
	// entries; a non-throwing hasher is strongly recommended.
	void rehash () const
	{
		xp_size_t new_bucket_size = this->resizer (this->bucket_size);
		Bucket* new_buckets = new Bucket[new_bucket_size];

		try 
		{
			for (xp_size_t i = 0; i < this->bucket_size; i++) 
			{
				typename Bucket::Node* np = buckets[i].head();
				while (np != XP_NULL)
				{
					// grab the successor before the node is
					// yielded out of the old bucket.
					typename Bucket::Node* next = np->forward();
					const Entry& e = np->value;
					xp_size_t hc = hasher(e.key) % new_bucket_size;
					new_buckets[hc].appendNode (buckets[i].yield(np));
					np = next;
				}
			}
		}
		catch (...) 
		{
			delete[] new_buckets;
			throw;
		}

		delete[] this->buckets;
		this->buckets     = new_buckets;
		this->bucket_size = new_bucket_size;
		this->threshold   = this->load_factor * this->bucket_size / 100;
	}
};

XP_END_NAMESPACE2 (xp, bas)

#endif
