#ifndef __CONCURRENT_HOPSCOTCH_HASHMAP__
#define __CONCURRENT_HOPSCOTCH_HASHMAP__

////////////////////////////////////////////////////////////////////////////////
// ConcurrentHopscotchHashMap Class
//
////////////////////////////////////////////////////////////////////////////////
//TERMS OF USAGE
//----------------------------------------------------------------------
//
//	Permission to use, copy, modify and distribute this software and
//	its documentation for any purpose is hereby granted without fee,
//	provided that due acknowledgments to the authors are provided and
//	this permission notice appears in all copies of the software.
//	The software is provided "as is". There is no warranty of any kind.
//
//Authors:
//	Maurice Herlihy
//	Brown University
//	and
//	Nir Shavit
//	Tel-Aviv University
//	and
//	Moran Tzafrir
//	Tel-Aviv University
//
//	Date: July 15, 2008.  
//
////////////////////////////////////////////////////////////////////////////////
// Programmer : Moran Tzafrir (MoranTza@gmail.com)
//
////////////////////////////////////////////////////////////////////////////////


////////////////////////////////////////////////////////////////////////////////
// INCLUDE DIRECTIVES
////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <limits.h>

#include <stdexcept>

#include "math.h"
#include "memory.h"

////////////////////////////////////////////////////////////////////////////////
// CONSTS
////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////
// FUNCTIONS: Small Utilities
////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////
// CLASS: ConcurrentHopscotchHashMap
////////////////////////////////////////////////////////////////////////////////
// A concurrent hash map based on hopscotch hashing.
//
// Template policy requirements (as used by this class):
//   _tHash : static unsigned int Calc(key)            - hash function
//            static bool IsEqual(key_a, key_b)        - key equality
//            static void relocate_key_reference(to, from)
//            static void relocate_data_reference(to, from)
//            constants _EMPTY_HASH, _EMPTY_KEY, _EMPTY_DATA marking free slots
//   _tLock : void aquire() / void release()           - mutual exclusion
//            (note: "aquire" spelling is part of the expected lock contract)
//
// Writers lock a segment; readers are lock-free and detect concurrent
// relocations through per-segment timestamps (retrying when one changes).
template <typename _tKey,
          typename _tData,
          typename _tHash,
          typename _tLock>
class ConcurrentHopscotchHashMap {
private:

    struct Segment;

    //constants
    static const short _null_delta     = SHRT_MIN; // "no element" marker in the delta-encoded lists
    static const short _timestamp_mask = (4 - 1);  // per-segment timestamp slots minus one

    //fields (all immutable after construction)
    const int      _segment_shift;          // hash bits skipped before selecting a segment
    const int      _segment_mask;           // number of segments minus one (power of two)
    Segment* const _segments;               // array of (_segment_mask + 1) segments
    const int      _bucket_mask;            // buckets per table minus one (power of two)
    const int      _bucket_shift;           // hash bits skipped before selecting a table
    const int      _cache_mask;             // buckets per cache line minus one
    const bool     _is_cacheline_alignment; // try to keep a bucket's list within one cache line

    //inner classes

    // A single table slot.  Keys sharing a home bucket form a linked list
    // encoded with relative offsets ("deltas"): _first_delta leads from the
    // home bucket to the first list element, _next_delta chains the rest,
    // and _null_delta terminates the list.
    struct Bucket {
        short        volatile _first_delta;
        short        volatile _next_delta;
        unsigned int volatile _hash;
        _tKey        volatile _key;
        _tData       volatile _data;

        Bucket()
        : _first_delta(_null_delta),
          _next_delta (_null_delta),
          _hash(_tHash::_EMPTY_HASH),
          _key (_tHash::_EMPTY_KEY),
          _data(_tHash::_EMPTY_DATA)
        {
        }
    };

    // An independently locked slice of the map.  Writers serialize on _lock;
    // lock-free readers detect concurrent relocations via _timestamp and an
    // in-progress resize via _resize_table_mask.
    struct Segment {
        int volatile          _table_mask;                     // tables minus one; -1 = uninitialized
        unsigned int volatile _timestamp[_timestamp_mask + 1]; // bumped whenever a key is moved or removed
        Bucket*               _table[32];                      // bucket tables (hard cap on doublings)
        int volatile          _resize_table_mask;              // new mask while a resize runs, else -1
        _tLock                _lock;                           // serializes updates of this segment
        unsigned int volatile _count;                          // keys currently stored in this segment

        Segment() {
            _table_mask = -1; // nothing allocated yet, so release() frees nothing
            release();
        }

        ~Segment()
        {
            release();
        }

        // Doubles the number of tables and migrates every key whose hash now
        // selects one of the new tables.  The caller must hold _lock.
        void resize(const int bucket_shift, const int bucket_mask)
        {
            const int old_num_tables(_table_mask + 1);
            const int new_num_tables(2 * old_num_tables);
            const int new_table_mask(new_num_tables - 1);
            // Publish the pending mask first so lock-free readers also probe
            // the position a key may already have been migrated to.
            _resize_table_mask = new_table_mask;

            //allocate new tables and initialize them (double number of tables)
            //NOTE: _table holds at most 32 entries; a segment must not be
            //resized beyond that (unchanged limitation of the original design).
            for (int iTbl(old_num_tables); iTbl < new_num_tables; ++iTbl) {
                _table[iTbl] = new Bucket[bucket_mask + 1];
            }

            //move the keys that now belong to one of the new tables
            for (int iTbl(0); iTbl < old_num_tables; ++iTbl) {

                Bucket* START_BUCKET_OLD(_table[iTbl]);
                Bucket* START_BUCKET_NEW(_table[iTbl + old_num_tables]);

                for (int iBucket(0); iBucket <= bucket_mask; ++iBucket, ++START_BUCKET_OLD, ++START_BUCKET_NEW) {

                    Bucket* last_new_bucket(NULL);
                    Bucket* last_old_bucket(NULL);
                    Bucket* curr_old_bucket(START_BUCKET_OLD);
                    Bucket* curr_new_bucket(START_BUCKET_NEW);

                    //go over the keys in the current bucket's list
                    int next_old_delta(curr_old_bucket->_first_delta);

                    while (_null_delta != next_old_delta) {

                        curr_old_bucket += next_old_delta;
                        curr_new_bucket += next_old_delta;

                        //does the key select a new table under the new mask?
                        const int new_table_num(get_table_num(curr_old_bucket->_hash, new_table_mask, bucket_shift));
                        if (iTbl != new_table_num) {

                            //copy the key into the mirror bucket of the new table
                            _tHash::relocate_data_reference(curr_new_bucket->_data, curr_old_bucket->_data);
                            _tHash::relocate_key_reference(curr_new_bucket->_key, curr_old_bucket->_key);
                            curr_new_bucket->_hash = curr_old_bucket->_hash;

                            //link it into the new table's list
                            if (NULL == last_new_bucket)
                                START_BUCKET_NEW->_first_delta = (short)(curr_new_bucket - START_BUCKET_NEW);
                            else
                                last_new_bucket->_next_delta = (short)(curr_new_bucket - last_new_bucket);
                            last_new_bucket = curr_new_bucket;

                            //unlink it from the old table's list
                            if (NULL == last_old_bucket) {
                                if (_null_delta == curr_old_bucket->_next_delta)
                                    START_BUCKET_OLD->_first_delta = _null_delta;
                                else
                                    START_BUCKET_OLD->_first_delta = (short)((curr_old_bucket + curr_old_bucket->_next_delta) - START_BUCKET_OLD);
                            }
                            else {
                                if (_null_delta == curr_old_bucket->_next_delta)
                                    last_old_bucket->_next_delta = _null_delta;
                                else
                                    last_old_bucket->_next_delta = (short)((curr_old_bucket + curr_old_bucket->_next_delta) - last_old_bucket);
                            }

                            //free the old bucket (read _next_delta before clearing it)
                            curr_old_bucket->_hash = _tHash::_EMPTY_HASH;
                            _tHash::relocate_key_reference(curr_old_bucket->_key, _tHash::_EMPTY_KEY);
                            _tHash::relocate_data_reference(curr_old_bucket->_data, _tHash::_EMPTY_DATA);
                            next_old_delta = curr_old_bucket->_next_delta;
                            curr_old_bucket->_next_delta = _null_delta;
                        } else {
                            last_old_bucket = curr_old_bucket;
                            next_old_delta = curr_old_bucket->_next_delta;
                        }

                    }//while on keys

                }//for on buckets
            }//for on tables

            //BUGFIX: publish the new table mask only after ALL tables have
            //been migrated.  The original updated these inside the per-table
            //loop, so lock-free readers could observe the new _table_mask and
            //a cleared _resize_table_mask while later tables still held
            //un-migrated keys, making those keys transiently invisible.
            _table_mask = new_table_mask;
            _resize_table_mask = -1;
        }

        // Index of the table a hash selects under the given mask.
        int get_table_num(const unsigned int hash, const int table_mask, const int bucket_shift)
        {
            return ((hash >> bucket_shift) & table_mask);
        }

        // Allocates the first table; frees any previously held resources.
        void init(const int table_size)
        {
            release();

            _table_mask = 0;
            _resize_table_mask = -1;
            _table[0] = new Bucket[table_size];
        }

        // Frees all tables and resets the segment to its pristine state.
        void release()
        {
            if (-1 != _table_mask) {
                for (int iTbl(0); iTbl <= _table_mask; ++iTbl) {
                    delete [] _table[iTbl];
                    _table[iTbl] = NULL;
                }
            }
            _table_mask = -1;
            _resize_table_mask = -1;
            for (int i(0); i <= _timestamp_mask; ++i)
                _timestamp[i] = 0;
            _lock.release();
            _count = 0;
        }
    };

    //utilities

    // Smallest s such that (1 << s) >= value.
    static unsigned int calc_divide_shift(const unsigned int value)
    {
        unsigned int numShift(0);
        unsigned int curr(1);
        while (curr < value) {
            curr <<= 1;
            ++numShift;
        }
        return numShift;
    }

    // Smallest power of two >= value (returns 1 for value <= 1).
    static unsigned int nearest_power_of_two(const unsigned int value)
    {
        unsigned int rc(1);
        while (rc < value) {
            rc <<= 1;
        }
        return rc;
    }

    //help methods

    // Validates the constructor arguments and returns the fixed segment
    // shift.  It initializes the FIRST declared member, so it runs before any
    // allocation: this replaces the original bare `throw;` in the constructor
    // body, which (a) calls std::terminate when no exception is active and
    // (b) ran only after `new Segment[...]` had leaked — or after
    // nearest_power_of_two() had spun forever on a negative argument
    // converted to a huge unsigned value.
    static int checked_segment_shift(const int initial_capacity, const int concurrency_level)
    {
        if (initial_capacity <= 0 || concurrency_level <= 0)
            throw std::invalid_argument("ConcurrentHopscotchHashMap: initial_capacity and concurrency_level must be positive");
        return 16; // segment selector starts at hash bit 16
    }

    // Buckets per table minus one; never fewer than 512 buckets per table.
    static int calc_bucket_mask(const int initial_capacity, const int concurrency_level)
    {
        int bucket_mask(nearest_power_of_two(initial_capacity / (nearest_power_of_two(concurrency_level))) - 1);

        if (bucket_mask < 511)
            bucket_mask = 511;

        return bucket_mask;
    }

    Segment& get_segment(const unsigned int hash)
    {
        return _segments[(hash >> _segment_shift) & _segment_mask];
    }

    Bucket* get_table(Segment& segment, const unsigned int hash)
    {
        return segment._table[(hash >> _bucket_shift) & segment._table_mask];
    }

    // Table lookup using the mask of an in-progress resize.
    Bucket* get_table_resize(Segment& segment, const unsigned int hash, int resize_table_mask)
    {
        return segment._table[(hash >> _bucket_shift) & resize_table_mask];
    }

    Bucket* get_bucket(Bucket* table, const unsigned int hash)
    {
        return &table[hash & _bucket_mask];
    }

    // First bucket of the cache line containing `bucket`.
    Bucket* get_start_cacheline_bucket(Bucket* const table, Bucket* const bucket)
    {
        return (bucket - ((bucket - table) & _cache_mask));
    }

    // Unlinks key_bucket from from_bucket's list and marks it free.  Bumps
    // the timestamp so lock-free readers retry.  Caller holds the segment lock.
    void remove_key(Segment&           segment,
                    Bucket* const      from_bucket,
                    Bucket* const      key_bucket,
                    Bucket* const      prev_key_bucket,
                    const unsigned int hash)
    {
        key_bucket->_hash = _tHash::_EMPTY_HASH;
        key_bucket->_key  = _tHash::_EMPTY_KEY;
        key_bucket->_data = _tHash::_EMPTY_DATA;

        if (NULL == prev_key_bucket) {
            if (_null_delta == key_bucket->_next_delta)
                from_bucket->_first_delta = _null_delta;
            else
                from_bucket->_first_delta = (short)(from_bucket->_first_delta + key_bucket->_next_delta);
        } else {
            if (_null_delta == key_bucket->_next_delta)
                prev_key_bucket->_next_delta = _null_delta;
            else
                prev_key_bucket->_next_delta = (short)(prev_key_bucket->_next_delta + key_bucket->_next_delta);
        }

        ++(segment._timestamp[hash & _timestamp_mask]);
        key_bucket->_next_delta = _null_delta;
        --(segment._count);
    }

    // Inserts free_bucket as the FIRST list element of keys_bucket (used when
    // free_bucket sits on the key's home cache line).
    void add_key_to_beginning_of_list(Bucket* const      keys_bucket,
                                      Bucket* const      free_bucket,
                                      const unsigned int hash,
                                      const _tKey&       key,
                                      const _tData&      data)
    {
        free_bucket->_data = data;
        free_bucket->_key  = key;
        free_bucket->_hash = hash;

        if (0 == keys_bucket->_first_delta) {
            //keys_bucket itself holds a key: insert right after it
            if (_null_delta == keys_bucket->_next_delta)
                free_bucket->_next_delta = _null_delta;
            else
                free_bucket->_next_delta = (short)((keys_bucket + keys_bucket->_next_delta) - free_bucket);
            keys_bucket->_next_delta = (short)(free_bucket - keys_bucket);
        } else {
            if (_null_delta == keys_bucket->_first_delta)
                free_bucket->_next_delta = _null_delta;
            else
                free_bucket->_next_delta = (short)((keys_bucket + keys_bucket->_first_delta) - free_bucket);
            keys_bucket->_first_delta = (short)(free_bucket - keys_bucket);
        }
    }

    // Appends free_bucket after last_bucket (or as the first element when the
    // list is empty).
    void add_key_to_end_of_list(Bucket* const      keys_bucket,
                                Bucket* const      free_bucket,
                                const unsigned int hash,
                                const _tKey&       key,
                                const _tData&      data,
                                Bucket* const      last_bucket)
    {
        free_bucket->_data       = data;
        free_bucket->_key        = key;
        free_bucket->_hash       = hash;
        free_bucket->_next_delta = _null_delta;

        if (NULL == last_bucket)
            keys_bucket->_first_delta = (short)(free_bucket - keys_bucket);
        else
            last_bucket->_next_delta = (short)(free_bucket - last_bucket);
    }

    // After a removal freed free_bucket, tries to find a key that belongs to
    // free_bucket's cache line but currently lives outside it, and moves that
    // key into free_bucket to improve locality.  Caller holds the segment lock.
    void optimize_cacheline_use(Segment&      segment,
                                Bucket* const table,
                                Bucket* const free_bucket)
    {
        Bucket* const start_cacheline_bucket(get_start_cacheline_bucket(table, free_bucket));
        Bucket* const end_cacheline_bucket(start_cacheline_bucket + _cache_mask);
        Bucket* opt_bucket(start_cacheline_bucket);

        do {
            if (_null_delta != opt_bucket->_first_delta) {
                Bucket* relocate_key_last(NULL);
                int curr_delta(opt_bucket->_first_delta);
                Bucket* relocate_key(opt_bucket + curr_delta);
                do {
                    //curr_delta outside [0, _cache_mask] means the key lives
                    //off its home cache line -> move it into free_bucket
                    if (curr_delta < 0 || curr_delta > _cache_mask) {
                        _tHash::relocate_data_reference(free_bucket->_data, relocate_key->_data);
                        _tHash::relocate_key_reference(free_bucket->_key, relocate_key->_key);
                        free_bucket->_hash = relocate_key->_hash;

                        if (_null_delta == relocate_key->_next_delta)
                            free_bucket->_next_delta = _null_delta;
                        else
                            free_bucket->_next_delta = (short)((relocate_key + relocate_key->_next_delta) - free_bucket);

                        if (NULL == relocate_key_last)
                            opt_bucket->_first_delta = (short)(free_bucket - opt_bucket);
                        else
                            relocate_key_last->_next_delta = (short)(free_bucket - relocate_key_last);

                        //bump the timestamp so lock-free readers retry, then
                        //clear the vacated bucket
                        ++(segment._timestamp[relocate_key->_hash & _timestamp_mask]);
                        relocate_key->_hash = _tHash::_EMPTY_HASH;
                        _tHash::relocate_key_reference(relocate_key->_key, _tHash::_EMPTY_KEY);
                        _tHash::relocate_data_reference(relocate_key->_data, _tHash::_EMPTY_DATA);
                        relocate_key->_next_delta = _null_delta;
                        return;
                    }

                    if (_null_delta == relocate_key->_next_delta)
                        break;
                    relocate_key_last = relocate_key;
                    curr_delta += relocate_key->_next_delta;
                    relocate_key += relocate_key->_next_delta;
                } while (true);//over the list
            }
            ++opt_bucket;
        } while (opt_bucket <= end_cacheline_bucket);
    }

    // Initializes every segment.  (Called from the constructor, where virtual
    // dispatch does not apply; kept virtual for interface compatibility.)
    virtual void init()
    {
        for (int iSeg(0); iSeg <= _segment_mask; ++iSeg) {
            _segments[iSeg]._table_mask = -1;
            _segments[iSeg].init(_bucket_mask + 1);
        }
    }

    // Releases the resources of every segment.
    virtual void release()
    {
        for (int iSeg(0); iSeg <= _segment_mask; ++iSeg) {
            _segments[iSeg].release();
        }
    }

public:
    //ctors

    // initial_capacity      - expected number of keys (rounded up internally)
    // concurrency_level     - expected number of concurrently updating
    //                         threads; determines the number of segments
    // cache_line_size       - cache-line size of the machine, in bytes
    // is_optimize_cacheline - keep lists within one cache line when possible
    //                         (use when the update rate is high)
    //
    // Throws std::invalid_argument if initial_capacity or concurrency_level
    // is not positive.  The initializer list now follows the member
    // declaration order (the original listed _segment_mask before
    // _segment_shift, which misrepresents the actual initialization order).
    ConcurrentHopscotchHashMap(
        const int  initial_capacity      = 32 * 1024,
        const int  concurrency_level     = 32,
        const int  cache_line_size       = 64,
        const bool is_optimize_cacheline = true)
    :   _segment_shift ( checked_segment_shift(initial_capacity, concurrency_level) ),
        _segment_mask  ( nearest_power_of_two(concurrency_level) - 1 ),
        _segments      ( new Segment[_segment_mask + 1] ),
        _bucket_mask   ( calc_bucket_mask(initial_capacity, concurrency_level) ),
        _bucket_shift  ( calc_divide_shift(_bucket_mask) ),
        _cache_mask    ( (int)((cache_line_size / sizeof(Bucket)) - 1) ),
        _is_cacheline_alignment( is_optimize_cacheline )
    {
        //allocate the segments & init buckets
        init();
    }

    virtual ~ConcurrentHopscotchHashMap()
    {
        release();
        delete [] _segments;
    }

    //query

    // Returns true if key is in the map.  Lock-free: walks the key's list
    // (and, during a resize, the key's future list) and retries whenever a
    // concurrent relocation (timestamp change) or a table-mask change could
    // have hidden the key from this scan.
    virtual bool containsKey(const _tKey& key)
    {
        const unsigned int hash   (_tHash::Calc(key));
        Segment&           segment(get_segment(hash));

        int          start_table_mask;
        unsigned int start_timestamp;
        do {
            start_table_mask = segment._table_mask;
            start_timestamp  = segment._timestamp[hash & _timestamp_mask];

            //scan the key's list in the current table
            Bucket* curr_bucket(get_bucket(get_table(segment, hash), hash));
            short next_delta(curr_bucket->_first_delta);
            while (_null_delta != next_delta) {
                curr_bucket += next_delta;
                if (hash == curr_bucket->_hash && _tHash::IsEqual(key, curr_bucket->_key))
                    return true;
                next_delta = curr_bucket->_next_delta;
            }

            //a concurrent resize may already have moved the key to a new table
            const int resize_table_mask(segment._resize_table_mask);
            if (-1 != resize_table_mask) {
                Bucket* curr_bucket(get_bucket(get_table_resize(segment, hash, resize_table_mask), hash));
                short next_delta(curr_bucket->_first_delta);
                while (_null_delta != next_delta) {
                    curr_bucket += next_delta;
                    if (hash == curr_bucket->_hash && _tHash::IsEqual(key, curr_bucket->_key))
                        return true;
                    next_delta = curr_bucket->_next_delta;
                }
            }

        } while (start_timestamp != segment._timestamp[hash & _timestamp_mask]
                 || start_table_mask != segment._table_mask);

        return false;
    }

    // Returns the data stored under key, or _tHash::_EMPTY_DATA if the key is
    // absent.  Lock-free; same retry protocol as containsKey, plus a
    // post-read validation of the bucket before returning the copied value.
    virtual _tData get(const _tKey& key)
    {
        const unsigned int hash   (_tHash::Calc(key));
        Segment&           segment(get_segment(hash));

        int          start_table_mask;
        unsigned int start_timestamp;
        do {
            start_table_mask = segment._table_mask;
            start_timestamp  = segment._timestamp[hash & _timestamp_mask];

            Bucket* curr_bucket(get_bucket(get_table(segment, hash), hash));
            short next_delta(curr_bucket->_first_delta);
            while (_null_delta != next_delta) {
                curr_bucket += next_delta;
                if (hash == curr_bucket->_hash && _tHash::IsEqual(key, curr_bucket->_key)) {
                    _tData rc((_tData&)(curr_bucket->_data));
                    //re-validate: the bucket may have been emptied or the key
                    //relocated while we copied the data
                    if (_tHash::_EMPTY_HASH == curr_bucket->_hash || start_timestamp != segment._timestamp[hash & _timestamp_mask])
                        break;
                    return rc;
                }
                next_delta = curr_bucket->_next_delta;
            }

            //a concurrent resize may already have moved the key to a new table
            const int resize_table_mask(segment._resize_table_mask);
            if (-1 != resize_table_mask) {
                Bucket* curr_bucket(get_bucket(get_table_resize(segment, hash, resize_table_mask), hash));
                short next_delta(curr_bucket->_first_delta);
                while (_null_delta != next_delta) {
                    curr_bucket += next_delta;
                    if (hash == curr_bucket->_hash && _tHash::IsEqual(key, curr_bucket->_key)) {
                        _tData rc((_tData&)(curr_bucket->_data));
                        if (_tHash::_EMPTY_HASH == curr_bucket->_hash || start_timestamp != segment._timestamp[hash & _timestamp_mask])
                            break;
                        return rc;
                    }
                    next_delta = curr_bucket->_next_delta;
                }
            }

        } while (start_timestamp != segment._timestamp[hash & _timestamp_mask]
                 || start_table_mask != segment._table_mask);

        return _tHash::_EMPTY_DATA;
    }

    //modification

    // Inserts (key, data) unless key is already present.  Returns the data
    // already stored under key, or _tHash::_EMPTY_DATA if the insert happened.
    //
    // Algorithm:
    //  (1) Lock the key's segment.
    //  (2) If the key already exists, unlock and return its data.
    //  (3) Preferably place the key on the cache line of its home bucket.
    //  (4) Otherwise take an arbitrary free bucket within SHRT_MAX of the
    //      home bucket (forward, then backward).
    //  (5) If no such bucket exists, resize the segment and retry.
    virtual _tData putIfAbsent(const _tKey& key, const _tData& data)
    {
        const unsigned int hash   (_tHash::Calc(key));
        Segment&           segment(get_segment(hash));

        //look for the key under the segment lock
        segment._lock.aquire();
        Bucket* const table(get_table(segment, hash));
        Bucket* const start_bucket(get_bucket(table, hash));
        Bucket* last_bucket(NULL);
        Bucket* compare_bucket(start_bucket);
        short next_delta(compare_bucket->_first_delta);
        while (_null_delta != next_delta) {
            compare_bucket += next_delta;
            if (hash == compare_bucket->_hash && _tHash::IsEqual(key, compare_bucket->_key)) {
                _tData rc((_tData&)(compare_bucket->_data));
                segment._lock.release();
                return rc;
            }
            last_bucket = compare_bucket;
            next_delta = compare_bucket->_next_delta;
        }

        //try to place the key on the home bucket's cache line
        if (_is_cacheline_alignment) {
            Bucket* free_bucket(start_bucket);
            Bucket* start_cacheline_bucket(get_start_cacheline_bucket(table, start_bucket));
            Bucket* end_cacheline_bucket(start_cacheline_bucket + _cache_mask);
            do {
                if (_tHash::_EMPTY_HASH == free_bucket->_hash) {
                    add_key_to_beginning_of_list(start_bucket, free_bucket, hash, key, data);
                    ++(segment._count);
                    segment._lock.release();
                    return _tHash::_EMPTY_DATA;
                }
                ++free_bucket;
                if (free_bucket > end_cacheline_bucket)
                    free_bucket = start_cacheline_bucket;
            } while (start_bucket != free_bucket);
        }

        //place the key in an arbitrary free forward bucket (delta must fit in
        //a short, hence the SHRT_MAX range limit)
        Bucket* max_bucket(start_bucket + (SHRT_MAX - 1));
        Bucket* last_table_bucket(table + _bucket_mask);
        if (max_bucket > last_table_bucket)
            max_bucket = last_table_bucket;
        Bucket* free_max_bucket(start_bucket + _cache_mask + 1);
        while (free_max_bucket <= max_bucket) {
            if (_tHash::_EMPTY_HASH == free_max_bucket->_hash) {
                add_key_to_end_of_list(start_bucket, free_max_bucket, hash, key, data, last_bucket);
                ++(segment._count);
                segment._lock.release();
                return _tHash::_EMPTY_DATA;
            }
            free_max_bucket += 2;
        }

        //place the key in an arbitrary free backward bucket
        Bucket* min_bucket(start_bucket - (SHRT_MAX - 1));
        if (min_bucket < table)
            min_bucket = table;
        Bucket* free_min_bucket(start_bucket - _cache_mask - 1);
        while (free_min_bucket >= min_bucket) {
            if (_tHash::_EMPTY_HASH == free_min_bucket->_hash) {
                add_key_to_end_of_list(start_bucket, free_min_bucket, hash, key, data, last_bucket);
                ++(segment._count);
                segment._lock.release();
                return _tHash::_EMPTY_DATA;
            }
            free_min_bucket -= 2;
        }

        //no free bucket in range: resize the segment and try again
        segment.resize(_bucket_shift, _bucket_mask);
        segment._lock.release();
        return putIfAbsent(key, data);
    }

    // Removes key and returns the data that was stored under it, or
    // _tHash::_EMPTY_DATA if the key was absent.
    //
    // Algorithm:
    //  (1) Lock the key's segment.
    //  (2) Find the key in its home bucket's list; if absent, unlock, return.
    //  (3) Unlink it and mark its bucket empty.
    //  (4) With cache-line optimization on, try to pull a displaced key of
    //      this cache line into the freed bucket (optimize_cacheline_use).
    //  (5) Unlock and return the removed data.
    virtual _tData remove(const _tKey& key)
    {
        const unsigned int hash(_tHash::Calc(key));
        Segment& segment(get_segment(hash));
        segment._lock.aquire();
        Bucket* const table(get_table(segment, hash));
        Bucket* const start_bucket(get_bucket(table, hash));
        Bucket* last_bucket(NULL);
        Bucket* curr_bucket(start_bucket);
        short next_delta(curr_bucket->_first_delta);
        do {
            if (_null_delta == next_delta) {
                segment._lock.release();
                return _tHash::_EMPTY_DATA;
            }
            curr_bucket += next_delta;

            if (hash == curr_bucket->_hash && _tHash::IsEqual(key, curr_bucket->_key)) {
                _tData const rc((_tData&)(curr_bucket->_data));
                remove_key(segment, start_bucket, curr_bucket, last_bucket, hash);
                if (_is_cacheline_alignment)
                    optimize_cacheline_use(segment, table, curr_bucket);
                segment._lock.release();
                return rc;
            }
            last_bucket = curr_bucket;
            next_delta = curr_bucket->_next_delta;
        } while (true);
    }

    //general

    // Total number of keys (sum of the per-segment counts).  The per-segment
    // debug printf of the original was removed: library code must not write
    // to stdout.
    virtual unsigned int size()
    {
        unsigned int total_size(0);
        for (int iSeg(0); iSeg <= _segment_mask; ++iSeg) {
            total_size += _segments[iSeg]._count;
        }
        return total_size;
    }

    // Percentage of keys residing on the cache line of their home bucket.
    // Diagnostic only; not synchronized with concurrent updates.  Returns
    // 100.0 for an empty map (the original divided by zero).
    double percentKeysInCacheline()
    {
        unsigned int total_in_cache(0);
        unsigned int total(0);

        for (int iSeg(0); iSeg <= _segment_mask; ++iSeg) {

            Segment& segment(_segments[iSeg]);
            const int num_tables(segment._table_mask + 1);

            for (int iTbl(0); iTbl < num_tables; ++iTbl) {
                Bucket* const table(segment._table[iTbl]);
                Bucket* curr_bucket(segment._table[iTbl]);

                for (int iElm(0); iElm <= _bucket_mask; ++iElm, ++curr_bucket) {

                    if (_null_delta != curr_bucket->_first_delta) {
                        Bucket* const startCacheLineBucket(get_start_cacheline_bucket(table, curr_bucket));
                        Bucket* check_bucket(curr_bucket + curr_bucket->_first_delta);
                        int currDist(curr_bucket->_first_delta);
                        do {
                            ++total;
                            if ((check_bucket - startCacheLineBucket) >= 0 && (check_bucket - startCacheLineBucket) <= _cache_mask)
                                ++total_in_cache;
                            if (_null_delta == check_bucket->_next_delta)
                                break;
                            currDist += check_bucket->_next_delta;
                            check_bucket += check_bucket->_next_delta;
                        } while (true);
                    }//if bucket has a key list

                }//for on buckets
            }//for on tables
        }//for on segments

        if (0 == total)
            return 100.0;
        return (((double)total_in_cache) / ((double)total) * 100.0);
    }
};

#endif
