/*
 * hashmap.c
 * Copyright (c) 2009 Vedant Kumar <vminch@gmail.com>
 * 		- with thanks to nvteighen and tinivole of ubuntuforums.org
 * 
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 * 
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "map_internal.h"
#include "log.h"
#include "error.h"
#define MAP_DEFAULT_LOAD_FACTOR 0.75f
/**
 * Initialize the fields of a hash %entry in place.
 * Wrapped in do/while(0) and with every argument parenthesized so the
 * macro expands safely in any statement context (e.g. an unbraced if).
 */
#define hmap_entry_init(entry, hash_code, key, value, next_entry) do {	\
	(entry)->hash = (hash_code); \
	(entry)->k = (key); \
	(entry)->v = (value); \
	(entry)->next = (next_entry); \
} while (0)

#ifdef MAP_DESTRUCTORS
/**
 * Free an %entry together with its key and value, routing the key and
 * value through the map's destructor callbacks when they are set.
 * do/while(0)-wrapped so the macro is a single statement.
 */
#define hmap_entry_free(hmap, entry) do { \
	if (((hmap)->fns).del_key_fn) \
		((hmap)->fns).del_key_fn((entry)->k, (hmap)->storage, (hmap)->type); \
	if (((hmap)->fns).del_val_fn) \
		((hmap)->fns).del_val_fn((entry)->v, (hmap)->storage, (hmap)->type); \
	mem_free(entry, (hmap)->storage, (hmap)->type); \
} while (0)
#else
/**
 * Free an %entry node together with its key and value storage.
 * do/while(0)-wrapped so the macro is a single statement.
 */
#define hmap_entry_free(hmap, entry) do { \
	mem_free((entry)->k, (hmap)->storage, (hmap)->type); \
	mem_free((entry)->v, (hmap)->storage, (hmap)->type); \
	mem_free(entry, (hmap)->storage, (hmap)->type); \
} while (0)
#endif

/**
 * Allocate a new entry, link it at the head of bucket %index, and grow
 * the table once the size crosses the load-factor threshold.
 * BUGFIX: the bucket write used the caller's variable `i` instead of
 * the macro parameter `index`; it only compiled because every caller
 * happened to pass a variable named `i`.
 */
#define hmap_add_entry(hmap, hash, k, v, index) do { \
	hash_entry *new_entry = NULL; \
	if ((hmap)->storage) { \
		new_entry = mem_storage_alloc((hmap)->storage, sizeof(hash_entry), (hmap)->type); \
	} else { \
		new_entry = malloc(sizeof(hash_entry)); \
	} \
	hmap_entry_init(new_entry, hash, k, v, (hmap)->table[index]); \
	(hmap)->table[index] = new_entry; \
	if ((hmap)->size++ > (hmap)->threshold) { \
		hmap_resize(hmap, ((hmap)->capacity) << 1); \
	} \
} while (0)

// hashmaps need a hash function, an equality function, and a destructor
/**
 * Create an empty hashmap with MAP_PRESET_SIZE buckets.
 * When %storage is non-NULL, the map and its bucket table are carved
 * from that mem_storage pool; otherwise plain malloc/calloc is used.
 * BUGFIX: hmap->type was only assigned on the storage path, leaving it
 * uninitialized for heap-allocated maps even though mem_free later
 * receives it; it is now set unconditionally.
 */
hashmap* hmap_new(map_fn *fn, mem_storage *storage, mem_type type) {
	hashmap* hmap = NULL;
	if (storage) {
		hmap = mem_storage_alloc(storage, sizeof(hashmap), type);
		int size = MAP_PRESET_SIZE * sizeof(hash_entry*);
		hmap->table = (hash_entry**) mem_storage_alloc(storage, size, type);
		memset(hmap->table, 0, size);
	} else {
		hmap = (hashmap*) malloc(sizeof(hashmap));
		hmap->table = (hash_entry**) calloc(MAP_PRESET_SIZE,
				sizeof(hash_entry*));
	}
	hmap->type = type;
	hmap->storage = storage;
	hmap->size = 0;
	hmap->capacity = MAP_PRESET_SIZE;
	hmap->threshold = (uint32_t) (hmap->capacity * MAP_DEFAULT_LOAD_FACTOR);
	hmap->fns = *fn;
	return hmap;
}

/**
 * Destroy a hashmap: release every entry (keys and values included),
 * then the bucket table and the map itself. Storage-backed maps are
 * returned to the pool only when they came from HEAP_MEM blocks.
 */
void hmap_free(hashmap* hmap) {
	hmap_clear(hmap, true);
	if (hmap->storage == NULL) {
		free(hmap->table);
		free(hmap);
		return;
	}
	if (hmap->type == HEAP_MEM) {
		mem_storage_put_block(hmap->storage, MEM_BLOCK_PTR(hmap->table));
		mem_storage_put_block(hmap->storage, MEM_BLOCK_PTR(hmap));
	}
}

/**
 * Remove every entry from the map, keeping the bucket table allocated.
 * When %del_item is true the keys and values are destroyed too;
 * otherwise only the entry nodes are released.
 * BUGFIX: hmap->size is now reset to 0 (lmap_clear already did this);
 * previously a cleared map kept its stale size, so subsequent puts
 * crossed the resize threshold far too early.
 */
void hmap_clear(hashmap* hmap, bool del_item) {
	int i = 0;
	hash_entry *entry = NULL;
	hash_entry *next = NULL;
	for (; i < hmap->capacity; ++i) {
		entry = hmap->table[i];
		while (entry != NULL) {
			next = entry->next;
			if (del_item) {
				hmap_entry_free(hmap, entry);
			} else {
				mem_free(entry, hmap->storage, hmap->type);
			}
			entry = next;
		}
		hmap->table[i] = NULL;
	}
	hmap->size = 0;
}

/**
 * Grow the bucket table to %new_capacity and rehash every entry into
 * it. At MAP_MAX_CAPACITY the table is left alone and the threshold is
 * pinned so growth stops being attempted.
 * BUGFIX: on allocation failure the old table is restored and the
 * function returns (previously it logged, then dereferenced the NULL
 * table). The old table is released with the same policy hmap_free
 * uses: free() only for non-storage maps, pool put-back only for
 * HEAP_MEM blocks (it was previously free()'d even for storage-backed
 * non-heap tables).
 */
void hmap_resize(hashmap *hmap, int new_capacity) {
	hash_entry **old_table = hmap->table;
	int old_capacity = hmap->capacity;
	if (old_capacity == MAP_MAX_CAPACITY) {
		hmap->threshold = MAP_MAX_THRESHOLD;
		return;
	}
	if (hmap->storage) {
		int size = new_capacity * sizeof(hash_entry*);
		hmap->table = (hash_entry**) mem_storage_alloc(hmap->storage, size,
				hmap->type);
		if (hmap->table)
			memset(hmap->table, 0, size);
	} else {
		hmap->table = calloc(new_capacity, sizeof(hash_entry *));
	}
	if (!hmap->table) {
		fprintf_log(LOG_ERROR, "[%s] In %s() %d: Can't expands hashmap",
				__FILE__, __FUNCTION__, __LINE__);
		hmap->table = old_table; // keep the map usable at its old size
		return;
	}
	// transfer every entry to its bucket in the new table
	int j = 0;
	int i = 0;
	hash_entry *next = NULL;
	hash_entry *e = NULL;
	for (; j < old_capacity; j++) {
		e = old_table[j];
		while (e != NULL) {
			next = e->next;
			i = index_forhash(e->hash, new_capacity);
			e->next = hmap->table[i];
			hmap->table[i] = e;
			e = next;
		}
	}
	// release the old table with the same ownership policy as hmap_free
	if (!hmap->storage) {
		free(old_table);
	} else if (hmap->type == HEAP_MEM) {
		mem_storage_put_block(hmap->storage, MEM_BLOCK_PTR(old_table));
	}
	hmap->capacity = new_capacity;
	hmap->threshold = (int) (new_capacity * MAP_DEFAULT_LOAD_FACTOR);
}

/**
 * Insert or update the mapping for %k. Returns the value previously
 * bound to the key, or NULL when a fresh entry was created.
 */
object __hmap_put(hashmap* hmap, object k, object v) {
	hash_t code = (hmap->fns).hash_key_fn(k);
	int i = index_forhash(code, hmap->capacity);
	hash_entry *cur = hmap->table[i];
	while (cur != NULL) {
		if (cur->hash == code
				&& (cur->k == k || (hmap->fns).eq_key_fn(cur->k, k))) {
			object previous = cur->v;
			cur->v = v;
			return previous;
		}
		cur = cur->next;
	}
	hmap_add_entry(hmap, code, k, v, i);
	return NULL;
}

/**
 * Look up %k and return its value, or NULL when the key is absent.
 * Pointer identity is checked before the (possibly expensive)
 * equality callback.
 */
object __hmap_get(hashmap* hmap, object k) {
	hash_t code = (hmap->fns).hash_key_fn(k);
	hash_entry *cur = hmap->table[index_forhash(code, hmap->capacity)];
	while (cur != NULL) {
		if (cur->hash == code
				&& (cur->k == k || (hmap->fns).eq_key_fn(cur->k, k)))
			return cur->v;
		cur = cur->next;
	}
	return NULL;
}

/**
 * Remove the mapping for %k. Unlinks the entry from its bucket chain,
 * releases the key (via del_key_fn when MAP_DESTRUCTORS is enabled,
 * plain mem_free otherwise) and the entry node itself, and returns the
 * value — ownership of the value transfers to the caller.
 * Returns NULL when the key is not present.
 */
object __hmap_remove(hashmap *hmap, object k) {
	hash_t hash = (hmap->fns).hash_key_fn(k);
	int i = index_forhash(hash, hmap->capacity);
	hash_entry *prev = hmap->table[i];
	hash_entry *e = prev;
	hash_entry *next = NULL;
	while (e != NULL) {
		next = e->next;
		if (e->hash == hash && (e->k == k || (hmap->fns).eq_key_fn(e->k, k))) {
			hmap->size--;
			if (prev == e) /* match at the bucket head */
				hmap->table[i] = next;
			else
				prev->next = next;
			object value = e->v;
#ifdef MAP_DESTRUCTORS
			if ((hmap->fns).del_key_fn)
				(hmap->fns).del_key_fn(e->k, hmap->storage, hmap->type);
#else
			mem_free(e->k, hmap->storage, hmap->type);
#endif
			mem_free(e, hmap->storage, hmap->type);
			return value;
		}
		prev = e;
		e = next;
	}
	return NULL;
}
/*****************Implementation's Hash Map API End******************/

/*****************Implementation's Linked Hash Map API****************/

/**
 * Create an empty linked hashmap. The sentinel %header entry anchors a
 * circular before/after list that records insertion (or, when
 * %access_order is set, access) order.
 * BUGFIX: on the storage path the header was allocated and zeroed with
 * sizeof(linkedmap) instead of sizeof(linked_entry) — a buffer overflow
 * whenever linked_entry is the larger type. Also sets lmap->type on
 * both paths (it was left uninitialized for malloc-backed maps).
 */
linkedmap * lmap_new(map_fn *fn, bool access_order, mem_storage *storage,
		mem_type type) {
	linkedmap* lmap = NULL;
	linked_entry *header = NULL;
	if (storage) {
		lmap = mem_storage_alloc(storage, sizeof(linkedmap), type);
		int size = MAP_PRESET_SIZE * sizeof(linked_entry*);
		lmap->table = mem_storage_alloc(storage, size, type);
		memset(lmap->table, 0, size);
		header = mem_storage_alloc(storage, sizeof(linked_entry), type);
		memset(header, 0, sizeof(linked_entry));
	} else {
		lmap = (linkedmap*) malloc(sizeof(linkedmap));
		lmap->table = calloc(MAP_PRESET_SIZE, sizeof(linked_entry*));
		header = calloc(1, sizeof(linked_entry));
	}
	lmap->type = type;
	hmap_entry_init(header, -1, NULL, NULL, NULL);
	header->before = header->after = header; // empty ring points at itself
	lmap->header = header;
	lmap->access_order = access_order;
	lmap->storage = storage;
	lmap->size = 0;
	lmap->capacity = MAP_PRESET_SIZE;
	lmap->threshold = (uint32_t) (lmap->capacity * MAP_DEFAULT_LOAD_FACTOR);
	lmap->fns = *fn;
	return lmap;
}

/**
 * Destroy a linked hashmap: free every entry (keys/values included),
 * then the bucket table, the header sentinel, and the map itself.
 * Storage-backed maps are returned to the pool only for HEAP_MEM.
 */
void lmap_free(linkedmap *map) {
	lmap_clear(map, true);
	if (map->storage == NULL) {
		free(map->table);
		free(map->header);
		free(map);
		return;
	}
	if (map->type == HEAP_MEM) {
		mem_storage_put_block(map->storage, MEM_BLOCK_PTR(map->table));
		mem_storage_put_block(map->storage, MEM_BLOCK_PTR(map->header));
		mem_storage_put_block(map->storage, MEM_BLOCK_PTR(map));
	}
}

/**
 * Drop every entry; when %del is true the keys and values are
 * destroyed as well. The bucket table stays allocated, the size is
 * reset, and the header ring is re-pointed at itself.
 */
void lmap_clear(linkedmap *map, bool del) {
	hash_entry *cur = NULL;
	hash_entry *tmp = NULL;
	int slot;
	for (slot = 0; slot < map->capacity; ++slot) {
		cur = map->table[slot];
		while (cur != NULL) {
			tmp = cur->next;
			if (del) {
				hmap_entry_free(map, cur);
			} else {
				mem_free(cur, map->storage, map->type);
			}
			cur = tmp;
		}
		map->table[slot] = NULL;
	}
	map->size = 0;
	map->header->before = map->header->after = map->header;
}

/**
 * Insert or update %k -> %v using the map's configured access-order
 * policy; see lmap_put1 for the full return contract.
 */
object lmap_put(linkedmap *map, object k, object v) {
	return lmap_put1(map, k, v, map->access_order);
}

/**
 * Grow the bucket table of a linked map to %new_capacity. Entries are
 * re-bucketed by walking the header's linked ring (header->after ...
 * header), so the insertion/access order is untouched. At
 * MAP_MAX_CAPACITY the threshold is pinned and nothing is reallocated.
 * BUGFIX: on allocation failure the old table is restored instead of
 * continuing with a NULL table; the old table is released with the
 * same policy lmap_free uses (free() only for non-storage maps, pool
 * put-back only for HEAP_MEM blocks).
 */
void lmap_resize(linkedmap *map, int new_capacity) {
	hash_entry **old_table = map->table;
	int old_capacity = map->capacity;
	if (old_capacity == MAP_MAX_CAPACITY) {
		map->threshold = MAP_MAX_THRESHOLD;
		return;
	}
	if (map->storage) {
		int size = new_capacity * sizeof(hash_entry*);
		map->table = (hash_entry**) mem_storage_alloc(map->storage, size,
				map->type);
		if (map->table)
			memset(map->table, 0, size);
	} else {
		map->table = calloc(new_capacity, sizeof(hash_entry *));
	}
	if (!map->table) {
		fprintf_log(LOG_ERROR, "[%s] In %s() %d: Can't expands hashmap",
				__FILE__, __FUNCTION__, __LINE__);
		map->table = old_table; // keep the map usable at its old size
		return;
	}

	// transfer every entry to the new table, following the linked ring
	linked_entry *e = NULL;
	linked_entry *header = map->header;
	int index = 0;
	for (e = header->after; e != header; e = e->after) {
		index = index_forhash(e->hash, new_capacity);
		e->next = map->table[index];
		map->table[index] = (hash_entry *) e;
	}
	// release the old table with the same ownership policy as lmap_free
	if (!map->storage) {
		free(old_table);
	} else if (map->type == HEAP_MEM) {
		mem_storage_put_block(map->storage, MEM_BLOCK_PTR(old_table));
	}
	map->capacity = new_capacity;
	map->threshold = (int) (new_capacity * MAP_DEFAULT_LOAD_FACTOR);
}



/**
 * Allocate a linked entry, put it at the head of bucket %index, splice
 * it just before the header sentinel (preserving insertion order), and
 * grow the table once the size crosses the threshold.
 * do/while(0)-wrapped with parenthesized arguments for safe expansion.
 */
#define lmap_add_entry(map, hash, k, v, index) do { \
	linked_entry *new_entry = NULL; \
	if ((map)->storage) { \
		new_entry = mem_storage_alloc((map)->storage, sizeof(linked_entry), (map)->type); \
	} else { \
		new_entry = malloc(sizeof(linked_entry)); \
	} \
	hmap_entry_init(new_entry, hash, k, v, (map)->table[index]); \
	(map)->table[index] = (hash_entry*) new_entry; \
	lmap_entry_add_before(new_entry, (map)->header); \
	if ((map)->size++ > (map)->threshold) { \
		lmap_resize(map, ((map)->capacity) << 1); \
	} \
} while (0)

/**
 * Insert or update %k; on a hit, optionally promote the entry in the
 * access list (%access_order). Returns the replaced value, or NULL
 * when the key was newly inserted.
 */
object lmap_put1(linkedmap *map, object k, object v, bool access_order) {
	hash_t code = (map->fns).hash_key_fn(k);
	int i = index_forhash(code, map->capacity);
	linked_entry *cur = (linked_entry*) map->table[i];
	while (cur != NULL) {
		if (cur->hash == code
				&& (cur->k == k || (map->fns).eq_key_fn(cur->k, k))) {
			object previous = cur->v;
			cur->v = v;
			if (access_order)
				lmap_record_access(map, cur);
			return previous;
		}
		cur = (linked_entry*) cur->next;
	}
	lmap_add_entry(map, code, k, v, i);
	return NULL;
}

/**
 * Look up %k using the map's configured access-order policy; see
 * lmap_get1.
 */
object lmap_get(linkedmap *map, object k) {
	return lmap_get1(map, k, map->access_order);
}

/**
 * Find the linked entry for %k, or NULL when absent. Does not touch
 * the access order.
 */
extern linked_entry *lmap_get_entry(linkedmap* map, object k) {
	hash_t code = (map->fns).hash_key_fn(k);
	hash_entry *cur = map->table[index_forhash(code, map->capacity)];
	while (cur != NULL) {
		if (cur->hash == code
				&& (cur->k == k || (map->fns).eq_key_fn(cur->k, k)))
			return (linked_entry *) cur;
		cur = cur->next;
	}
	return NULL;
}

/**
 * Look up %k; when %access_order is set, a hit is recorded so the
 * entry moves to the most-recently-used position. Returns the value,
 * or NULL when the key is absent.
 */
object lmap_get1(linkedmap *map, object k, bool access_order) {
	linked_entry *hit = lmap_get_entry(map, k);
	if (hit == NULL)
		return NULL;
	if (access_order)
		lmap_record_access(map, hit);
	return hit->v;
}

extern linked_entry
* lmap_get_nextentry(linkedmap *map, linked_entry *cur_entry);

/**
 * Link an externally built entry %en into the map: bucket head plus a
 * splice before the header sentinel. Assumes en->hash is already set —
 * no hash is recomputed here. Grows the table when the size crosses
 * the threshold. Returns %en, or NULL on NULL arguments.
 */
linked_entry *lmap_add_entry_head(linkedmap *map, linked_entry *en){
	if (map == NULL || en == NULL)
		return NULL;

	int slot = index_forhash(en->hash, map->capacity);
	en->next = map->table[slot];
	map->table[slot] = (hash_entry*) en;
	lmap_entry_add_before(en, map->header);
	if (map->size++ > map->threshold) {
		lmap_resize(map, map->capacity << 1);
	}
	return en;
}

/**
 * Detach entry %en from the map without freeing it or its key/value:
 * unlink it from the bucket chain and from the before/after ring and
 * decrement the size. Ownership of %en stays with the caller.
 * Returns %en on success, NULL when the key is not found or on NULL
 * arguments. Note the lookup re-hashes en->k rather than trusting
 * en->hash.
 */
linked_entry *lmap_entry_deattach(linkedmap *map, linked_entry *en){
	if (!map || !en)
		return NULL;

	hash_t hash = (map->fns).hash_key_fn(en->k);
	int i = index_forhash(hash, map->capacity);
	hash_entry *prev = map->table[i];
	hash_entry *e = prev;
	hash_entry *next = NULL;
	while (e != NULL) {
		next = e->next;
		if (e->hash == hash && (e->k == en->k || (map->fns).eq_key_fn(e->k, en->k))) {
			map->size--;
			if (prev == e) /* match at the bucket head */
				map->table[i] = next;
			else
				prev->next = next;
			lmap_entry_remove_link((linked_entry *)e);
			//return (linked_entry *)e; <==> return en;
			return en;
		}
		prev = e;
		e = next;
	}

	return NULL;
}

/**
 * Remove the mapping for %k from a linked map: unlink the entry from
 * its bucket chain and from the before/after ring, release the key
 * (del_key_fn under MAP_DESTRUCTORS, mem_free otherwise) and the entry
 * node, and return the value — ownership of the value transfers to the
 * caller. Returns NULL when the key is not present.
 */
void* lmap_remove(linkedmap *map, object k) {
	hash_t hash = (map->fns).hash_key_fn(k);
	int i = index_forhash(hash, map->capacity);
	hash_entry *prev = map->table[i];
	hash_entry *e = prev;
	hash_entry *next = NULL;
	while (e != NULL) {
		next = e->next;
		if (e->hash == hash && (e->k == k || (map->fns).eq_key_fn(e->k, k))) {
			map->size--;
			if (prev == e) /* match at the bucket head */
				map->table[i] = next;
			else
				prev->next = next;
			object value = e->v;
			lmap_entry_remove_link((linked_entry *)e);
#ifdef MAP_DESTRUCTORS
			if ((map->fns).del_key_fn)
				(map->fns).del_key_fn(e->k, map->storage, map->type);
#else
			mem_free(e->k, map->storage, map->type);
#endif
			mem_free(e, map->storage, map->type);
			return value;
		}
		prev = e;
		e = next;
	}
	return NULL;
}

/*****************Implementation's Linked Hash Map API End****************/

/**********************Concurrent Hash Map API*******************************/

/**
 * Initialize one lock-striped segment: a zeroed bucket table prefixed
 * by its capacity (LENGTH_STORAGE_SIZE header bytes), an empty
 * free-entry recycling stack, and the segment mutex.
 */
void segment_init(segment *segm, int capacity) {
	int bytes = LENGTH_STORAGE_SIZE + capacity * sizeof(concurr_entry*);
	segm->table = malloc(bytes);
	memset(segm->table, 0, bytes);
	// the first LENGTH_STORAGE_SIZE bytes record the bucket count
	*(int *) segm->table = capacity;
	segm->threshold = (int) (capacity * MAP_DEFAULT_LOAD_FACTOR);
	segm->count = 0;
	segm->mod_count = 0;
	segm->free_size = 0;
	segm->free_entries = calloc(capacity, sizeof(concurr_entry*));
	pthread_mutex_init(&segm->lock, NULL);
}

/**
 * Create a concurrent map split into power-of-two lock-striped
 * segments. %concurr_lvl bounds the segment count (capped at
 * MAP_MAX_SEGMENTS); %capacity is divided across the segments, each
 * rounded up to a power of two.
 */
concurrentmap * cmap_new(map_fn *fns, int capacity, int concurr_lvl) {
	concurrentmap *map = malloc(sizeof(concurrentmap));
	map->fns = *fns;
	if (concurr_lvl > MAP_MAX_SEGMENTS)
		concurr_lvl = MAP_MAX_SEGMENTS;
	// round the segment count up to a power of two
	int shift = 0;
	int seg_count = 1;
	while (seg_count < concurr_lvl) {
		++shift;
		seg_count <<= 1;
	}
	map->segm_shift = 32 - shift;
	map->segm_mask = seg_count - 1;
	map->segms = malloc(seg_count * sizeof(segment));
	if (capacity > MAP_MAX_CAPACITY)
		capacity = MAP_MAX_CAPACITY;
	// per-segment capacity: ceil(capacity / seg_count), then round up
	// to a power of two
	int need = capacity / seg_count;
	if (need * seg_count < capacity)
		++need;
	int seg_cap = 1;
	while (seg_cap < need)
		seg_cap <<= 1;
	int i;
	for (i = 0; i < seg_count; ++i)
		segment_init(map->segms + i, seg_cap);
	return map;
}

/**
 * Applies a supplemental hash function to a given hashCode, which
 * defends against poor quality hash functions.  This is critical
 * because ConcurrentHashMap uses power-of-two length hash tables,
 * that otherwise encounter collisions for hashCodes that do not
 * differ in lower or upper bits.
 */
// NOTE(review): assumes hash_t is an unsigned 32-bit type — the right
// shifts would behave differently on a signed type; confirm the typedef.
hash_t hash(hash_t h) {
	// Spread bits to regularize both segment and index locations,
	// using variant of single-word Wang/Jenkins hash.
	h += (h << 15) ^ 0xffffcd7d;
	h ^= (h >> 10);
	h += (h << 3);
	h ^= (h >> 6);
	h += (h << 2) + (h << 14);
	return h ^ (h >> 16);
}


/* release a segment's bucket table (length header included) */
#define SEGMENT_FREE_DATA(segm) free((segm)->table)

/* adapter: concurr_entry shares the hash_entry field layout, so it can
 * reuse hmap_entry_init (note the reordered argument list) */
#define concurr_entry_init(entry, key, hash_code, next_entry, value)  hmap_entry_init(entry, hash_code, key, value, next_entry)

/**
 * Pop a recycled entry from the segment's free stack, or malloc a
 * fresh one when the stack is empty. do/while(0)-wrapped so the macro
 * expands safely even in unbraced if/else bodies.
 */
#define SEGMENT_GET_FREE_ENTRY(entry_ptr, segm) do { \
	if ((segm)->free_size > 0) { \
		--((segm)->free_size); \
		entry_ptr = (segm)->free_entries[(segm)->free_size]; \
	} else { \
		entry_ptr = malloc(sizeof(concurr_entry)); \
	} \
} while (0)

/**
 * Push a no-longer-linked entry onto the segment's free stack for
 * later reuse. do/while(0)-wrapped for safe expansion.
 */
#define SEGMENT_PUT_FREE_ENTRY(entry_ptr, segm) do { \
	(segm)->free_entries[(segm)->free_size] = entry_ptr; \
	++((segm)->free_size); \
} while (0)

/**
 * Load the head of the bucket for %hash_code from a segment's table,
 * whose capacity is stored in a length header before the bucket array.
 * do/while(0)-wrapped so the internal locals stay scoped and the macro
 * expands safely in any statement context.
 */
#define GET_FIRST_ENTRY(first_entry, segm, hash_code) do { \
		concurr_entry** tab = (concurr_entry **) (segm)->table; \
		int capacity = GET_LENGTH(tab); \
		tab = GET_REAL_TABLE(tab, concurr_entry**); \
		first_entry = tab[(hash_code) & (capacity - 1)]; \
} while (0)

/**
 * Double a segment's bucket table and redistribute its entries.
 * Caller must hold segm->lock. Mirrors java.util.concurrent's
 * Segment.rehash(): the trailing run of each chain that already maps
 * to one new bucket is reused in place; nodes before that run are
 * cloned (drawn from the free-entry stack) so readers traversing the
 * old chains are not disturbed, and the displaced originals are
 * recycled only after the new table is published.
 */
void segment_rehash(concurrentmap *map, segment *segm) {
	concurr_entry **old_table = (concurr_entry **)segm->table;
	int old_capacity = GET_LENGTH(old_table);
	if (old_capacity >= MAP_MAX_CAPACITY)
		return;
	old_table = GET_REAL_TABLE(old_table, concurr_entry**);
	/*
	 * Reclassify nodes in each list to new Map.  Because we are
	 * using power-of-two expansion, the elements from each bin
	 * must either stay at same index, or move with a power of two
	 * offset. We eliminate unnecessary node creation by catching
	 * cases where old nodes can be reused because their next
	 * fields won't change. Statistically, at the default
	 * threshold, only about one-sixth of them need cloning when
	 * a table doubles. The nodes they replace will be garbage
	 * collectable as soon as they are no longer referenced by any
	 * reader thread that may be in the midst of traversing table
	 * right now.
	 */
	int new_capacity = (old_capacity << 1);
	// NOTE(review): realloc result is unchecked and assigned directly —
	// on failure the original free_entries pointer leaks.
	segm->free_entries = realloc(segm->free_entries, new_capacity
			* sizeof(concurr_entry*));
	int i = 0;
	for (i = segm->free_size; i < new_capacity; i++) {
		segm->free_entries[i] = NULL;
	}
	int size = new_capacity * sizeof(concurr_entry*)
			+ LENGTH_STORAGE_SIZE;
	concurr_entry **new_table = malloc(size);
	memset(new_table, 0, size);
	SET_LENGTH(new_capacity, new_table);
	new_table = GET_REAL_TABLE(new_table, concurr_entry **);
	segm->threshold = (int) (new_capacity * MAP_DEFAULT_LOAD_FACTOR);
	int size_mask = new_capacity - 1;
	// originals displaced by clones, recycled after publication
	concurr_entry **free_holder = calloc(segm->count, sizeof(concurr_entry *));
	int counter = 0;
	for (i = 0; i < old_capacity; i++) {
		// We need to guarantee that any existing reads of old Map can
		//  proceed. So we cannot yet null out each bin.
		concurr_entry *e = old_table[i];

		if (e != NULL) {
			concurr_entry *next = e->next;
			int idx = e->hash & size_mask;

			//  Single node on list
			if (next == NULL)
				new_table[idx] = e;
			else {
				// Reuse trailing consecutive sequence at same slot
				concurr_entry *last_run = e;
				int last_idx = idx;
				concurr_entry *last = NULL;
				for (last = next; last != NULL; last = last->next) {
					int k = last->hash & size_mask;
					if (k != last_idx) {
						last_idx = k;
						last_run = last;
					}
				}
				new_table[last_idx] = last_run;

				// Clone all remaining nodes
				concurr_entry* p = NULL;
				concurr_entry* new_entry = NULL;
				for (p = e; p != last_run; p = p->next) {
					int k = p->hash & size_mask;
					concurr_entry* n = new_table[k];
					SEGMENT_GET_FREE_ENTRY(new_entry, segm);
					concurr_entry_init(new_entry, p->k, p->hash, n, p->v);
					new_table[k] = new_entry;
					free_holder[counter] = p;
					++counter;
				}
			}
		}
	}
	segm->table = GET_ABSTRACT_TABLE(new_table, volatile concurr_entry **);
	for (i = 0; i < counter; ++i) {
		SEGMENT_PUT_FREE_ENTRY(free_holder[i], segm);
	}
	free(free_holder);
	old_table = GET_ABSTRACT_TABLE(old_table, concurr_entry **);
	// NOTE(review): the old table is freed immediately even though
	// segment_get reads the table without the lock — a reader still on
	// the old table would access freed memory; verify intended usage.
	free(old_table);
}

/**
 * Locked insert/update of %k within one segment. Returns the previous
 * value (left untouched when %only_if_absent is set), or NULL when a
 * new entry was created.
 */
object segment_put(concurrentmap *map, segment *segm, object k, hash_t hash_code,
		object v, bool only_if_absent) {
	pthread_mutex_lock(&segm->lock);
	int c = segm->count;
	if (c++ > segm->threshold) // ensure capacity
		segment_rehash(map, segm);
	// (re)load the table only after the possible rehash above
	concurr_entry** tab = (concurr_entry **)segm->table;
	int index = hash_code & (GET_LENGTH(tab) - 1);
	tab = GET_REAL_TABLE(tab, concurr_entry **);
	concurr_entry* first = tab[index];
	concurr_entry* e = first;
	while (e != NULL && (e->hash != hash_code || !(map->fns).eq_key_fn(k, e->k)))
		e = e->next;

	object old_value;
	if (e != NULL) {
		// key present: report the old value; overwrite unless
		// put-if-absent was requested
		old_value = e->v;
		if (!only_if_absent)
			e->v = v;
	} else {
		old_value = NULL;
		++segm->mod_count;
		concurr_entry* new_entry;
		SEGMENT_GET_FREE_ENTRY(new_entry, segm);
		concurr_entry_init(new_entry, k, hash_code, first, v);
		tab[index] = new_entry;
		segm->count = c; // write-volatile
	}
	pthread_mutex_unlock(&segm->lock);
	return old_value;
}

/**
 * Unsynchronized variant of segment_put — identical logic with no
 * mutex. The caller must guarantee exclusive access to the segment.
 * Returns the previous value (untouched when %only_if_absent), or NULL
 * when a new entry was created.
 */
object segment_put_nosyn(concurrentmap *map, segment *segm, object k, hash_t hash_code,
		object v, bool only_if_absent) {
	int c = segm->count;
	if (c++ > segm->threshold) // ensure capacity
		segment_rehash(map, segm);
	// (re)load the table only after the possible rehash above
	concurr_entry** tab = (concurr_entry **)segm->table;
	int index = hash_code & (GET_LENGTH(tab) - 1);
	tab = GET_REAL_TABLE(tab, concurr_entry **);
	concurr_entry* first = tab[index];
	concurr_entry* e = first;
	while (e != NULL && (e->hash != hash_code || !(map->fns).eq_key_fn(k, e->k)))
		e = e->next;

	object old_value;
	if (e != NULL) {
		old_value = e->v;
		if (!only_if_absent)
			e->v = v;
	} else {
		old_value = NULL;
		++segm->mod_count;
		concurr_entry* new_entry;
		SEGMENT_GET_FREE_ENTRY(new_entry, segm);
		concurr_entry_init(new_entry, k, hash_code, first, v);
		tab[index] = new_entry;
		segm->count = c; // write-volatile
	}
	return old_value;
}

/**
 * Atomically replace the value for %k when it is currently mapped.
 * Returns the old value, or NULL when the key was absent.
 */
object segment_replace(concurrentmap *map, segment *segm, object k, hash_t hash_code,
		object v) {
	pthread_mutex_lock(&segm->lock);
	concurr_entry* cur;
	GET_FIRST_ENTRY(cur, segm, hash_code);
	while (cur != NULL
			&& (cur->hash != hash_code || !(map->fns).eq_key_fn(k, cur->k)))
		cur = cur->next;

	object previous = NULL;
	if (cur != NULL) {
		previous = cur->v;
		cur->v = v;
	}
	pthread_mutex_unlock(&segm->lock);
	return previous;
}

/**
 * Lock-free read of %k from one segment. On a hit whose value reads as
 * NULL, the segment lock is acquired and released once — the same
 * recheck-under-lock dance java.util.concurrent uses to wait out an
 * in-flight writer — before the value is returned.
 */
object segment_get(concurrentmap *map, segment *segm, object k, hash_t hash_code) {
	if (segm->count == 0) // read-volatile
		return NULL;
	concurr_entry* cur;
	GET_FIRST_ENTRY(cur, segm, hash_code);
	for (; cur != NULL; cur = cur->next) {
		if (cur->hash == hash_code && (map->fns).eq_key_fn(k, cur->k)) {
			object val = cur->v;
			if (val == NULL) {
				pthread_mutex_lock(&segm->lock); // recheck
				pthread_mutex_unlock(&segm->lock);
			}
			return val;
		}
	}
	return NULL;
}

/**
 * Locked removal of %k from one segment. Stores the entry's original
 * key through %orga_key (so the caller can release it) and returns the
 * removed value, or NULL when the key was not present.
 * BUGFIX: the scan now stops once the entry is unlinked and recycled —
 * the old loop kept walking with `prev` pointing at the node just
 * pushed onto the free stack, so a second match would have corrupted a
 * recycled entry.
 */
object segment_remove(concurrentmap *map, segment *segm, object k, hash_t hash_code, object *orga_key) {
	pthread_mutex_lock(&segm->lock);
	int c = segm->count - 1;
	concurr_entry** tab = (concurr_entry **)segm->table;
	int capacity = GET_LENGTH(tab);
	tab = GET_REAL_TABLE(tab, concurr_entry **);
	int index = hash_code & (capacity - 1);
	concurr_entry* prev = tab[index];
	concurr_entry* e = prev;
	concurr_entry* next = NULL;
	object value = NULL;
	while (e != NULL) {
		next = e->next;
		if (e->hash == hash_code && (e->k == k || (map->fns).eq_key_fn(e->k, k))) {
			value = e->v;
			++segm->mod_count;
			if (prev == e)
				tab[index] = next;
			else
				prev->next = next;
			segm->count = c; // write-volatile
			*orga_key = e->k;
			SEGMENT_PUT_FREE_ENTRY(e, segm);
			break;
		}
		prev = e;
		e = next;
	}
	pthread_mutex_unlock(&segm->lock);
	return value;
}

/**
 * Maps the specified key to the specified value in this table.
 * Neither the key nor the value can be null.
 *
 * <p> The value can be retrieved by calling the <tt>get</tt> method
 * with a key that is equal to the original key.
 *
 * @param key key with which the specified value is to be associated
 * @param value value to be associated with the specified key
 * @return the previous value associated with <tt>key</tt>, or
 *         <tt>null</tt> if there was no mapping for <tt>key</tt>
 */
object cmap_put(concurrentmap *map, object k, object v) {
	// NULL is the "no previous mapping" sentinel, so NULL keys/values
	// are rejected outright
	if (k == NULL || v == NULL) {
		AL_Error(AL_StsNullPtr, "Null Pointer");
	}
	// supplemental hash spreads the bits before segment/bucket selection
	hash_t hash_code = hash((map->fns).hash_key_fn(k));
	return segment_put(map, SEGMENT_FOR(map, hash_code), k, hash_code, v, false);
}

/**
 * Returns the value to which the specified key is mapped,
 * or {@code null} if this map contains no mapping for the key.
 *
 * <p>More formally, if this map contains a mapping from a key
 * {@code k} to a value {@code v} such that {@code key.equals(k)},
 * then this method returns {@code v}; otherwise it returns
 * {@code null}.  (There can be at most one such mapping.)
 *
 */
object cmap_get(concurrentmap* map, object k) {
	// supplemental hash spreads the bits before segment/bucket selection
	hash_t hash_code = hash((map->fns).hash_key_fn(k));
	return segment_get(map, SEGMENT_FOR(map, hash_code), k, hash_code);
}

/*
 If the specified key is not already associated with a value, associate it with the given value. This is equivalent to

 if (!map.containsKey(key))
 return map.put(key, value);
 else
 return map.get(key);

 except that the action is performed atomically.
 Specified by: putIfAbsent(...) in ConcurrentMap

 Parameters:
 key key with which the specified value is to be associated
 value value to be associated with the specified key
 Returns:
 the previous value associated with the specified key, or null if there was no mapping for the key*/
object cmap_put_absent(concurrentmap *map, object k, object v) {
	// NULL is the "absent" sentinel, so NULL keys/values are rejected
	if (k == NULL || v == NULL) {
		AL_Error(AL_StsNullPtr, "Null Pointer");
	}
	hash_t hash_code = hash((map->fns).hash_key_fn(k));
	return segment_put(map, SEGMENT_FOR(map, hash_code), k, hash_code, v, true);
}

/*Replaces the entry for a key only if currently mapped to some value. This is equivalent to

 if (map.containsKey(key)) {
 return map.put(key, value);
 } else return null;

 except that the action is performed atomically.
 Specified by: replace(...) in ConcurrentMap

 Parameters:
 key key with which the specified value is associated
 value value to be associated with the specified key
 Returns:
 the previous value associated with the specified key, or null if there was no mapping for the key*/
object cmap_replace(concurrentmap *map, object k, object v) {
	// NULL is the "absent" sentinel, so NULL keys/values are rejected
	if (k == NULL || v == NULL) {
		AL_Error(AL_StsNullPtr, "Null Pointer");
	}
	hash_t hash_code = hash((map->fns).hash_key_fn(k));
	return segment_replace(map, SEGMENT_FOR(map, hash_code), k, hash_code, v);
}

/**
 * Removes the key (and its corresponding value) from this map.
 * This method does nothing if the key is not in the map.
 *
 * @param  key the key that needs to be removed
 * @param  orga_key out-parameter receiving the entry's original key
 *         pointer so the caller can release it
 * @return the previous value associated with <tt>key</tt>, or
 *         <tt>null</tt> if there was no mapping for <tt>key</tt>
 */
extern object cmap_remove(concurrentmap *map, object k, object *orga_key) {
	hash_t hash_code = hash((map->fns).hash_key_fn(k));
	return segment_remove(map, SEGMENT_FOR(map, hash_code), k, hash_code, orga_key);
}

/** Bulk clear is not implemented for the concurrent map; always raises. */
void cmap_clear(concurrentmap *map, bool del) {
	AL_Error(AL_StsNotSupport, "doesn't support");
}

/** Destruction is not implemented for the concurrent map; always raises. */
void cmap_free(concurrentmap *map) {
	AL_Error(AL_StsNotSupport, "doesn't support");
}
/**********************Concurrent Hash Map API END*******************************/

#ifdef HMAP_MAKE_HASHFN
// Robert Jenkins' 32 bit integer hash function
/**
 * Hash callback for keys that point at a 32-bit integer.
 * BUGFIX: the accumulator was declared `static`, which made the
 * function needlessly non-reentrant (a data race if two threads hash
 * at once); it is now an ordinary local.
 */
hash_t int_hash_fn(object in) {
	uint32_t a = *((uint32_t*) in);

	a = (a + 0x7ed55d16) + (a << 12);
	a = (a ^ 0xc761c23c) ^ (a >> 19);
	a = (a + 0x165667b1) + (a << 5);
	a = (a + 0xd3a2646c) ^ (a << 9);
	a = (a + 0xfd7046c5) + (a << 3);
	a = (a ^ 0xb55a4f09) ^ (a >> 16);

	return a;
}

/**
 * Equality callback for int keys: compares the pointed-to values.
 */
bool int_eq_fn(object a, object b) {
	int lhs = *((int*) a);
	int rhs = *((int*) b);
	return lhs == rhs;
}

// String hash: Java-style 31-multiplier accumulation (String.hashCode)
// followed by HashMap's supplemental shift/xor bit spreader.
// NOTE(review): the old comment credited Dan Bernstein's djb2, but the
// multiplier here is 31 (djb2 uses 33 and has no finalizer) — corrected.
hash_t str_hash_fn(object in) {
	hash_t hash = 0;
	char *str = in;
	for (; *str != '\0'; str++) {
		hash = 31 * hash + *str;
	}
	hash ^= (hash >> 20) ^ (hash >> 12);
	hash = hash ^ (hash >> 7) ^ (hash >> 4);
	return hash;
}

/**
 * Equality callback for NUL-terminated string keys.
 */
bool str_eq_fn(object a, object b) {
	return strcmp((char*) a, (char*) b) == 0;
}

#endif
