/**
 * Copyright (C) 2008+ Saifeng Zeng
 *		 2008+ Spark Zheng
 *
 * @file	src/mempool.c
 * @brief 
 *      A thread-safe memory-pool module. A generic infrastructure.
 *      -1. mpool (slab implementation)
 *      -2. marena
 *
 * @history
 *      version 0.0.1 (initialized by Spark Zheng, use arena as mpool)
 *      version 0.1.0 (enhanced by Saifeng Zeng, add really mpool module)
 *      version 0.2.0 (modified by Spark Zheng, accomplish mpool)
 *      version 0.2.2 (modified by Spark Zheng,
 *	      join arena and mpool in the module)
 *      version 0.3.0 (modified by Saifeng Zeng, change free algorithm,
 *	      abandon rbtree)
 *      version 0.3.1 (modified by Spark Zheng,
 *	      bug-fixed in dlib_mpool_realloc() function.)
 *      version 0.4.0 (modified by Spark Zheng,
 *	      tidy up as a general-utils module)
 *
 */

#include "mempool.h"

#include <inttypes.h>

#include "lock.h"
#include "list.h"

#ifdef _DLIB_MPOOL_USE_RBTREE
#include "tree.h"
#endif

/** internal free alias; all pool-owned memory is released through this */
#define _dfree		dfree

/**
 * MPOOL module, the first implementation of mempool
 * memory pool:
 *      -organize chunk fragments in hashtable,
 *      -provide an interface like general memory apis.
 */

/** space of each node: 1024 * 128 (128K) */
#define MPOOL_SLUB_LENGTH	131072U
/** the max space of a mempool: 8192 * 128K = 1G */
#define MPOOL_BUCKETS_MAX_NUM	8192U

/** tidy-up policies for _mpool_tidyup_mhead() */
typedef enum {
	MPOOL_TIDY_TENDER = 1,	/** reclaim only completely-free nodes */
	MPOOL_TIDY_FORCE	/** reclaim every node unconditionally */
} mpool_tidy_t;

/**
 * Macro to calculate the mpool bucket count.
 * BUGFIX: the FACTOR expansion used a bare `iter` in the multiplication,
 * so an expression argument (e.g. `f + 1`) would bind incorrectly; it is
 * now parenthesized.
 * NOTE: the FACTOR variant clobbers its @min argument; callers must not
 * rely on @min afterwards (dlib_mpool_init() stores it beforehand).
 */
#ifdef _DLIB_MPOOL_USE_FACTOR
#define MPOOL_BUCKETS_NUM(ret, min, max, iter)	do { \
		ret = 1; \
		while ((min) < (max)) { \
			ret++; \
			(min) = (min) * (iter); \
		} } while (0)
#else
#define MPOOL_BUCKETS_NUM(ret, min, max, iter) \
		ret = (((max) + (iter) -1) - (min)) / (iter) + 1
#endif


#ifdef _DEBUG
/**
 * Per-bucket allocation statistics (debug builds only).
 * The pool allocates mp_buckets + 1 of these; the extra slot
 * [mp_buckets] tracks oversized "raw" allocations that bypass the slab.
 */
struct mhstatics {
	uint32_t	mh_index;	/** mhead index */

	uint32_t	mh_expand_num;	/** how many slab nodes were created */
	uint64_t	mh_total_size;	/** mhead total size */
	uint64_t	mh_used_size;	/** mhead used size */

	uint64_t	mh_alloc_num;	/** mhead alloc num */
	uint64_t	mh_free_num;	/** mhead free num */

	dlib_lock_t	mh_statics_lock;/** protects all counters above */
};
#endif

/**
 * The pool proper: an array of mp_buckets bucket heads. Bucket i serves
 * fixed-size blocks — an arithmetic series (min + i * addition) by
 * default, or a geometric one (min * factor^i) when
 * _DLIB_MPOOL_USE_FACTOR is set.
 */
struct mpool {
	uint32_t	mp_id;		/** mpool identity */
	uint32_t	mp_status;	/** mpool status */

	uint32_t	mp_min_block;	/** mpool min block size */
	uint32_t	mp_max_block;	/** mpool max block size */

#ifdef _DLIB_MPOOL_USE_FACTOR
	float		mp_factor;	/** mpool block factor */
#else
	uint32_t	mp_addition;	/** mpool block addition */
#endif

	uint32_t	mp_buckets;	/** mpool bucket number */
	dlib_mhead_t	*buckets;	/** mpool buckets, hooked by dlib_mhead_t */

#ifdef _DLIB_MPOOL_USE_RBTREE
	rbtree_head_t	mp_rbhead;	/** maps chunk address -> owning mnode */
#endif

#ifdef _DEBUG
	dlib_mhstatics_t	*statics;	/** statistics, mp_buckets + 1 entries */
#endif
};

/**
 * One bucket: a list of slab nodes, all carrying blocks of mh_block_size.
 * Invariant kept by malloc/free: nodes with free blocks stay at the list
 * head; a node that becomes full is moved to the tail.
 */
struct mhead {
	uint32_t	mh_index;	/** mhead bucket index */
	uint32_t	mh_block_size;	/** bucket block size */
	uint32_t	mh_node_number;	/** mhead node number */
	list_node_t	mh_head;	/** head of the node list */

	dlib_lock_t	mh_lock;	/** mhead lock for nodes */
};

/**
 * One slab node carrying MPOOL_SLUB_LENGTH bytes of payload.
 * Free-list encoding (see dlib_mpool_malloc/free): `next` is the index of
 * the next block to hand out; a freed block stores (previous next + 1) in
 * its first 4 bytes, and a stored 0 means "block never used yet".
 * In the non-rbtree build every block is prefixed by a long tag holding
 * the owning node's address (tag 0 marks a raw, non-pool allocation).
 */
struct mnode {
	dlib_mhead_t	*mh;		/** the head mhead pointer */
	uint32_t	next;		/** index of the next free block */
	uint32_t	block_num;	/** node block number */
	uint32_t	free_num;	/** node free block number */
	list_node_t	hl;		/** list node linked to mhead(head) */
#ifdef _DLIB_MPOOL_USE_RBTREE
	rbtree_node_t	rbnode;		/** rbtree node linked to mpool(root) */
#endif

	dlib_lock_t	mn_lock;	/** mnode lock */

	uint8_t		chunk[];	/** data flexible array (the blocks) */
};

/**
 * MARENA module, the second implementation of mempool
 */

/*
 * marena_clean_fn
 * A method cleanup m
 * @m: arena entry need to be cleanup
 */
typedef void (*marena_clean_fn) (void *m);

/** 
 * Data chunks without reference count
 * If need reference count, then need to change the return of marenas.
 * For example, dlib_marena_calloc returns dlib_maheap_t * compare to void *.
 */
struct maheap {
	void		*chunk;		/** memory chunk,the main object */
	uint32_t	size;		/** total size of this heap */
	uint32_t	used;		/** used size of this heap */
};
/** A link-list for many chunks attached by a cleanup function */
struct mafree {
	list_node_t	l;		/** list_node */

	marena_clean_fn	clean_cb;	/** cleaner function */
	void		*m;		/** object which need to cleaner */
};

/**
 * MARENA main struct
 * A global array which preserve this struct is recommend.
 */
struct marena {
	uint32_t	mp_id;		/** identity of marena */
	uint32_t	original_size;	/** size of the most recent heap (refill size) */
	uint32_t	size;		/** total bytes reserved by this marena */

	dlib_mafree_t	*cleaner;	/** head of the cleanup list (heaps + raw chunks) */
	dlib_maheap_t	*heap;		/** current working heap in marena */
};

/* mpool-and-marena id counter (NOTE: incremented without a lock, so ids
 * may collide when pools/arenas are created concurrently) */
static uint32_t mp_id = 0;

/** mpool implementation */

#ifdef _DLIB_MPOOL_USE_RBTREE
/** 
 * _mpool_compare
 * @brief red-black tree compare function.
 * @param one: first element
 * @param two: second element
 */
static int _mpool_compare(const void *one, const void *two);
#endif

/** 
 * _mpool_bucket_index
 * @brief judge the index of buckets by chunk size.
 * @param mp: which mpool
 * @param size: chunk size
 */
static int _mpool_bucket_index(dlib_mpool_t *mp, uint32_t size);

/** 
 * _mhead_init
 * @brief constructure of mhead struct.
 * @param mp: which mpool
 */
static int _mhead_init(dlib_mpool_t *mp);

/** 
 * _mhead_exit
 * @brief destructure of mhead struct.
 * @param mp: which mpool
 */
static void _mhead_exit(dlib_mpool_t *mp);

#ifdef _DEBUG
/**
 * _mhstatics_init
 * @brief constructure of mhstatics struct.
 * @param mp: which mpool
 */
static int _mhstatics_init(dlib_mpool_t *mp);

/** 
 * _mhstatics_exit
 * @brief destructor of mhstatics struct.
 * @param mp: which mpool
 */
static void _mhstatics_exit(dlib_mpool_t *mp);
#endif

/**
 * _mnode_init
 * @brief constructure of mnode struct.
 * @param mh: which mhead
 */
static dlib_mnode_t *_mnode_init(dlib_mhead_t *mh);

/** 
 * _mnode_exit
 * @brief destructure of mnode struct.
 * @param mn: which mnode
 */
static void _mnode_exit(dlib_mnode_t *mn);

/** 
 * _mpool_tidyup_mhead
 * @brief tidyup each mnode in mhead.
 * @param mp: which mpool
 * @param mh: which mhead
 * @param type: do it on force or in tender
 */
static int _mpool_tidyup_mhead(dlib_mpool_t *mp, dlib_mhead_t *mh,
			       mpool_tidy_t type);

#ifdef _DLIB_MPOOL_USE_RBTREE
/**
 * Interval compare needed by the rbtree for insert && search: an address
 * falling inside [one, one + MPOOL_SLUB_LENGTH) matches node `one`.
 * NOTE(review): pointers are cast through unsigned long — assumes
 * sizeof(void *) <= sizeof(unsigned long) (true on LP64, not on Win64);
 * confirm target platforms.
 */
static inline int _mpool_compare(const void *one, const void *two)
{
	unsigned long a = (const unsigned long) one;
	unsigned long b = (const unsigned long) two;

	/* does `two` fall inside the slab span starting at `a`? */
	if (a <= b && (a + MPOOL_SLUB_LENGTH) > b) {
		return 0;
	} else if ((a + MPOOL_SLUB_LENGTH) <= b) {
		return -1;
	} else {
		return 1;
	}
}
#endif

/**
 * Map a requested chunk size to the index of the bucket that serves it.
 * Returns -1 when the size exceeds every bucket (caller falls back to a
 * raw allocation).
 */
static inline int _mpool_bucket_index(dlib_mpool_t *mp, uint32_t size)
{
#ifdef _DLIB_MPOOL_USE_FACTOR
	uint32_t idx;

	/* linear scan for the first bucket big enough
	 * (a binary search would also work here) */
	for (idx = 0; idx < mp->mp_buckets; idx++) {
		if (mp->buckets[idx].mh_block_size >= size)
			return (int) idx;
	}

	return -1;
#else
	int idx;

	/* anything at or below the minimum lands in bucket 0 */
	if (size <= mp->mp_min_block)
		return 0;

	/* ceil((size - min) / addition) for size > min */
	idx = (size - mp->mp_min_block - 1) / mp->mp_addition + 1;

	return (idx > (int) (mp->mp_buckets - 1)) ? -1 : idx;
#endif
}

/**
 * _mhead_init
 * @brief initialize every bucket head: index, block size, node list, lock.
 * BUGFIX: the FACTOR build read mp->mp_min_block in an initializer before
 * the NULL check on @mp; the read now happens after the check.
 * @param mp: which mpool (buckets array must already be allocated)
 * @return 0 on success, -1 when @mp is NULL
 */
static inline int _mhead_init(dlib_mpool_t *mp)
{
	uint32_t i;
#ifdef _DLIB_MPOOL_USE_FACTOR
	uint32_t block_size_temp;
#endif

	if (mp == NULL)
		return -1;

#ifdef _DLIB_MPOOL_USE_FACTOR
	block_size_temp = mp->mp_min_block;
#endif

	for (i = 0; i < mp->mp_buckets; i++) {
		mp->buckets[i].mh_index = i;
#ifdef _DLIB_MPOOL_USE_FACTOR
		/* geometric series: size[0] = min, size[i] = size[i-1] * factor */
		if (i == 0) {
			mp->buckets[i].mh_block_size = block_size_temp;
		} else {
			mp->buckets[i].mh_block_size =
				block_size_temp * mp->mp_factor;
			block_size_temp = mp->buckets[i].mh_block_size;
		}
#else
		/* arithmetic series: size[i] = min + i * addition */
		mp->buckets[i].mh_block_size =
			mp->mp_min_block + i * mp->mp_addition;
#endif
		mp->buckets[i].mh_node_number = 0;
		LIST_HEAD_INIT(&(mp->buckets[i].mh_head));
		dlib_lock_init(&(mp->buckets[i].mh_lock));
	}

	return 0;
}

/**
 * Tear down the per-bucket locks; slab nodes themselves are released
 * beforehand by the force tidyup in dlib_mpool_exit().
 */
static inline void _mhead_exit(dlib_mpool_t *mp)
{
	uint32_t idx;

	if (mp == NULL)
		return;

	for (idx = 0; idx < mp->mp_buckets; idx++)
		dlib_lock_exit(&mp->buckets[idx].mh_lock);
}

#ifdef _DEBUG
/**
 * Initialize the statistics table: one slot per bucket plus the extra
 * slot [mp_buckets] that accounts for raw oversized allocations.
 * @return 0 on success, -1 when @mp is NULL
 */
static inline int _mhstatics_init(dlib_mpool_t *mp)
{
	uint32_t idx;
	dlib_mhstatics_t *st;

	if (mp == NULL)
		return -1;

	for (idx = 0; idx <= mp->mp_buckets; idx++) {
		st = &mp->statics[idx];
		st->mh_index = idx;
		st->mh_expand_num = 0;
		st->mh_total_size = 0;
		st->mh_used_size = 0;
		st->mh_alloc_num = 0;
		st->mh_free_num = 0;
		dlib_lock_init(&st->mh_statics_lock);
	}

	return 0;
}

/** Destroy every per-slot statistics lock, including the raw slot. */
static inline void _mhstatics_exit(dlib_mpool_t *mp)
{
	uint32_t idx;

	if (mp == NULL)
		return;

	for (idx = 0; idx <= mp->mp_buckets; idx++)
		dlib_lock_exit(&mp->statics[idx].mh_statics_lock);
}
#endif

/**
 * _mnode_init
 * @brief allocate and set up one slab node for bucket @mh.
 * The flexible array chunk[] holds tmp blocks. In the non-rbtree build
 * each block is additionally prefixed by a long tag (the owner-node back
 * pointer written by dlib_mpool_malloc()), hence the tmp * sizeof(long)
 * extra bytes in that branch.
 * NOTE(review): dmalloc()'s result is never checked in this module —
 * presumably it aborts on OOM; confirm.
 * @param mh: bucket that will own the node
 * @return the new node, free list in virgin state (next == 0)
 */
static inline dlib_mnode_t *_mnode_init(dlib_mhead_t *mh)
{
	dlib_mnode_t *node = NULL;
	uint32_t tmp = MPOOL_SLUB_LENGTH / mh->mh_block_size;

#ifdef _DLIB_MPOOL_USE_RBTREE
	node = dmalloc(sizeof (dlib_mnode_t) + MPOOL_SLUB_LENGTH);
#else
	node = dmalloc(sizeof (dlib_mnode_t) + MPOOL_SLUB_LENGTH +
		       tmp * sizeof (long));
#endif
	node->mh = mh;
	node->next = 0;
	node->block_num = tmp;
	node->free_num = tmp;
	/* not yet linked into the bucket list; caller does LIST_ADD_HEAD */
	node->hl.next = NULL;
	node->hl.prev = NULL;

#ifdef _DLIB_MPOOL_USE_RBTREE
	/* rbnode needn't init */
#endif

	dlib_lock_init(&(node->mn_lock));

	return node;
}

/** Destroy a slab node: release its lock, then the node memory itself. */
static inline void _mnode_exit(dlib_mnode_t *mn)
{
	if (mn != NULL) {
		dlib_lock_exit(&mn->mn_lock);
		_dfree(mn);
	}
}

/**
 * _mpool_tidyup_mhead
 * @brief walk bucket @mh and release slab nodes.
 * TENDER frees only nodes whose blocks are all free; FORCE frees every
 * node unconditionally (used by dlib_mpool_exit() — any outstanding user
 * pointers into those nodes become dangling).
 * NOTE(review): in non-rbtree builds the expand path added
 * sizeof(long) * block_num to mh_total_size, but only MPOOL_SLUB_LENGTH
 * is subtracted here, so the statistic drifts; confirm intent.
 * @param mp: owning mpool (rbtree / statistics bookkeeping)
 * @param mh: bucket to tidy
 * @param type: MPOOL_TIDY_TENDER or MPOOL_TIDY_FORCE
 * @return always 0
 */
static int _mpool_tidyup_mhead(dlib_mpool_t *mp, dlib_mhead_t *mh,
			       mpool_tidy_t type)
{
	list_node_t *n = NULL;
	list_node_t *pos = NULL;
	dlib_mnode_t *tpos = NULL;

	dlib_lock_lock(&mh->mh_lock);
	/* _SAFE variant: tpos may be unlinked and freed while iterating */
	LIST_FOREACH_ENTRY_SAFE(tpos, pos, n, &(mh->mh_head), hl) {
		/* when node is empty (or unconditionally, under FORCE) */
		if (type == MPOOL_TIDY_FORCE ||
		    (type == MPOOL_TIDY_TENDER &&
		     tpos->free_num == tpos->block_num)) {
			LIST_DEL(&tpos->hl);
#ifdef _DLIB_MPOOL_USE_RBTREE
			rb_delete(&mp->mp_rbhead, &tpos->rbnode);
#endif
			mh->mh_node_number--;
#ifdef _DEBUG
			dlib_lock_lock(&(mp->statics[mh->mh_index].mh_statics_lock));
			mp->statics[mh->mh_index].mh_total_size -= MPOOL_SLUB_LENGTH;
			dlib_lock_unlock(&(mp->statics[mh->mh_index].mh_statics_lock));
#endif
			_mnode_exit(tpos);
		}
	}
	dlib_lock_unlock(&mh->mh_lock);

	return 0;
}

/** Memory pool allocators interface */
#ifdef _DLIB_MPOOL_USE_FACTOR
/**
 * dlib_mpool_init (FACTOR variant)
 * @brief create a pool whose bucket block sizes form the geometric series
 *        min, min*fact, min*fact^2, ... up to max.
 * BUGFIXES: `ret` was used in the rbtree section without being declared;
 * the rbtree head field was misspelled (mh_rbhead -> mp_rbhead); the
 * member-offset computation truncated pointers through int32_t casts;
 * dropped a dead "mp = MEM_POISON" store to a local about to go out of
 * scope.
 * @param min: smallest block size
 * @param max: largest block size
 * @param fact: growth factor (must satisfy min * fact > min)
 * @return new pool, or NULL on bad arguments / too many buckets
 */
dlib_mpool_t *dlib_mpool_init(uint32_t min, uint32_t max, float fact)
{
	dlib_mpool_t *mp = NULL;
#ifdef _DLIB_MPOOL_USE_RBTREE
	int ret;		/* BUGFIX: was used below without a declaration */
	dlib_mnode_t node_temp;	/* for rbtree head */
	int32_t ks;		/* for rbtree head */
	int32_t vs;		/* for rbtree head */
#endif

	/* the factor must actually grow the size, or the count loop never ends */
	if (min > max || min * fact <= min)
		return NULL;

	mp = (dlib_mpool_t *) dmalloc(sizeof (dlib_mpool_t));
	mp->mp_id = mp_id++;
	mp->mp_min_block = min;
	mp->mp_max_block = max;
	mp->mp_factor = fact;

	/* NOTE: the FACTOR variant of this macro clobbers `min` */
	MPOOL_BUCKETS_NUM(mp->mp_buckets, min, max, fact);

	if (mp->mp_buckets > MPOOL_BUCKETS_MAX_NUM) {
		_dfree(mp);
		return NULL;
	}

	mp->buckets = dmalloc(mp->mp_buckets * sizeof (dlib_mhead_t));
	_mhead_init(mp);

#ifdef _DEBUG
	/* one extra statistics slot tracks raw (oversized) allocations */
	mp->statics = dmalloc((mp->mp_buckets + 1) * sizeof (dlib_mhstatics_t));
	_mhstatics_init(mp);
#endif

#ifdef _DLIB_MPOOL_USE_RBTREE
	/* BUGFIX: member-offset math via char *, not int32_t pointer casts */
	ks = (int32_t) ((char *) &node_temp.rbnode - (char *) &node_temp.chunk);
	vs = ks;

	/* BUGFIX: the struct field is mp_rbhead (was mh_rbhead) */
	ret = rb_init(&mp->mp_rbhead, _mpool_compare, ks, vs);
	if (ret < 0) {
		dlib_mpool_exit(mp);
		return NULL;
	}
#endif
	mp->mp_status = 0;

	return mp;
}
#else
/**
 * dlib_mpool_init
 * @brief create a pool whose bucket block sizes form the arithmetic
 *        series min, min + addition, ... up to max.
 * BUGFIXES: addition == 0 previously divided by zero inside
 * MPOOL_BUCKETS_NUM; the rbtree member-offset computation truncated
 * pointers through int32_t casts; dropped a dead "mp = MEM_POISON" store
 * to a local about to go out of scope.
 * @param min: smallest block size
 * @param max: largest block size
 * @param addition: step between consecutive bucket block sizes (> 0)
 * @return new pool, or NULL on bad arguments / too many buckets
 */
dlib_mpool_t *dlib_mpool_init(uint32_t min, uint32_t max, uint32_t addition)
{
	dlib_mpool_t *mp = NULL;

#ifdef _DLIB_MPOOL_USE_RBTREE
	int ret;
	dlib_mnode_t node_temp;	/* for rbtree head */
	int32_t ks;		/* for rbtree head */
	int32_t vs;		/* for rbtree head */
#endif

	/* BUGFIX: a zero step would divide by zero in MPOOL_BUCKETS_NUM */
	if (min > max || addition == 0)
		return NULL;

	mp = (dlib_mpool_t *) dmalloc(sizeof (dlib_mpool_t));
	mp->mp_id = mp_id++;
	mp->mp_min_block = min;
	mp->mp_max_block = max;
	mp->mp_addition = addition;

	MPOOL_BUCKETS_NUM(mp->mp_buckets, min, max, addition);

	if (mp->mp_buckets > MPOOL_BUCKETS_MAX_NUM) {
		_dfree(mp);
		return NULL;
	}

	mp->buckets = dmalloc(mp->mp_buckets * sizeof (dlib_mhead_t));
	_mhead_init(mp);

#ifdef _DEBUG
	/* one extra statistics slot tracks raw (oversized) allocations */
	mp->statics = dmalloc((mp->mp_buckets + 1) * sizeof (dlib_mhstatics_t));
	_mhstatics_init(mp);
#endif

#ifdef _DLIB_MPOOL_USE_RBTREE
	/* BUGFIX: member-offset math via char *, not int32_t pointer casts */
	ks = (int32_t) ((char *) &node_temp.rbnode - (char *) &node_temp.chunk);
	vs = ks;

	ret = rb_init(&mp->mp_rbhead, _mpool_compare, ks, vs);
	if (ret < 0) {
		dlib_mpool_exit(mp);
		return NULL;
	}
#endif
	mp->mp_status = 0;

	return mp;
}
#endif

/**
 * dlib_mpool_malloc
 * @brief allocate @size bytes from pool @mp.
 * Oversized requests (no bucket fits) fall back to a raw dmalloc();
 * otherwise the block comes from the first slab node of the matching
 * bucket — nodes with free blocks are kept at the list head, full nodes
 * are rotated to the tail.
 * NOTE: pointer arithmetic on void * below relies on the GCC extension
 * (void * behaves like char *); this matches the rest of the file.
 * @param mp: which mpool
 * @param size: requested bytes (0 returns NULL)
 * @return chunk pointer, or NULL on bad arguments
 */
void *dlib_mpool_malloc(dlib_mpool_t *mp, uint32_t size)
{
	int mh_index;
	void *chunk = NULL;
	dlib_mnode_t *node = NULL;
	dlib_mnode_t *tpos = NULL;

	uint32_t tmp = 0;

	if (mp == NULL || size == 0)
		return NULL;

	mh_index = _mpool_bucket_index(mp, size);

#ifdef _DLIB_MPOOL_USE_RBTREE
	/** mh_index < 0 means size is too large for this mpool, just raw alloc */
	if (mh_index < 0) {
#ifdef _DEBUG
		/* debug raw layout: [uint32_t size][payload...] */
		chunk = dmalloc(size + sizeof (uint32_t));
		*(uint32_t *) chunk = size;

		dlib_lock_lock(&(mp->statics[mp->mp_buckets].mh_statics_lock));
		mp->statics[mp->mp_buckets].mh_total_size +=
			(size + sizeof (uint32_t));
		mp->statics[mp->mp_buckets].mh_used_size +=
			(size + sizeof (uint32_t));
		mp->statics[mp->mp_buckets].mh_alloc_num++;
		dlib_lock_unlock(&(mp->statics[mp->mp_buckets].mh_statics_lock));

		return chunk + sizeof (uint32_t);
#else
		chunk = dmalloc(size);
		return chunk;
#endif
	}

	/* alloc from mpool */
	dlib_lock_lock(&(mp->buckets[mh_index].mh_lock));
	/* no node in head */
	if (LIST_EMPTY(&(mp->buckets[mh_index].mh_head))) {
		tpos = NULL;
	} else { /* head node full means all nodes are full */
		tpos = LIST_FIRST_ENTRY(&(mp->buckets[mh_index].mh_head),
					dlib_mnode_t, hl);
		if (tpos->free_num == 0) {
			tpos = NULL;
		}
	}

	if (tpos == NULL) {
		/* expand the bucket with a fresh slab node */
		node = _mnode_init(mp->buckets + mh_index);
		/* link to mhead list */
		LIST_ADD_HEAD(&(mp->buckets[mh_index].mh_head), &node->hl);
		/* link to mpool rbtree */
		rb_insert(&mp->mp_rbhead, &node->rbnode, RB_EXACT);

		mp->buckets[mh_index].mh_node_number++;
#ifdef _DEBUG
		dlib_lock_lock(&(mp->statics[mh_index].mh_statics_lock));
		mp->statics[mh_index].mh_expand_num++;
		mp->statics[mh_index].mh_total_size += MPOOL_SLUB_LENGTH;
		dlib_lock_unlock(&(mp->statics[mh_index].mh_statics_lock));
#endif
		tpos = LIST_FIRST_ENTRY(&(mp->buckets[mh_index].mh_head),
					dlib_mnode_t, hl);
	}

	dlib_lock_lock(&(tpos->mn_lock));
	/* must be "<", because next comes from 0 to (block_num - 1) */
	chunk = tpos->chunk + tpos->next * tpos->mh->mh_block_size;
	tmp = *((uint32_t *) chunk);

	/**
	 * tmp == 0 means first use(init status), OR, next must turn to (tmp - 1)
	 * need tmp - 1, because 0 means first use and the first chunk, colision,
	 * so may write next + 1 into it.
	 */
	if (tmp == 0) {
		tpos->next++;
	} else {
		tpos->next = tmp - 1;
	}
	tpos->free_num--;

	if (tpos->free_num == 0 && tpos->mh->mh_node_number != 1) {
	/* full && put this node into tail */
		LIST_DEL(&tpos->hl);
		LIST_ADD_TAIL(&(mp->buckets[mh_index].mh_head), &tpos->hl);
	}

	dlib_lock_unlock(&(tpos->mn_lock));
	dlib_lock_unlock(&(mp->buckets[mh_index].mh_lock));

#ifdef _DEBUG
	/* NOTE(review): tpos is read here after both locks were dropped —
	 * racy against a concurrent tidyup freeing the node; confirm */
	dlib_lock_lock(&(mp->statics[mh_index].mh_statics_lock));
	mp->statics[mh_index].mh_used_size += tpos->mh->mh_block_size;
	mp->statics[mh_index].mh_alloc_num++;
	dlib_lock_unlock(&(mp->statics[mh_index].mh_statics_lock));
#endif
	return chunk;

#else
	/* mh_index < 0 means size is too large for this mpool, just raw alloc */
	if (mh_index < 0) {
#ifdef _DEBUG
		/* debug raw layout: [uint32_t size][long tag = 0][payload...] */
		chunk = dmalloc(size + sizeof (long) + sizeof (uint32_t));
		*(uint32_t *) chunk = size;
		*(long *) (chunk + sizeof (uint32_t)) = 0;

		dlib_lock_lock(&(mp->statics[mp->mp_buckets].mh_statics_lock));
		mp->statics[mp->mp_buckets].mh_total_size +=
			(size + sizeof (long) + sizeof (uint32_t));
		mp->statics[mp->mp_buckets].mh_used_size +=
			(size + sizeof (long) + sizeof (uint32_t));
		mp->statics[mp->mp_buckets].mh_alloc_num++;
		dlib_lock_unlock(&(mp->statics[mp->mp_buckets].mh_statics_lock));

		return chunk + sizeof (uint32_t) + sizeof (long);
#else
		/* raw layout: [long tag = 0][payload...]; tag 0 marks "raw" */
		chunk = dmalloc(size + sizeof (long));
		*(long *) (chunk) = 0;
		return chunk + sizeof (long);
#endif
	}

	/* alloc from mpool */
	dlib_lock_lock(&(mp->buckets[mh_index].mh_lock));
	/* no node in head */
	if (LIST_EMPTY(&(mp->buckets[mh_index].mh_head))) {
		tpos = NULL;
	} else { /* head node full means all nodes are full */
		tpos = LIST_FIRST_ENTRY(&(mp->buckets[mh_index].mh_head),
					dlib_mnode_t, hl);
		if (tpos->free_num == 0) {
			tpos = NULL;
		}
	}

	if (tpos == NULL) {
		/* expand the bucket with a fresh slab node */
		node = _mnode_init(mp->buckets + mh_index);
		/* link to mhead list */
		LIST_ADD_HEAD(&(mp->buckets[mh_index].mh_head), &node->hl);

		mp->buckets[mh_index].mh_node_number++;
#ifdef _DEBUG
		dlib_lock_lock(&(mp->statics[mh_index].mh_statics_lock));
		mp->statics[mh_index].mh_expand_num++;
		mp->statics[mh_index].mh_total_size += 
			(MPOOL_SLUB_LENGTH + sizeof (long) * node->block_num);
		dlib_lock_unlock(&(mp->statics[mh_index].mh_statics_lock));
#endif
		tpos = LIST_FIRST_ENTRY(&(mp->buckets[mh_index].mh_head),
					dlib_mnode_t, hl);
	}

	dlib_lock_lock(&(tpos->mn_lock));
	/* must be "<", because next comes from 0 to (block_num - 1) */
	chunk = tpos->chunk + 
		tpos->next * (sizeof (long) + tpos->mh->mh_block_size);
	tmp = *((uint32_t *) (chunk + sizeof (long)));

	/**
	 * tmp == 0 means first use(init status), OR, next must turn to (tmp - 1)
	 * need tmp - 1, because 0 means first use and the first chunk, colision,
	 * so may write next + 1 into it.
	 */
	if (tmp == 0) {
		tpos->next++;
		/* first use: stamp the owner-node back pointer into the tag */
		*(long *) chunk = (long) tpos;
	} else {
		tpos->next = tmp - 1;
	}
	tpos->free_num--;

	if (tpos->free_num == 0 && tpos->mh->mh_node_number != 1) {
		/* full: rotate this node to the tail */
		LIST_DEL(&tpos->hl);
		LIST_ADD_TAIL(&(mp->buckets[mh_index].mh_head), &tpos->hl);
	}

	dlib_lock_unlock(&(tpos->mn_lock));
	dlib_lock_unlock(&(mp->buckets[mh_index].mh_lock));

#ifdef _DEBUG
	/* NOTE(review): tpos is read here after both locks were dropped —
	 * racy against a concurrent tidyup freeing the node; confirm */
	dlib_lock_lock(&(mp->statics[mh_index].mh_statics_lock));
	mp->statics[mh_index].mh_used_size +=
		tpos->mh->mh_block_size + sizeof (long);
	mp->statics[mh_index].mh_alloc_num++;
	dlib_lock_unlock(&(mp->statics[mh_index].mh_statics_lock));
#endif
	return chunk + sizeof (long);
#endif
}

/**
 * dlib_mpool_calloc
 * @brief allocate num * size bytes from @mp and zero them.
 * BUGFIX: num * size could silently wrap in 32-bit arithmetic, yielding
 * a zeroed chunk far smaller than the caller expects; overflow is now
 * rejected up front.
 * @param mp: which mpool
 * @param num: element count
 * @param size: element size
 * @return zeroed chunk, or NULL on failure / overflow
 */
void *dlib_mpool_calloc(dlib_mpool_t *mp, uint32_t num, uint32_t size)
{
	void *chunk = NULL;
	uint32_t total;

	if (size != 0 && num > UINT32_MAX / size)
		return NULL;
	total = num * size;

	chunk = dlib_mpool_malloc(mp, total);
	if (chunk != NULL)
		memset(chunk, 0, total);

	return chunk;
}

/**
 * dlib_mpool_realloc
 * @brief resize chunk @m to @size bytes.
 * Raw chunks (tag == 0 / not found in the rbtree) go through drealloc();
 * pool chunks are moved by malloc + copy + free unless the bucket block
 * size already matches.
 * BUGFIXES (rbtree build): rb_find() was passed `chunk` — still NULL at
 * that point — instead of the chunk being reallocated; the _DEBUG raw
 * path returned the header address instead of the payload.
 * @param mp: which mpool
 * @param m: old chunk (NULL degenerates to dlib_mpool_malloc)
 * @param size: new size
 * @return new chunk, or NULL on failure
 */
void *dlib_mpool_realloc(dlib_mpool_t *mp, void *m, uint32_t size)
{
	void *chunk = NULL;
	dlib_mnode_t *mn;
	uint32_t m_size;

#ifdef _DLIB_MPOOL_USE_RBTREE
	rbtree_node_t *rbn;
#else
	long tmp;
#endif

	if (mp == NULL)
		return NULL;
	if (m == NULL)
		return dlib_mpool_malloc(mp, size);

#ifdef _DLIB_MPOOL_USE_RBTREE

	/* BUGFIX: look up the chunk being reallocated (was `chunk`, i.e. NULL) */
	rbn = rb_find(&mp->mp_rbhead, NULL, RB_SEARCH_EQ, m);
	if (rbn == NULL) { /* just raw realloc, and dfree  */
#ifdef _DEBUG
		m_size = *((uint32_t *) (m - sizeof (uint32_t)));

		dlib_lock_lock(&(mp->statics[mp->mp_buckets].mh_statics_lock));
		mp->statics[mp->mp_buckets].mh_total_size += size - m_size;
		mp->statics[mp->mp_buckets].mh_used_size += size - m_size;
		dlib_lock_unlock(&(mp->statics[mp->mp_buckets].mh_statics_lock));

		chunk = drealloc(m - sizeof (uint32_t), size + sizeof (uint32_t));
		*(uint32_t *) chunk = size;
		/* BUGFIX: skip the size header, as dlib_mpool_malloc() does */
		return chunk + sizeof (uint32_t);
#else
		chunk = drealloc(m, size);
		return chunk;
#endif
	}

	/* mpool alloced */
	mn = RB_ENTRY(rbn, dlib_mnode_t, rbnode);
	m_size = mn->mh->mh_block_size;

	if (size == m_size)
		return m;

	/* move to the right bucket: alloc, copy the smaller extent, free */
	chunk = dlib_mpool_malloc(mp, size);
	if (chunk != NULL) {
		size > m_size ?
			memcpy(chunk, m, m_size) : memcpy(chunk, m, size);
		dlib_mpool_free(mp, m);
	}

	return chunk;
#else
	/* the long tag just before the chunk: owner node, or 0 for raw */
	tmp = *(long *) (m - sizeof (long));
	if (tmp == 0) { /* just raw realloc, and dfree  */
#ifdef _DEBUG
		m_size = *((uint32_t *) (m - sizeof(long) - sizeof(uint32_t)));

		dlib_lock_lock(&(mp->statics[mp->mp_buckets].mh_statics_lock));
		mp->statics[mp->mp_buckets].mh_total_size += (size - m_size);
		mp->statics[mp->mp_buckets].mh_used_size += (size - m_size);
		dlib_lock_unlock(&(mp->statics[mp->mp_buckets].mh_statics_lock));

		chunk = drealloc(m - sizeof (long) - sizeof (uint32_t),
				 size + sizeof (long) + sizeof (uint32_t));
		*(uint32_t *) chunk = size;
		*(long *) (chunk + sizeof (uint32_t)) = 0;
		return chunk + sizeof (long) + sizeof (uint32_t);
#else
		/* the tag was 0 before the move, so drealloc preserves it */
		chunk = drealloc(m - sizeof (long), size + sizeof (long));
		return chunk + sizeof (long);
#endif
	}

	/* mpool alloced */
	mn = (dlib_mnode_t *) tmp;
	m_size = mn->mh->mh_block_size;

	if (size == m_size)
		return m;

	/* move to the right bucket: alloc, copy the smaller extent, free */
	chunk = dlib_mpool_malloc(mp, size);
	if (chunk != NULL) {
		size > m_size ? memcpy(chunk, m, m_size) : memcpy(chunk, m, size);
		dlib_mpool_free(mp, m);
	}

	return chunk;

#endif
}

/**
 * dlib_mpool_free
 * @brief return @chunk to pool @mp (raw chunks are handed to _dfree).
 * The freed block stores (previous next + 1) in its first 4 bytes — the
 * +1 keeps 0 reserved as the "never used" marker consumed by
 * dlib_mpool_malloc().
 * BUGFIX: the block-index computation cast both pointers to uint32_t,
 * truncating addresses on 64-bit platforms; the subtraction is now done
 * in uintptr_t before narrowing the (small) quotient.
 * @param mp: which mpool
 * @param chunk: chunk to release (NULL is a no-op)
 */
void dlib_mpool_free(dlib_mpool_t *mp, void *chunk)
{
	dlib_mnode_t *mn;

#ifdef _DLIB_MPOOL_USE_RBTREE
	rbtree_node_t *rbn;
#else
	long tmp;
#endif

#ifdef _DEBUG
	uint32_t size;
#endif

	if (mp == NULL || chunk == NULL)
		return;

#ifdef _DLIB_MPOOL_USE_RBTREE

	rbn = rb_find(&mp->mp_rbhead, NULL, RB_SEARCH_EQ, chunk);
	if (rbn == NULL) {
		/* not in all buckets, but raw alloced, need to free it directly */
#ifdef _DEBUG
		size = *((uint32_t *) (chunk - sizeof (uint32_t)));

		dlib_lock_lock(&(mp->statics[mp->mp_buckets].mh_statics_lock));
		mp->statics[mp->mp_buckets].mh_total_size -=
		    (size + sizeof (uint32_t));
		mp->statics[mp->mp_buckets].mh_used_size -=
		    (size + sizeof (uint32_t));
		mp->statics[mp->mp_buckets].mh_free_num++;
		dlib_lock_unlock(&(mp->statics[mp->mp_buckets].mh_statics_lock));

		_dfree(chunk - sizeof (uint32_t));
#else
		_dfree(chunk);
#endif
		return;
	}

	/* mpool alloced just free the chunk to mnode */
	mn = RB_ENTRY(rbn, dlib_mnode_t, rbnode);

	dlib_lock_lock(&(mn->mh->mh_lock));
	dlib_lock_lock(&(mn->mn_lock));

	/* mn->next + 1, see the comments in dlib_mpool_malloc() */
	*(uint32_t *) chunk = mn->next + 1;
	/* BUGFIX: subtract in uintptr_t, not uint32_t */
	mn->next = (uint32_t) (((uintptr_t) chunk - (uintptr_t) mn->chunk) /
		   mn->mh->mh_block_size);
	mn->free_num++;

	/* node became usable again: move it back to the bucket head */
	if (mn->free_num == 1 && mn->mh->mh_node_number != 1) {
		LIST_DEL(&mn->hl);
		LIST_ADD_HEAD(&(mn->mh->mh_head), &mn->hl);
	}

	/**
	 * if need to check the node is empty and destroy it, add codes here,
	 * code likes this: if (mn->free_num == mn->block_num) {...}
	 * but i think that was not worth doing,
	 * add a tidyup function to do this by needs.
	 */
	dlib_lock_unlock(&(mn->mn_lock));
	dlib_lock_unlock(&(mn->mh->mh_lock));

#ifdef _DEBUG
	dlib_lock_lock(&(mp->statics[mn->mh->mh_index].mh_statics_lock));
	mp->statics[mn->mh->mh_index].mh_used_size -= mn->mh->mh_block_size;
	mp->statics[mn->mh->mh_index].mh_free_num++;
	dlib_lock_unlock(&(mp->statics[mn->mh->mh_index].mh_statics_lock));
#endif
	return;

#else /* rbtree */
	tmp = *(long *) (chunk - sizeof (long));

	if (tmp == 0) {
		/* not in all buckets, but raw alloced, need to free it directly */
#ifdef _DEBUG
		size = *((uint32_t *)(chunk - sizeof(long) - sizeof(uint32_t)));

		dlib_lock_lock(&(mp->statics[mp->mp_buckets].mh_statics_lock));
		mp->statics[mp->mp_buckets].mh_total_size -=
			(size + sizeof (long) + sizeof (uint32_t));
		mp->statics[mp->mp_buckets].mh_used_size -=
			(size + sizeof (long) + sizeof (uint32_t));
		mp->statics[mp->mp_buckets].mh_free_num++;
		dlib_lock_unlock(&(mp->statics[mp->mp_buckets].mh_statics_lock));

		_dfree(chunk - sizeof (long) - sizeof (uint32_t));
#else
		_dfree(chunk - sizeof (long));
#endif
		return;
	}

	/* mpool alloced just free the chunk to mnode */
	mn = (dlib_mnode_t *)tmp;

	dlib_lock_lock(&(mn->mh->mh_lock));
	dlib_lock_lock(&(mn->mn_lock));
	/* mn->next + 1, see the comments in dlib_mpool_malloc() */
	*(uint32_t *) chunk = mn->next + 1;
	/* BUGFIX: subtract in uintptr_t, not uint32_t */
	mn->next = (uint32_t) (((uintptr_t) chunk - (uintptr_t) mn->chunk) /
		   (mn->mh->mh_block_size + sizeof (long)));
	mn->free_num++;

	/* node became usable again: move it back to the bucket head */
	if (mn->free_num == 1 && mn->mh->mh_node_number != 1) {
		LIST_DEL(&mn->hl);
		LIST_ADD_HEAD(&(mn->mh->mh_head), &mn->hl);
	}

	/**
	 * if need to check the node is empty and destroy it, add codes here,
	 * code likes this: if (mn->free_num == mn->block_num) {...}
	 * but i think that was not worth doing,
	 * add a tidyup function to do this by needs.
	 */
	dlib_lock_unlock(&(mn->mn_lock));
	dlib_lock_unlock(&(mn->mh->mh_lock));

#ifdef _DEBUG
	dlib_lock_lock(&(mp->statics[mn->mh->mh_index].mh_statics_lock));
	mp->statics[mn->mh->mh_index].mh_used_size -=
		(mn->mh->mh_block_size + sizeof (long));
	mp->statics[mn->mh->mh_index].mh_free_num++;
	dlib_lock_unlock(&(mp->statics[mn->mh->mh_index].mh_statics_lock));
#endif
	return;
#endif
}

/**
 * Tender tidyup: release completely-free slab nodes from @mh, or from
 * every bucket when @mh is NULL. Returns the summed per-bucket results
 * (-1 when @mp is NULL).
 */
int dlib_mpool_tidyup(dlib_mpool_t *mp, dlib_mhead_t *mh)
{
	uint32_t idx;
	int total;

	if (mp == NULL)
		return -1;

	/* a specific bucket was requested: tidy only that one */
	if (mh != NULL)
		return _mpool_tidyup_mhead(mp, mh, MPOOL_TIDY_TENDER);

	total = 0;
	for (idx = 0; idx < mp->mp_buckets; idx++)
		total += _mpool_tidyup_mhead(mp, mp->buckets + idx,
					     MPOOL_TIDY_TENDER);

	return total;
}

/**
 * dlib_mpool_exit
 * @brief destroy @mp: force-free every slab node, tear down locks and
 *        statistics, then release the pool itself.
 * NOTE: MPOOL_TIDY_FORCE frees nodes even when blocks are still in use;
 * callers must not touch any pool-returned pointer after this returns.
 * (Dropped the dead "mp = MEM_POISON" store — it wrote a local parameter
 * about to go out of scope and had no effect on the caller.)
 * @param mp: which mpool (NULL is a no-op)
 */
void dlib_mpool_exit(dlib_mpool_t *mp)
{
	uint32_t i;

	if (mp == NULL)
		return;

#ifdef _DLIB_MPOOL_USE_RBTREE
	/* just do nothing; nodes are reclaimed by the force tidyup below */
	rb_exit(&mp->mp_rbhead, NULL, NULL);
#endif

	for (i = 0; i < mp->mp_buckets; i++) {
		_mpool_tidyup_mhead(mp, mp->buckets + i, MPOOL_TIDY_FORCE);
	}

#ifdef _DEBUG
	_mhstatics_exit(mp);
	_dfree(mp->statics);
#endif

	_mhead_exit(mp);
	_dfree(mp->buckets);
	_dfree(mp);

	return;
}

#ifdef _DEBUG
/**
 * dlib_mpool_dump
 * @brief print per-bucket and aggregate statistics of @mp to @fd
 *        (stdout when @fd is NULL). Debug builds only.
 * BUGFIX: uint64_t counters were printed with %llu and the uint32_t
 * bucket index with %d — format/argument mismatches (undefined behavior
 * on LP64, where uint64_t is unsigned long). Now uses PRIu64 / %u.
 */
void dlib_mpool_dump(dlib_mpool_t *mp, FILE *fd)
{
	uint32_t i;
	uint64_t total_size = 0;
	uint64_t used_size = 0;
	uint64_t alloc_num = 0;
	uint64_t free_num = 0;

	if (mp == NULL)
		return;
	if (fd == NULL)
		fd = stdout;

#ifdef _DLIB_MPOOL_USE_FACTOR
	fprintf(fd, "MPOOL dump out:\n"
		"    mp_id		: %u\n"
		"    status 		: %u\n"
		"    mp_min_block	: %u\n"
		"    mp_max_block	: %u\n"
		"    mp_factor		: %f\n"
		"    mp_buckets		: %u\n\n",
		mp->mp_id, mp->mp_status, mp->mp_min_block,
		mp->mp_max_block, mp->mp_factor, mp->mp_buckets);
#else
	fprintf(fd, "MPOOL dump out:\n"
		"    mp_id		: %u\n"
		"    status 		: %u\n"
		"    mp_min_block	: %u\n"
		"    mp_max_block	: %u\n"
		"    mp_addition	: %u\n"
		"    mp_buckets		: %u\n\n",
		mp->mp_id, mp->mp_status, mp->mp_min_block,
		mp->mp_max_block, mp->mp_addition, mp->mp_buckets);
#endif

	/* the extra slot [mp_buckets] holds raw (oversized) allocation stats */
	for (i = 0; i < mp->mp_buckets + 1; i++) {
		dlib_lock_lock(&(mp->statics[i].mh_statics_lock));
		fprintf(fd, "BUCKET #%u\t"
			"    mh_expand_num 		: %u, "
			"    mh_total_size		: %" PRIu64 ", "
			"    mh_used_size		: %" PRIu64 ", "
			"    mh_alloc_num		: %" PRIu64 ", "
			"    mh_free_num		: %" PRIu64 "\n",
			mp->statics[i].mh_index, mp->statics[i].mh_expand_num,
			mp->statics[i].mh_total_size,
			mp->statics[i].mh_used_size,
			mp->statics[i].mh_alloc_num,
			mp->statics[i].mh_free_num);

		total_size += mp->statics[i].mh_total_size;
		used_size += mp->statics[i].mh_used_size;
		alloc_num += mp->statics[i].mh_alloc_num;
		free_num += mp->statics[i].mh_free_num;
		dlib_lock_unlock(&(mp->statics[i].mh_statics_lock));
	}

	fprintf(fd, "\nTHE TOTAL STATICS:\n"
		"    total_size		: %" PRIu64 "\n"
		"    used_size		: %" PRIu64 "\n"
		"    alloc_num		: %" PRIu64 "\n"
		"    free_num		: %" PRIu64 "\n",
		total_size, used_size, alloc_num, free_num);

	return;
}
#endif

/** marena implementation */

/**
 * _mafree_new
 * @brief Generate a new mafree structure
 * @param f: cleaner function
 * @param m: cleaner object
 */
static dlib_mafree_t *_mafree_new(marena_clean_fn f, void *m);

/**
 * _maheap_new
 * @brief Generate a new maheap structure
 * @param ma: marena including maheap
 * @param size: maheap size
 */
static inline dlib_maheap_t *_maheap_new(dlib_marena_t *ma, uint32_t size);

/**
 * _maheap_cleaner
 * @brief Callback function
 * @param h: cleaner object
 */
static void _maheap_cleaner(void *h);

/**
 * _marena_append_mafree
 * @brief Append a mafree to mafree list
 * @param ma: marena
 * @param f: mafree structure
 */
static inline void _marena_append_mafree(dlib_marena_t *ma, dlib_mafree_t *f);

/** Build a cleanup-list entry pairing object @m with cleaner callback @c. */
static inline dlib_mafree_t *_mafree_new(marena_clean_fn c, void *m)
{
	dlib_mafree_t *node = dmalloc(sizeof (dlib_mafree_t));

	LIST_HEAD_INIT(&node->l);
	node->clean_cb = c;
	node->m = m;

	return node;
}

/** Cleanup callback for heaps: release the payload, then the bookkeeping. */
static void _maheap_cleaner(void *h)
{
	dlib_maheap_t *hp = (dlib_maheap_t *) h;

	_dfree(hp->chunk);
	_dfree(hp);
}

/**
 * Append cleanup entry @f to @ma's cleaner list; the first entry becomes
 * the list head.
 * BUGFIX: when @ma was NULL the old `if (ma && ...)` guard evaluated
 * false and execution fell through to `ma->cleaner` — a NULL dereference.
 * A NULL arena is now an explicit no-op.
 */
static inline void _marena_append_mafree(dlib_marena_t *ma, dlib_mafree_t *f)
{
	if (ma == NULL)
		return;

	if (ma->cleaner == NULL) {
		ma->cleaner = f;
		return;
	}

	LIST_ADD_TAIL(&ma->cleaner->l, &f->l);
}

/**
 * Allocate a fresh working heap of @size bytes, account for it in the
 * arena totals, and register it on the arena's cleanup list via
 * _maheap_cleaner.
 * NOTE(review): original_size is overwritten on every call — it appears
 * to remember the most recent heap size for refills in
 * dlib_marena_calloc(); confirm.
 */
static dlib_maheap_t *_maheap_new(dlib_marena_t *ma, uint32_t size)
{
	dlib_maheap_t *h;
	dlib_mafree_t *f;

	h = dmalloc(sizeof (dlib_maheap_t));
	h->chunk = dmalloc(size);
	h->size = size;
	h->used = 0;

	ma->original_size = size;
	ma->size += size;

	/* hook the heap into the cleaner list for dlib_marena_free() */
	f = _mafree_new(_maheap_cleaner, (void *) h);

	_marena_append_mafree(ma, f);

	return h;
}

/**
 * Create an empty arena: no heap, no cleanup list yet.
 * NOTE: mp_id++ is not atomic; ids may collide under concurrency.
 */
dlib_marena_t *dlib_marena_new()
{
	dlib_marena_t *ma = dmalloc(sizeof (dlib_marena_t));

	ma->mp_id = mp_id++;
	ma->original_size = 0;
	ma->size = 0;
	ma->cleaner = NULL;
	ma->heap = NULL;

	return ma;
}

/** Create an arena and immediately attach a working heap of @size bytes. */
dlib_marena_t *dlib_marena_new_with_heap(uint32_t size)
{
	dlib_marena_t *ma;

	ma = dlib_marena_new();
	ma->heap = _maheap_new(ma, size);

	return ma;
}

/**
 * dlib_marena_free
 * @brief run every registered cleaner (heaps and raw chunks), free the
 *        cleanup entries, then the arena itself.
 * BUGFIX: an arena created by dlib_marena_new() that never allocated has
 * cleaner == NULL; the old code iterated &ma->cleaner->l before checking,
 * dereferencing a null pointer. The whole cleanup is now guarded.
 * @param ma: arena to destroy (NULL is a no-op)
 */
void dlib_marena_free(dlib_marena_t *ma)
{
	dlib_mafree_t *tpos;
	list_node_t *pos;
	list_node_t *n;

	if (ma == NULL)
		return;

	if (ma->cleaner != NULL) {
		/* entries hanging off the head first, then the head itself */
		LIST_FOREACH_ENTRY_SAFE(tpos, pos, n, &ma->cleaner->l, l) {
			(*tpos->clean_cb)(tpos->m);
			_dfree(tpos);
		}
		(*ma->cleaner->clean_cb) (ma->cleaner->m);
		_dfree(ma->cleaner);
	}

	_dfree(ma);
}

/** Placeholder: arena compaction is not implemented yet. */
void dlib_marena_tidyup(dlib_marena_t *ma)
{
	if (ma == NULL)
		return;

	/**
	 * TODO:
	 * do something tidyup,
	 * such as cleaning up the cleaner list.
	 */
}

/**
 * dlib_marena_calloc
 * @brief bump-allocate @size bytes from the arena's current heap;
 *        oversized or overflow requests fall back to a raw dmalloc that
 *        is still tracked on the cleanup list.
 * NOTE(review): despite the name, the returned memory is NOT zeroed
 * anywhere in this function — confirm whether callers rely on zeroing.
 * @param ma: which marena
 * @param size: requested bytes
 * @return chunk pointer, or NULL when @ma is NULL
 */
void *dlib_marena_calloc(dlib_marena_t *ma, uint32_t size)
{
	void *chunk = NULL;
	const list_node_t *ptr;

	if (ma == NULL)
		return NULL;

	/* if no space of this pool or too big, just raw alloc */
	if (ma->heap == NULL || size > (ma->heap->size / 2)) {
		chunk = dmalloc(size);
		ma->size += size;

		dlib_mafree_t *f = _mafree_new(_dfree, chunk);
		_marena_append_mafree(ma, f);

		return chunk;
	}

	/* align the bump pointer to 8 bytes (mask & 7) for multi-byte objects */
	if (size > 4) {
		while (ma->heap->used & 7 && ma->heap->used < ma->heap->size) {
			ma->heap->used++;
		}
		/* no heap space left, just new a heap */
		if (ma->heap->used == ma->heap->size) {
			ma->heap = _maheap_new(ma, ma->original_size);
			/* NOTE(review): resetting ma->cleaner to the list's
			 * last entry looks like it drops the earlier head
			 * reference — verify the list macros keep prior
			 * entries reachable for dlib_marena_free() */
			ptr = &ma->cleaner->l;
			ma->cleaner = LIST_LAST_ENTRY(ptr, dlib_mafree_t, l);
		}
	}

	/* not enough space left, then do raw alloc */
	if (size > (ma->heap->size - ma->heap->used)) {
		chunk = dmalloc(size);
		ma->size += size;

		dlib_mafree_t *f = _mafree_new(_dfree, chunk);
		_marena_append_mafree(ma, f);

		return chunk;
	} else {
		/* bump-pointer hit: hand out the next slice of the heap */
		chunk = (char *) ma->heap->chunk + ma->heap->used;
		ma->heap->used += size;
	}

	return chunk;
}

/** Total bytes this arena has reserved so far (0 for a NULL arena). */
uint32_t dlib_marena_size(dlib_marena_t *ma)
{
	return (ma == NULL) ? 0 : ma->size;
}

