/*	$Id: mpool.c 7 2008-07-29 02:58:11Z phrakt $	*/
/*
 * Copyright (c) 2004 Jean-Francois Brousseau <jfb@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Fixed-size memory pools
 * -----------------------
 *
 * The PF_MPOOL interface provides a simple way of managing large pools of
 * fixed-size memory chunks.  It has the main advantages of rapidly finding
 * free chunks and the possibility to block when no memory is available.
 * Additionally, it provides a way for critical parts of an application to
 * lock a specified amount of memory that cannot be used by other parts of the
 * system, so the required memory is always available.
 */

#include <sys/types.h>
#include <sys/mman.h>

#include <unistd.h>
#include <stdlib.h>
#include <string.h>

#include "mpool.h"
#include "queue.h"
#include "pforge.h"
#include "private.h"


/* round a size/address up to the next multiple of the pool alignment */
#define MPOOL_ALIGNMENT sizeof(long)
#define MPOOL_ALIGN(x)  (((x) + (MPOOL_ALIGNMENT - 1)) & ~(MPOOL_ALIGNMENT - 1))


/*
 * Per-chunk bookkeeping entry.  One of these exists for every chunk in the
 * pool; it lives on exactly one of the pool's used/free lists at a time.
 */
struct mp_chunk {
	void                 *mpc_hdl;  /* pointer to the chunk */
	SLIST_ENTRY(mp_chunk) mpc_list; /* link on the used or free list */
};

/* singly-linked list of mp_chunk entries */
SLIST_HEAD(pf_mpcl, mp_chunk);

/*
 * Pool descriptor.  The structure itself is stored at the start of the
 * mmap()ed region, immediately followed by the chunk bookkeeping array and
 * then the chunk data area (mp_pool).
 */
struct pf_mpool {
	int      mp_flags;    /* MPF_* flags given at allocation time */
	void    *mp_pool;     /* pool handle */
	size_t   mp_psize;    /* pool size (in bytes, rounded up to page) */
	size_t   mp_csize;    /* chunk size */

	/* chunk statistics */
	unsigned int  mp_nchunks;  /* total number of chunks */
	unsigned int  mp_nused;    /* chunks currently handed out */
	unsigned int  mp_nalloc;   /* lifetime allocation count */

	/* chunk lists */
	struct pf_mpcl mp_used;
	struct pf_mpcl mp_free;

#ifdef PF_THREAD_SAFE
	/* pool mutex */
	pthread_mutex_t mp_mutex;

	/* condition on which to wait when no memory is available */
	pthread_cond_t mp_cond;
#endif

	/* start of the chunk array, the actual size is bogus */
	/* (pre-C99 flexible-array idiom: really mp_nchunks elements) */
	struct mp_chunk chunks[1];
};



/*
 * pf_mpool_alloc()
 *
 * Allocate a new memory pool which contains <nb> elements of fixed
 * size <clen>.
 * Returns a pointer to the allocated pool on success, or NULL on failure.
 */
PF_MPOOL*
pf_mpool_alloc(size_t clen, int nb, int flags)
{
	size_t plen, pslen;
	int i;
	int mmap_prot, mmap_flags, ret;
	uint8_t *pos;
	void *pmap;
	PF_MPOOL *p;

	plen = clen * nb;

	/*
	* pslen is the size required by the pool structure, which is not
	* sizeof(PF_MPOOL), because the <chunks> array is larger than what
	* it is declared as (it is really <nb> elements).
	*/
	pslen = sizeof(PF_MPOOL) + ((nb - 1) * sizeof(struct mp_chunk));

	/* map enough memory to also store the pool structure */
	plen += pslen;

	mmap_prot = PROT_READ|PROT_WRITE;
	mmap_flags = MAP_ANON | MAP_SHARED;

	pmap = mmap((void *)0, plen, mmap_prot, mmap_flags, -1, 0);
	if (pmap == MAP_FAILED) {
	}
	if (flags & MPF_HARD) {
		mlock(pmap, plen);
	}
	madvise(pmap, plen, MADV_RANDOM);

	/* Clear the POOL struct (without the chunks) */
	memset(pmap, 0, pslen);

	/* map the pool structure at the start of the memory region */
	p = (PF_MPOOL *)pmap;
	p->mp_flags = flags;
	p->mp_pool = (uint8_t *)pmap + pslen;
	p->mp_psize = plen;
	p->mp_csize = clen;
	p->mp_nchunks = nb;
	p->mp_nused = 0;

	SLIST_INIT(&(p->mp_used));
	SLIST_INIT(&(p->mp_free));

#ifdef PF_THREAD_SAFE
	ret = pthread_mutex_init(&(p->mp_mutex), NULL);
	if (ret != 0) {
		return (NULL);
	}
	ret = pthread_cond_init(&(p->mp_cond), NULL);
	if (ret != 0) {
		(void)pthread_mutex_destroy(&(p->mp_mutex));
	}
#endif

	/* set the chunks pointers, and add them to the free list */
	for (i = 0, pos = p->mp_pool; i < nb; i++, pos += clen) {
		p->chunks[i].mpc_hdl = pos;
		SLIST_INSERT_HEAD(&(p->mp_free),
		    &(p->chunks[i]), mpc_list);
	}

	return (p);
}


/*
 * pf_mpool_destroy()
 *
 * Destroy the pool <p> and free all internal data and used resources.
 * Passing NULL is a harmless no-op.
 */
void
pf_mpool_destroy(PF_MPOOL *p)
{
	if (p == NULL)
		return;

#ifdef PF_THREAD_SAFE
	pthread_mutex_destroy(&(p->mp_mutex));
	pthread_cond_destroy(&(p->mp_cond));
#endif
	/* the descriptor lives inside the mapping, so this releases everything */
	(void)munmap(p, p->mp_psize);
}


/*
 * pf_mpool_get()
 *
 * Allocate an item from the pool <pool>.  The <mode> argument
 * specifies how the pool allocator should behave: MPALLOC_NOWAIT makes
 * the function return NULL immediately when no chunk is available, while
 * MPALLOC_WAIT blocks (thread-safe builds only) until another thread
 * releases a chunk.  Any other mode is treated like MPALLOC_NOWAIT.
 *
 * If the pool was created with MPF_CLEAR, the returned chunk is zeroed.
 *
 * Returns a pointer to the newly allocated memory area on success, or NULL
 * on failure.
 */
void*
pf_mpool_get(PF_MPOOL *pool, int mode)
{
	void *ap;
	struct mp_chunk *mp;

#ifdef PF_THREAD_SAFE
	if (pthread_mutex_lock(&(pool->mp_mutex)) != 0) {
		return (NULL);
	}

	/*
	 * Loop (not just "if"): pthread_cond_wait() may wake spuriously,
	 * and another waiter may have grabbed the freed chunk first.
	 */
	while (pool->mp_nused == pool->mp_nchunks) {
		if (mode != MPALLOC_WAIT) {
			(void)pthread_mutex_unlock(&(pool->mp_mutex));
			return (NULL);
		}
		/* sleep until some chunks get freed */
		if (pthread_cond_wait(&(pool->mp_cond),
		    &(pool->mp_mutex)) != 0) {
			/* the wait re-acquired the mutex; release it */
			(void)pthread_mutex_unlock(&(pool->mp_mutex));
			return (NULL);
		}
	}
#else
	/* single-threaded build: there is nothing to wait on */
	if (pool->mp_nused == pool->mp_nchunks) {
		return (NULL);
	}
#endif

	/* grab the first free chunk */
	mp = SLIST_FIRST(&(pool->mp_free));
	SLIST_REMOVE_HEAD(&(pool->mp_free), mpc_list);

	SLIST_INSERT_HEAD(&(pool->mp_used), mp, mpc_list);

	pool->mp_nused++;
	pool->mp_nalloc++;

#ifdef PF_THREAD_SAFE
	(void)pthread_mutex_unlock(&(pool->mp_mutex));
#endif
	/* the chunk is now owned by the caller; safe to touch unlocked */
	ap = mp->mpc_hdl;
	if (pool->mp_flags & MPF_CLEAR) {
		memset(ap, 0, pool->mp_csize);
	}

	return (ap);
}


/*
 * pf_mpool_release()
 *
 * Release the memory chunk whose address is <aptr>.  To do this, we must
 * first look through the list of used chunks and find the proper chunk
 * structure, and then move it to the list of free chunks.  Releasing NULL,
 * or a pointer not currently on the used list, is a no-op.
 */
void
pf_mpool_release(PF_MPOOL *pool, void *aptr)
{
	struct mp_chunk *mp, **pp;
	int freed;

	if (aptr == NULL) {
		return;
	}

	freed = 0;

#ifdef PF_THREAD_SAFE
	if (pthread_mutex_lock(&(pool->mp_mutex)) != 0) {
		return;
	}
#endif

	/*
	 * SLIST_FOREACH_PREVPTR keeps <pp> pointing at the link that points
	 * to <mp> (initially the list head's first-pointer), so unlinking is
	 * simply redirecting that link past <mp>.
	 */
	SLIST_FOREACH_PREVPTR(mp, pp, &(pool->mp_used), mpc_list) {
		if (mp->mpc_hdl == aptr) {
			*pp = mp->mpc_list.sle_next;

			SLIST_INSERT_HEAD(&(pool->mp_free), mp, mpc_list);
			pool->mp_nused--;
			freed = 1;
			break;
		}
	}

#ifdef PF_THREAD_SAFE
	(void)pthread_mutex_unlock(&(pool->mp_mutex));
	/* wake a waiter only when a chunk actually became available */
	if (freed)
		(void)pthread_cond_signal(&(pool->mp_cond));
#else
	(void)freed;
#endif
}
