#include "kernel/slab.h"

#ifndef TEST_DBG_WIN32
#include "heap.h"
#else
#include <stdio.h>
#define vmalloc(p)	malloc((p) * PAGE_SZ)
#define vfree(b, p)	free(b)
#endif

// file-scope allocator state
static kmem_cache_t slab_cache = {0};	// backs "external" slab_t headers (see kmem_cache_expand)
static kmem_cache_t cache_cache = {0};	// backs kmem_cache_t objects themselves
static avl_node_t* slab_avltree_header = NULL;	// address -> slab lookup tree used by kmem_cache_free
DECLARE_PRIVATE_SPINLOCK(slab_avltree_spinlock);	// guards slab_avltree_header

// one row of the kmalloc size-class table
struct kcache_size
{
	size_t chunk_size;	// largest request (bytes) this class serves
	kmem_cache_t* mcache;	// backing cache, filled in by init_kcache_table()
};

struct kcache_size kcache_table[] =
{
	// all the items must be in order from small to large
	// (kmalloc returns from the first class with chunk_size >= request)
	{ 32, NULL },
	{ 64, NULL },
	{ 96, NULL },
	{ 128, NULL },
	{ 192, NULL },
	{ 256, NULL },
	{ 384, NULL },
	{ 512, NULL },
	{ 768, NULL },
	{ 1024, NULL },
	{ 1536, NULL },
	{ 2048, NULL },
	{ 3072, NULL },
	{ 4096, NULL }
};

// General-purpose kernel allocation: route the request to the smallest
// size-class cache that can hold it. Returns NULL when sz exceeds the
// largest class (or the chosen cache cannot satisfy the request).
void* kmalloc(size_t sz)
{
	struct kcache_size *entry = kcache_table;
	struct kcache_size *const end = kcache_table
		+ sizeof(kcache_table) / sizeof(kcache_table[0]);

	for (; entry < end; ++entry)
	{
		if (sz <= entry->chunk_size)
			return kmem_cache_alloc(entry->mcache);
	}

	// request is larger than the biggest kcache bucket
	return NULL;
}

// Decide whether the slab_t header can live at the front of the slab's
// own pages (return the 16-byte-aligned header size) or must be
// allocated externally (return 0, see kmem_cache_expand).
// the sz shall already align with KMEM_CACHE_NODE_ALIGNMENT
static uint calc_slab_coloroff(size_t sz)
{
	const uint coloroff = (sizeof(slab_t) + 15) & ~15;

	// tiny objects: an in-page header always pays for itself
	if (sz < coloroff)
		return coloroff;

	// sub-page objects: embed the header only when the per-page
	// remainder already leaves room for it
	if (sz < PAGE_SZ)
		return ((PAGE_SZ % sz) < coloroff) ? 0 : coloroff;

	// page-sized or larger objects: an exact page multiple leaves no
	// slack for the header at all
	return ((sz % PAGE_SZ) == 0) ? 0 : coloroff;
}

// Variant of calc_slab_coloroff for a caller-fixed object count: keep
// the in-page header only when it does not cost an extra page.
// the sz shall already align with KMEM_CACHE_NODE_ALIGNMENT
static uint calc_slab_coloroff_with_max_items(size_t sz, size_t maxitems)
{
	const uint payload = sz * maxitems;
	const uint header = (sizeof(slab_t) + 15) & ~15;
	const uint pgs_plain = (payload + PAGE_SZ - 1) / PAGE_SZ;
	const uint pgs_headed = (payload + header + PAGE_SZ - 1) / PAGE_SZ;

	return (pgs_headed == pgs_plain) ? header : 0;
}

// slab sizing policy knobs used by calc_slab_pages()
#define SLAB_MIN_ITEMS			(8)	/* a slab never holds fewer objects than this */
#define SLAB_MAX_ITEMS			(256)	/* ...nor more than this */
#define SLAB_GOOD_PAGES			(8)	/* preferred upper bound on pages per slab */
#define SLAB_HEAVY_PAGES		(16)	/* tolerated upper bound for larger objects */

// Compute the slab geometry for an object size: how many pages one slab
// spans (*pages) and how many objects it holds (*objs).
// the return of this function indicates
// color-off
// the sz shall already align with KMEM_CACHE_NODE_ALIGNMENT
static uint calc_slab_pages(size_t sz, uint* pages, uint* objs, uint flags)
{
	// candidate item counts, tried largest first, each with the page
	// budget it must fit into:
	// 1) at least contain SLAB_MIN_ITEMS
	// 2) contain no more than SLAB_MAX_ITEMS
	// 3) try to control pages less than SLAB_GOOD_PAGES
	static const struct { uint items; uint page_budget; } plan[] =
	{
		{ SLAB_MAX_ITEMS,                            SLAB_GOOD_PAGES },	// max items
		{ (2 * SLAB_MAX_ITEMS + SLAB_MIN_ITEMS) / 3, SLAB_GOOD_PAGES },	// ~2/3 of max
		{ (SLAB_MAX_ITEMS + SLAB_MIN_ITEMS) / 2,     SLAB_HEAVY_PAGES },	// ~1/2 of max
		{ (SLAB_MAX_ITEMS + 2 * SLAB_MIN_ITEMS) / 3, SLAB_HEAVY_PAGES },	// ~1/3 of max
	};
	uint i;
	uint items = SLAB_MIN_ITEMS;	// fallback when no candidate fits
	uint coloroff = (flags & KMEM_CACHE_COMPACT)
		? ((sizeof(slab_t) + 15) & ~15)
		: calc_slab_coloroff(sz);

	for (i = 0; i < sizeof(plan) / sizeof(plan[0]); ++i)
	{
		if (sz * plan[i].items + coloroff <= plan[i].page_budget * PAGE_SZ)
		{
			items = plan[i].items;
			break;
		}
	}

	kassert(NULL != pages && NULL != objs);
	// round the footprint up to whole pages, then recompute how many
	// objects actually fit in that rounded-up space
	*pages = ((items * sz + coloroff + PAGE_SZ - 1) & ~(PAGE_SZ - 1)) / PAGE_SZ;
	*objs = (*pages * PAGE_SZ - coloroff) / sz;
	return coloroff;
}

// Initialize an already-allocated kmem_cache_t.
// name     - cache label, truncated (NUL-terminated) to fit
//            KMEM_CACHE_NAME_MAX_LEN
// sz       - requested object size; clamped up to MIN_KMEM_CACHE_NODE_SIZE
//            and rounded up to KMEM_CACHE_NODE_ALIGNMENT
// maxitems - optional cap on objects per slab; 0 means "let the system
//            decide", and values above the system-computed count are
//            ignored
// flags    - KMEM_CACHE_COMPACT forces the slab header to live inside
//            the slab's own pages
// NOTE: does not touch cache->spinlock; callers init/lock it themselves.
static void do_init_kmem_cache(kmem_cache_t* cache, char *name, size_t sz, size_t maxitems, uint flags)
{
	size_t len;
	
	kassert(NULL != cache && NULL != name);
	len = strlen(name);
	// copy the name, truncating with an explicit NUL when it is too long
	if (len > KMEM_CACHE_NAME_MAX_LEN - 1)
	{
		memcpy(cache->name, name, KMEM_CACHE_NAME_MAX_LEN - 1);
		cache->name[KMEM_CACHE_NAME_MAX_LEN - 1] = '\0';
	}
	else strcpy(cache->name, name);

	// init list nodes
	listnode_init(cache->ownerlist);
	listnode_init(cache->slabs_partial);
	listnode_init(cache->slabs_empty);
	listnode_init(cache->slabs_full);

	// clamp and align the object size, then derive the slab geometry
	sz = (sz < MIN_KMEM_CACHE_NODE_SIZE) ? MIN_KMEM_CACHE_NODE_SIZE : sz;
	cache->size = (sz + KMEM_CACHE_NODE_ALIGNMENT - 1) & ~(KMEM_CACHE_NODE_ALIGNMENT - 1);
	cache->coloroff = calc_slab_pages(cache->size, &(cache->pages), &(cache->objects), flags);

	cache->ctor = cache->dtor = NULL;
	if (!maxitems) return;

	// if user specify the max items for one slab
	// the "maxitem" is used for user to specify a smaller slab
	// so if the maxitem is larger than the one calculated by system
	// then we'll continue using the one calculated by system
	if (maxitems > cache->objects)
		return;

	// calculate the coloroff (header placement) for the reduced count
	if (flags & KMEM_CACHE_COMPACT) cache->coloroff = (sizeof(slab_t) + 15) & ~15;
	else cache->coloroff = calc_slab_coloroff_with_max_items(cache->size, maxitems);

	cache->objects = maxitems;
	cache->pages = (cache->size * cache->objects + cache->coloroff + PAGE_SZ - 1) / PAGE_SZ;
}

// Create a new object cache. Returns NULL on zero size or when the
// bootstrap cache_cache cannot supply a descriptor. A NULL name is
// replaced with the placeholder "<null>".
kmem_cache_t* kmem_cache_create(char *name, size_t sz,
		size_t max_items_per_slab, uint flags,
		void (*ctor)(kmem_cache_t*, void*, size_t),
		void (*dtor)(kmem_cache_t*, void*, size_t))
{
	kmem_cache_t* cache;

	if (0 == sz)
		return NULL;

	cache = kmem_cache_alloc(&cache_cache);
	if (NULL == cache)
		return NULL;

	init_spinlock(&(cache->spinlock));

	// publish the descriptor only after it is fully initialized
	spin_lock(&(cache->spinlock));
	do_init_kmem_cache(cache, (NULL != name) ? name : "<null>",
		sz, max_items_per_slab, flags);
	cache->ctor = ctor;
	cache->dtor = dtor;
	spin_unlock(&(cache->spinlock));

	dbg_output3("<%s>: %u pages with %u objects (sz: %u)\n",\
		cache->name, cache->pages, cache->objects, cache->size);
	return cache;
}

// Convenience wrapper around kmem_cache_create: default slab sizing
// (max_items_per_slab = 0) and no flags.
kmem_cache_t* easy_kmem_cache_create(char* name, size_t sz,
			void (*ctor)(kmem_cache_t*, void*, size_t),
			void (*dtor)(kmem_cache_t*, void*, size_t))
{
	kmem_cache_t* cache = kmem_cache_create(name, sz, 0, 0, ctor, dtor);
	return cache;
}

// AVL ordering: each slab represents the half-open byte range
// [buffer, buffer + pages * PAGE_SZ), so a lookup node whose `buffer`
// points anywhere inside a slab compares equal to that slab.
//
// NOTE(review): `second` must be the in-tree node — its `mcache` is
// dereferenced for the range size, and lookup keys (the dummy slab in
// kmem_cache_free) leave `mcache` uninitialized. Verify avl_find's
// argument order preserves this.
static int slab_avltree_compare(avl_node_t* a, avl_node_t* b)
{
	slab_t *first = AVLNODE_ENTRY(slab_t, avlnode, a);
	slab_t *second = AVLNODE_ENTRY(slab_t, avlnode, b);
	kmem_cache_t* cache = second->mcache;

	// BUGFIX: compare as char* instead of casting through uint — the
	// old (uint) casts truncate pointers on targets where pointers are
	// wider than uint (e.g. LP64), corrupting the tree order. On the
	// original 32-bit target the comparison is unchanged.
	char *p = (char*)first->buffer;
	char *lo = (char*)second->buffer;
	char *hi = lo + cache->pages * PAGE_SZ;

	if (p < lo)
		return -1;
	if (p >= hi)
		return 1;
	return 0;
}

// Grow the cache by one slab (cache->pages pages of backing memory) and
// put it on the empty list. Called with cache->spinlock held; failure
// is silent — the caller simply sees the empty list still empty.
static void kmem_cache_expand(kmem_cache_t* cache)
{
	void* buffer;
	slab_t *slab;

	kassert(NULL != cache);

	// allocate the buffer for pages
	buffer = (void*)vmalloc(cache->pages);
	if (NULL == buffer) return;

	// allocate the slab structure if necessary
	// (coloroff == 0 means the slab header cannot live inside the
	// buffer, so it comes from slab_cache instead; slab_cache is
	// created with KMEM_CACHE_COMPACT, which keeps its own coloroff
	// nonzero and so avoids recursing back into this branch)
	if (!cache->coloroff)
	{
		dbg_output3("kmem_cache_expand: allocate an external slab object.\n");
		slab = (slab_t *)kmem_cache_alloc(&slab_cache);
		if (NULL == slab)
		{
			vfree(buffer, cache->pages);
			return;
		}
	}
	else slab = (slab_t *)buffer;

	// initialize the slab
	slab->mcache = cache;
	slab->buffer = (void*)((uint)buffer + cache->coloroff);
	slab->first = 0;		// next never-handed-out object index
	slab->freelist.next = NULL;	// no recycled objects yet
	slab->used = 0;

	listnode_add(cache->slabs_empty, slab->ownerlist);

	// publish the slab in the global address->slab tree so that
	// kmem_cache_free can map a raw pointer back to its slab
	spin_lock(&slab_avltree_spinlock);
	avl_insert(&slab_avltree_header, &(slab->avlnode),
		slab_avltree_compare);
	spin_unlock(&slab_avltree_spinlock);
}

// Re-home a slab onto the cache's full list.
// this function is not locked
static void kmem_cache_move_to_full_list(kmem_cache_t* cache, slab_t *slab)
{
	kassert(NULL != slab && NULL != cache);
	// unlink from whichever list currently holds the slab ...
	listnode_del(slab->ownerlist);
	// ... and park it on the full list
	listnode_add(cache->slabs_full, slab->ownerlist);
}

// Re-home a slab onto the cache's partial list.
// this function is not locked
static void kmem_cache_move_to_partial_list(kmem_cache_t* cache, slab_t *slab)
{
	kassert(NULL != slab && NULL != cache);
	// unlink from whichever list currently holds the slab ...
	listnode_del(slab->ownerlist);
	// ... and park it on the partial list
	listnode_add(cache->slabs_partial, slab->ownerlist);
}

// Re-home a slab onto the cache's empty list.
// this function is not locked
static void kmem_cache_move_to_empty_list(kmem_cache_t* cache, slab_t *slab)
{
	kassert(NULL != slab && NULL != cache);
	// unlink from whichever list currently holds the slab ...
	listnode_del(slab->ownerlist);
	// ... and park it on the empty list
	listnode_add(cache->slabs_empty, slab->ownerlist);
}

// NOTE(review): despite its name, this returns true when the slab is
// completely IN USE (no free objects left) — i.e. "full", not "empty".
// Its caller in kmem_cache_alloc uses it to decide whether a slab must
// move to the full list after an allocation. Consider renaming to
// kmem_cache_is_full_slab in a coordinated change.
static bool kmem_cache_is_empty_slab(kmem_cache_t* cache, slab_t *slab)
{
	kassert(NULL != slab);
	return (slab->used >= cache->objects) ? true : false;
}

// Carve one object out of a slab: hand out never-used space first, then
// fall back to the recycled-object freelist. Returns NULL when the slab
// is exhausted.
// this function is not locked
// this function does not deal with slab status (like partial -> full)
static void* kmem_cache_slab_alloc(kmem_cache_t* cache, slab_t *slab)
{
	void* obj;

	kassert(NULL != slab);
	if (slab->first < cache->objects)
	{
		// bump-pointer path: next pristine object in the buffer
		uint idx = slab->first++;
		obj = (void*)(((uint)slab->buffer) + idx * cache->size);
	}
	else
	{
		// all fresh objects handed out; pop from the freelist
		slab_freelist_node_t *head = slab->freelist.next;
		if (NULL == head)
			return NULL;
		slab->freelist.next = head->next;
		obj = (void*)head;
	}

	slab->used++;
	return obj;
}

// Allocate one object from the cache. Prefers a partially-used slab,
// falls back to an empty slab (expanding the cache when none exists),
// runs the cache's constructor on the new object, and returns NULL on
// failure (or when cache is NULL).
void* kmem_cache_alloc(kmem_cache_t *cache)
{
	void* ret;
	slab_t *slab;
	listnode_t* node;
	slab_t *empty_slab = NULL;	// set when the slab came off the empty list

	if (NULL == cache)
		return NULL;

	spin_lock(&(cache->spinlock));

	if (listnode_isempty(cache->slabs_partial))
	{
		// see if there is any node in empty list
		if (listnode_isempty(cache->slabs_empty))
			kmem_cache_expand(cache);

		if (listnode_isempty(cache->slabs_empty))
			goto kmem_cache_alloc_error;

		node = cache->slabs_empty.next;
		slab = list_entry(slab_t, ownerlist, node);
		empty_slab = slab;
	}
	else
	{
		node = cache->slabs_partial.next;
		slab = list_entry(slab_t, ownerlist, node);
	}

	// allocate a node from slab
	ret = kmem_cache_slab_alloc(cache, slab);
	if (ret)
	{
		// re-arrange the slab to its list
		// BUGFIX: check fullness FIRST — a slab taken from the empty
		// list becomes full immediately when cache->objects == 1; the
		// old code parked it on the partial list in that case.
		// (kmem_cache_is_empty_slab, despite its name, tests fullness)
		if (kmem_cache_is_empty_slab(cache, slab))
			kmem_cache_move_to_full_list(cache, slab);
		else if (empty_slab)
			kmem_cache_move_to_partial_list(cache, slab);
	}
	spin_unlock(&(cache->spinlock));

	// initialize the object
	// BUGFIX: only run the constructor on a real object; the old code
	// invoked ctor with ret == NULL when the slab allocation failed.
	if (ret && cache->ctor) cache->ctor(cache, ret, cache->size);
	return ret;
	
kmem_cache_alloc_error:
	spin_unlock(&(cache->spinlock));
	return NULL;
}

// Return object p to its slab's freelist and re-home the slab on the
// correct list (full -> partial, last object out -> empty).
// this function is not locked
static void kmem_cache_release_node(kmem_cache_t *cache, slab_t *slab, void* p)
{
	slab_freelist_node_t *node = (slab_freelist_node_t*)p;
	// remember whether the slab was full BEFORE releasing this node
	bool was_full = (slab->used == cache->objects);

	// push the object onto the slab's freelist
	node->next = slab->freelist.next;
	slab->freelist.next = node;
	--slab->used;

	// BUGFIX: test emptiness before "was full" — with a 1-object slab
	// a full slab drops straight to empty, but the old code moved it
	// to the partial list and returned before the empty-list check.
	if (!slab->used)
		kmem_cache_move_to_empty_list(cache, slab);
	else if (was_full)
		kmem_cache_move_to_partial_list(cache, slab);
}

// Return object p to the slab it was allocated from. The owning slab is
// located through the global AVL tree, which orders slabs by the byte
// range [buffer, buffer + pages * PAGE_SZ). Unknown pointers are logged
// and ignored; NULL is a no-op.
void kmem_cache_free(void *p)
{
	slab_t *slab;
	slab_t dummyslab;
	avl_node_t* node;

	if (NULL == p)
		return;

	// find the slab for this pointer
	// NOTE(review): only dummyslab.buffer is initialized, yet
	// slab_avltree_compare dereferences mcache on its SECOND argument —
	// this relies on avl_find passing the lookup key first and the
	// in-tree node second; verify against the avl_find implementation.
	dummyslab.buffer = p;

	// need lock
	spin_lock(&slab_avltree_spinlock);
	node = avl_find(slab_avltree_header, &dummyslab.avlnode, slab_avltree_compare);
	spin_unlock(&slab_avltree_spinlock);
	if (NULL == node)
	{
		dbg_output3("kmem_cache_free: pointer (%x) not found in slab.\n", (uint)p);
		return;
	}

	slab = AVLNODE_ENTRY(slab_t, avlnode, node);

	// NOTE(review): the tree lock is dropped before the cache lock is
	// taken; nothing visible here removes slabs from the tree, but if
	// slab retirement is ever added this window becomes a use-after-free.
	spin_lock(&(slab->mcache->spinlock));
	kmem_cache_release_node(slab->mcache, slab, p);
	spin_unlock(&(slab->mcache->spinlock));
}

// Bootstrap the two self-hosting caches:
//  - cache_cache: the cache kmem_cache_t descriptors come from
//  - slab_cache:  the cache external slab_t headers come from; created
//    with KMEM_CACHE_COMPACT so expanding it never recurses into itself
static void init_cache_cache(void)
{
	init_spinlock(&(cache_cache.spinlock));
	// BUGFIX: slab_cache's spinlock was never initialized even though
	// kmem_cache_alloc(&slab_cache) locks it when kmem_cache_expand
	// needs an external slab header.
	init_spinlock(&(slab_cache.spinlock));

	// initialize "cache_cache"
	do_init_kmem_cache(&cache_cache, "cache_cache",
		sizeof(kmem_cache_t), MAX_ITEM_OF_CACHE_CACHE, 0);

	// initialize "slab_cache"
	do_init_kmem_cache(&slab_cache, "slab_cache",
		sizeof(slab_t), (PAGE_SZ / sizeof(slab_t)) - 1,
		KMEM_CACHE_COMPACT);
}

static void init_kcache_table(void)
{
	uint i;
	const uint total = sizeof(kcache_table)
		/ sizeof(struct kcache_size);

	for (i = 0; i < total; ++i)
	{
		char buf[32];
		sprintf(buf, "kcache_%u", kcache_table[i].chunk_size);
		kcache_table[i].mcache = easy_kmem_cache_create(
			buf, kcache_table[i].chunk_size, NULL, NULL);
		kassert(NULL != kcache_table[i].mcache);
	}
}

// Module entry point: bootstrap the self-hosting caches, then build the
// kmalloc size-class table. Must run before any kmalloc or
// kmem_cache_create call.
void global_init_kmem_cache_module(void)
{
	// init the cache_cache
	init_cache_cache();
	
	// init the kcache table for kmalloc/kfree
	init_kcache_table();
}

// test in win32
#ifdef TEST_DBG_WIN32

// Win32 smoke test: exercise alloc/free round trips on cache_cache
// (including overflowing one 256-object slab) and a kmalloc/kfree pair.
int slab_test(void)
{
	void* buf;

	global_init_kmem_cache_module();

	// single alloc/free round trip
	buf = kmem_cache_alloc(&cache_cache);
	kmem_cache_free(buf);

	{
		int i;
		void* bufar[256];
		void* overflow;

		// fill one whole slab, then allocate one object beyond it
		for (i = 0; i < 256; ++i)
			bufar[i] = kmem_cache_alloc(&cache_cache);
		overflow = kmem_cache_alloc(&cache_cache);

		// release everything again
		for (i = 0; i < 256; ++i)
			kmem_cache_free(bufar[i]);
		kmem_cache_free(overflow);
	}

	{
		// kmalloc/kfree path through the size-class table
		void *p = kmalloc(16);
		kfree(p);
	}
	return 0;
}

#endif
/* EOF */
