/************************************************************************
 *				SRLOS Team CopyRight
 *@file		slab.c
 *@author	bloceanc
 *@date		7/20/2010
 *@note		kernel small memory block manager
 ************************************************************************/

#include "commdef.h"
#include "mt_mutex.h"
#include "mem_slab.h"
#include "mem_config.h"
#include "lib.h"

// Global slab pool table. The largest managed object size is
// MEM_SLAB_MAX_OBJ_SIZE (512B), giving 512 / 4 = 128 size classes.
// The first element (index 0) is reserved and never used; the extra
// slot keeps the index math simple (idx = objsize / 4) and leaves
// room for future extension.

static mem_slab_pool g_slab_pool[(MEM_SLAB_MAX_OBJ_SIZE>>2)+1];

/********************************************************************
 * 				EXTERN FUNCTION
 ********************************************************************/
extern void mt_mutex_initset(k_mutex *mutex);
extern int mt_mutex_lock(k_mutex *mutex, long long time);
extern void mt_mutex_unlock(k_mutex *mutex);

extern void comm_global_critical_section_enter(void);
extern void comm_global_critical_section_leave(void);

extern void __mmu_os_vritual_free(unsigned int page_viraddr, unsigned int page_count);
extern void *__mmu_os_virtual_allocate(unsigned int page_count);

/*
 *	initialize slab list
 */
void mem_slab_initialize(void)
{
	unsigned int i;
	for(i = 0; i < (MEM_SLAB_MAX_OBJ_SIZE>>2)+1; i++)
	{
		g_slab_pool[i].first_slab = NULL;
		g_slab_pool[i].free_count = 0;
		g_slab_pool[i].max_pagecount = MEM_SLAB_MAX_PAGE_COUNT;
		g_slab_pool[i].page_count = 0;
		mt_mutex_initset(&g_slab_pool[i].lock);
	}
}

/*
 *  add slab to list
 */
/*
 *	Insert a slab at the head of its size-class list and fold its
 *	free-object and page counts into the pool totals.
 *	Silently rejects slabs whose object size is not a 4-byte
 *	multiple of at least 4 (such a slab cannot be indexed).
 */
static void s_mem_slab_addlist(mem_slab *pslab)
{
	unsigned int objsize = pslab->obj_size;
	mem_slab_pool *pool;

	// object size must be >= 4 and 4-byte aligned
	if((objsize < 4) || (objsize & 0x3))
	{
		return;
	}
	pool = &g_slab_pool[objsize >> 2];	// size class = objsize / 4

	// the slab lists are shared: guard with the kernel global lock
	comm_global_critical_section_enter();

	// push onto the head of the doubly linked list
	pslab->prev = NULL;
	pslab->next = pool->first_slab;
	if(pslab->next)
		pslab->next->prev = pslab;
	pool->first_slab = pslab;

	// account the slab's capacity in the pool totals
	pool->free_count += pslab->free_count;
	pool->page_count += pslab->page_count;

	comm_global_critical_section_leave();
}

/*
 *	remove slab from list
 */
/*
 *	Unlink a slab from its size-class list and subtract its
 *	free-object and page counts from the pool totals.
 */
static void s_mem_slab_rmlist(mem_slab *pslab)
{
	mem_slab_pool *pool;

	if(pslab == NULL)
		return;

	pool = &g_slab_pool[pslab->obj_size >> 2];

	// the slab lists are shared: guard with the kernel global lock
	comm_global_critical_section_enter();

	// canonical doubly-linked unlink: the two tests below cover all
	// four prev/next combinations of the original branch ladder
	if(pslab->prev)
		pslab->prev->next = pslab->next;
	else
		pool->first_slab = pslab->next;	// slab was the list head
	if(pslab->next)
		pslab->next->prev = pslab->prev;

	pool->free_count -= pslab->free_count;
	pool->page_count -= pslab->page_count;

	comm_global_critical_section_leave();
}

/*
 *	allocate new slab, and insert to the slab list
 * 	size	slab buffer size.
 */
/*
 *	Allocate a new slab and insert it into the slab list.
 *	@bufsize	requested slab buffer size (rounded up to whole pages)
 *	@objsize	object size this slab serves (rounded up to 4 bytes)
 *	@return		the new slab, or NULL on failure
 *
 *	Layout: the mem_slab header sits at the START of the page run,
 *	followed by free_count chunks of (objsize + chunk header) bytes.
 */
mem_slab *s_mem_slab_create(unsigned int bufsize, unsigned int objsize)
{
	void *page;
	unsigned int count;
	mem_slab *slab;
	
	if(objsize < 4 || objsize >= MEM_SLAB_MAX_OBJ_SIZE )
	{
		// we only manage small, 4-byte-granular objects
		return NULL;
	}
	
	// round objsize up to a 4-byte multiple
	objsize = (objsize + 3)&~3;
	
	// round bufsize up to a whole page (4KB) boundary
	bufsize = (bufsize + (MEM_CONFIG_PAGE_SIZE - 1))&~(MEM_CONFIG_PAGE_SIZE - 1);
	
	// allocate new slab buffer pages
	count = bufsize >> 12;	// pages of 4KB — assumes MEM_CONFIG_PAGE_SIZE == 4096
	page = __mmu_os_virtual_allocate(count);
	if(page == NULL)
	{
		return NULL;
	}
	
	slab = (mem_slab *)page;	// header lives at the page base
	slab->freelist = NULL;
	slab->page_count = count;
	slab->obj_size = objsize;
	slab->start_object = (void *)((unsigned int)page + MEM_SLAB_HEADER_SIZE);
	
	// chunks that fit after the header: (bufsize - header) / (objsize + chunk header)
	slab->free_count = uldiv((bufsize - MEM_SLAB_HEADER_SIZE),(objsize + MEM_SLAB_CHUNK_HEADER_SIZE));
	if(slab->free_count == 0)
	{
		// BUG FIX: buffer too small to hold even one object. Previously
		// such a slab was still linked in, and kernel_allocate() would
		// later dereference its NULL freelist. Release the pages instead.
		__mmu_os_vritual_free((unsigned int)page, count);
		return NULL;
	}
	slab->end_object = (unsigned int *)((unsigned int)slab->start_object + slab->free_count * (objsize + MEM_SLAB_CHUNK_HEADER_SIZE));	// one past the last chunk (not included)
	// add to list
	s_mem_slab_addlist(slab);
	return slab;
}

/*
 * 	shunk slab. free slab pages
 */
/*
 * 	Shrink the pool: unlink a (fully free) slab and return its pages
 *	to the virtual memory manager.
 */
static void s_mem_slab_shunk(mem_slab *pslab)
{
	unsigned int page_count;

	if(pslab == NULL)
		return;
	
	// capture before the pages become invalid
	page_count = pslab->page_count;

	// remove from list first
	s_mem_slab_rmlist(pslab);
	
	// BUG FIX: the slab header sits at the START of the page run
	// (s_mem_slab_create does slab = (mem_slab *)page), so the page
	// base address is pslab itself. The old code subtracted
	// MEM_SLAB_HEADER_SIZE and freed one header-size BEFORE the
	// mapping, corrupting the neighbouring region.
	__mmu_os_vritual_free((unsigned int)pslab, page_count);
}

/*
 * 	object allocator
 */
/*
 * 	Small-object allocator.
 *	@size	requested object size in bytes (rounded up to 4)
 *	@return	pointer to the object payload, or NULL on failure
 *
 *	Strategy: objects are first carved sequentially from the slab's
 *	never-touched region (start_object..end_object); once that region
 *	is exhausted, allocation falls back to the slab's free list, which
 *	kernel_free() refills.
 */
void *kernel_allocate(unsigned int size)
{
	mem_slab_chunk *chunk = NULL;
	mem_slab *slab = NULL;
	k_mutex *slab_mutex = NULL;
	
	// round request up to a 4-byte multiple
	size = (size + 3)&~3;
	
	if(size >= MEM_SLAB_MAX_OBJ_SIZE)
	{
		// only small blocks are served here
		return NULL;
	}
	
	slab_mutex = &g_slab_pool[size>>2].lock;
	
	// lock the size-class pool; we must wait forever
	mt_mutex_lock(slab_mutex, -1);
	
	// find the first slab with spare capacity
	for(slab = g_slab_pool[size>>2].first_slab;
		slab != NULL && slab->free_count == 0;
		slab = slab->next);
	if(slab == NULL)
	{
		// no slab has room: grow the pool with a fresh slab
		slab = s_mem_slab_create(MEM_SLAB_ALLOCATE_PAGE_COUNT << 12, size);
		if(slab == NULL)
		{
			mt_mutex_unlock(slab_mutex);
			return NULL;
		}
	}
	
	if(slab->start_object != slab->end_object)
	{
		// sequential region not yet exhausted: carve the next chunk
		chunk = (mem_slab_chunk *)slab->start_object;
		slab->start_object = (void *)((unsigned int)slab->start_object
									+ MEM_SLAB_CHUNK_HEADER_SIZE + size);
	}
	else
	{
		// sequential region exhausted: pop the head of the free list
		chunk = (mem_slab_chunk *)slab->freelist;
		slab->freelist = chunk->slaborchunk.nextfree;
	}
	slab->free_count--;
	// BUG FIX: record the owning slab in the chunk header on BOTH
	// paths. The sequential path previously left slaborchunk.slab
	// uninitialized, so kernel_free() dereferenced garbage for every
	// chunk that had never been through the free list.
	chunk->slaborchunk.slab = slab;
	// NOTE(review): g_slab_pool[size>>2].free_count is not decremented
	// here (nor incremented in kernel_free), so the pool-level counter
	// only tracks slab add/remove — confirm whether that is intended.
	
	// unlock here!
	mt_mutex_unlock(slab_mutex);
	// return chunk + 1 (the payload starts right after the chunk
	// header) — NOT chunk++, which would return the pre-increment value
	return (chunk + 1);
}

/*
 *	free object chunk
 */
void kernel_free(void *address)
{
	mem_slab_chunk *chunk = NULL;
	mem_slab *slab = NULL;
	k_mutex *slab_mutex = NULL;
	
	if(address == NULL)
		return;
	chunk = (mem_slab_chunk *)((unsigned int)address - MEM_SLAB_CHUNK_HEADER_SIZE);
	slab = chunk->slaborchunk.slab;
	
	// lock pool , free object
	slab_mutex = &g_slab_pool[slab->obj_size>>2].lock;
	mt_mutex_lock(slab_mutex, -1);	// must wait forever
	chunk->slaborchunk.nextfree = slab->freelist;
	slab->freelist = chunk;
	slab->free_count++;
	
	// shunk slab pool if we can
	if(g_slab_pool[slab->obj_size>>2].page_count > g_slab_pool[slab->obj_size>>2].max_pagecount)
	{
		// check whether can shunk current slab
		unsigned int allsize = (slab->free_count + 1) * (slab->obj_size + 
									MEM_SLAB_CHUNK_HEADER_SIZE) + MEM_SLAB_HEADER_SIZE;	// inc for reserved!
		if(allsize >= (slab->page_count * MEM_CONFIG_PAGE_SIZE))
		{
			// we can shunk this slab, here will not occure deadlock, because they run in same thread!
			s_mem_slab_shunk(slab);
		}
	}
	// unlock here
	mt_mutex_unlock(slab_mutex);	/* we must use pointer, 
									 otherwise if shunk this slab, the data will be losed! */
}
