#include "std/debug.h"
#include "kernel/vma.h"
#include "kernel/chunk.h"

// Memory layout: the chunk_mgr_t header lives at the very start of the
// vma's address range, immediately followed by an array of chunk_t
// descriptors; the remaining pages (from mgr->user_start on) form the
// user allocation area.
// chunk->addr and chunk->size are counted in pages, relative to the
// start of the user area. Chunks reference each other by their index in
// the descriptor array (an unsigned short), which is what the
// next_chunk/prev_chunk fields store.

// the chunk manager sits at the base address of the vma
#define vma2chkmgr(v)				((chunk_mgr_t *)((v)->addr))
// first element of the chunk descriptor array, right after the header
#define _first_chunk(k)				((chunk_t*)((k) + 1))
// descriptor at array index i
#define _specify_chunk(k, i)		(_first_chunk(k) + (i))
// neighbours in the address-ordered circular doubly-linked list
#define _next(k, c)					(_first_chunk(k) + (c)->next_chunk)
#define _prev(k, c)					(_first_chunk(k) + (c)->prev_chunk)
// array index of descriptor c
#define _chunk_ptr(k, c)			((unsigned short)(c - _first_chunk(k)))
// virtual start/end address of the pages a chunk describes:
// vma base + (user_start + chunk page offset) * PAGE_SZ
#define _chunk_start_addr(k, c)		(((uint)(k)) + (((uint)((k)->user_start)) + ((uint)((c)->addr))) * PAGE_SZ)
#define _chunk_end_addr(k, c)		(_chunk_start_addr(k, c) + ((uint)((c)->size)) * PAGE_SZ)
// store the array index of s into c's link fields
#define _set_chunk_next(k, c, s)	do { (c)->next_chunk = _chunk_ptr(k, s); } while (0)
#define _set_chunk_prev(k, c, s)	do { (c)->prev_chunk = _chunk_ptr(k, s); } while (0)

// Sizing derivation — the chunk_mgr_t header and the chunk descriptor
// array live inside the vma itself, so they eat into the page budget:
// t = total_pages
// s = sizeof(chunk_t)
// p = real pages for allocation
// h = sizeof(chunk_mgr_t)
// we have formula:
// ((s * p) / 2 + s + h) / PAGE_SZ + p = t
// (presumably p/2 is the worst-case number of free chunks, and the
// extra "+ s" is the scratch descriptor used by heap_siftdown —
// TODO(review): confirm)
// finally we got:
// p = ((PAGE_SZ * t - s - h) * 2) / (PAGE_SZ * 2 + s)
//
// Set up the chunk allocator inside `vma`: place the manager header at
// the vma base, compute how many pages remain for user allocation, and
// create a single free chunk spanning all of them. Returns false when
// the vma is too small or would need more than 16-bit chunk indices.
bool init_chunk(vma_t *vma)
{
	uint real_pages;
	uint total_pages;
	chunk_t *chunk;
	chunk_mgr_t *chkmgr;

	kassert(NULL != vma && vma->size);
	// one page is held back from the budget — NOTE(review): confirm why
	total_pages = vma->size - 1;
	if (total_pages < 1) return false;

	real_pages = ((PAGE_SZ * total_pages - sizeof(chunk_t)
		- sizeof(chunk_mgr_t)) * 2) / (PAGE_SZ * 2 + sizeof(chunk_t));
	kassert(real_pages < vma->size);

	// chunk indices are unsigned short and 0xFFFF serves as the
	// empty-list sentinel, so cap the page count well below that
	if (real_pages > 0xFFFF - 2) return false;

	// create the chunk manager
	chkmgr = vma2chkmgr(vma);
	kassert(NULL != chkmgr);
	// user pages start right after the metadata region
	chkmgr->user_start = vma->size - real_pages;

	// at the very beginning, free chunks always be 1
	chkmgr->free_chunks = 1;
	chkmgr->chunk_list = 0;
	chkmgr->free_user_pages = real_pages;
	init_spinlock(&chkmgr->spinlock);

	// create the first free chunk covering the whole user area,
	// linked to itself in the circular list
	chunk = (chunk_t *)(chkmgr + 1);
	chunk->size = real_pages;
	chunk->addr = 0;
	_set_chunk_prev(chkmgr, chunk, chunk);
	_set_chunk_next(chkmgr, chunk, chunk);

	dbg_output3("init_chunk: real %u pages of total %u pages in vma [%08X-%08X]\n",
		real_pages, vma->size, vma->addr, vma->addr + vma->size * PAGE_SZ);
	return true;
}

// After a chunk_t has been copied from array slot `from_chk_ptr` into
// the slot pointed to by `to`, repair the circular list so that the
// neighbours (or the node itself, if it was self-linked) reference the
// new slot instead of the old one.
static inline void chunk_revise_link(chunk_mgr_t* mgr, chunk_t *to, uint from_chk_ptr)
{
	chunk_t *prev = _prev(mgr, to);
	chunk_t *next = _next(mgr, to);
	kassert(prev != to && next != to);

	// if the node's link still points at its old slot (it was its own
	// neighbour in a single-node list), relink it to itself at the new
	// slot; otherwise fix the neighbour's back-reference
	if (to->next_chunk == from_chk_ptr)
		_set_chunk_next(mgr, to, to);
	else _set_chunk_prev(mgr, next, to);
	if (to->prev_chunk == from_chk_ptr)
		_set_chunk_prev(mgr, to, to);
	else _set_chunk_next(mgr, prev, to);
}

// Binary max-heap sift-down over the chunk descriptor array, ordered by
// chunk size, with one twist: every time a chunk_t is copied to another
// slot its list links must be repaired via chunk_revise_link().
// a[n] (the slot just past the live heap) is used as scratch storage
// for the value being sifted, so the array must have room for n + 1
// descriptors.
static void heap_siftdown(chunk_mgr_t* mgr, uint i, uint n)
{
	uint j;
	chunk_t *a = _first_chunk(mgr);

	// temporary use the last chunk node
	// to save the value
	a[n] = a[i];
	chunk_revise_link(mgr, &a[n], i);

	while ((j = 2 * i + 1) < n)
	{
		// pick the larger of the two children
		if (j < n - 1 && a[j].size < a[j + 1].size)
			j++;

		// child is larger than the sifted value: pull it up and descend
		if (a[n].size < a[j].size)
		{
			a[i] = a[j];
			chunk_revise_link(mgr, &a[i], j);
			i = j;
		}
		else break;
	}
	// drop the saved value into its final position
	a[i] = a[n];
	chunk_revise_link(mgr, &a[i], n);
}

// Rebuild the max-heap over all free chunks, bottom-up (classic
// heapify): sift down every internal node, starting from the parent of
// the last element. Caller must hold mgr->spinlock.
static void chunk_resched(chunk_mgr_t *mgr)
{
	int node = (mgr->free_chunks - 2) / 2;

	while (node >= 0)
	{
		heap_siftdown(mgr, node, mgr->free_chunks);
		--node;
	}
}

// Allocate `pgs` pages from the vma and return their virtual address,
// or NULL when no single free chunk is large enough (or on bad
// parameters). Pages are always carved from the front of the largest
// free chunk (the heap root at index 0), so a chunk never needs to be
// split in two.
void* chunk_vmalloc(vma_t *vma, size_t pgs)
{
	uint addr;
	chunk_t *chunk;
	chunk_mgr_t *mgr;

	if (NULL == vma || !pgs)
		return NULL;

	mgr = vma2chkmgr(vma);

	// need lock
	spin_lock(&mgr->spinlock);

	if (!mgr->free_chunks) goto chunk_vmalloc_fail;
	chunk = _first_chunk(mgr);

	if (chunk->size < pgs)
	{
		if (mgr->free_chunks == 1)
			goto chunk_vmalloc_fail;

		// rebuild the heap (move the largest chunk be the first one)
		heap_siftdown(mgr, 0, mgr->free_chunks);
		if (chunk->size < pgs)
			goto chunk_vmalloc_fail;
	}

	// adjust the first (largest) chunk: take `pgs` pages off its front
	addr = _chunk_start_addr(mgr, chunk);
	chunk->size -= pgs;
	chunk->addr += pgs;

	// NOTE(review): this only asserts the counter is non-zero, not that
	// it is >= pgs — confirm the subtraction below cannot underflow
	kassert(mgr->free_user_pages > 0);
	mgr->free_user_pages -= pgs;

	// if this chunk is use out
	if (!chunk->size)
	{
		if (--mgr->free_chunks)
		{
			// adjust the header of chunk list
			if (mgr->chunk_list == _chunk_ptr(mgr, chunk))
				mgr->chunk_list = _chunk_ptr(mgr, _next(mgr, chunk));

			// invalidate the first "empty" chunk by
			// moving last one to first and rebuild heap
			// (1) move last one to first
			*chunk = chunk[mgr->free_chunks];
			chunk_revise_link(mgr, chunk, mgr->free_chunks);

			// (2) rebuild the heap
			// by doing so, the first chunk remains the largest one
			heap_siftdown(mgr, 0, mgr->free_chunks);
		}
		else
		{
			// no free chunks left at all: mark the list empty
			kassert(!mgr->free_user_pages);
			mgr->chunk_list = 0xFFFF;
		}
	}

	spin_unlock(&mgr->spinlock);
	return (void*)addr;

chunk_vmalloc_fail:
	spin_unlock(&mgr->spinlock);
	return NULL;
}

// Splice `chunk` out of the circular doubly-linked chunk list;
// the descriptor slot itself is left untouched.
static void chunk_del(chunk_mgr_t* mgr, chunk_t *chunk)
{
	chunk_t *before = _prev(mgr, chunk);
	chunk_t *after = _next(mgr, chunk);

	_set_chunk_prev(mgr, after, before);
	_set_chunk_next(mgr, before, after);
}

// Link `chunk` into the circular list immediately in front of `next`.
static void chunk_insert_before(chunk_mgr_t* mgr, chunk_t* chunk, chunk_t* next)
{
	chunk_t* before = _prev(mgr, next);

	_set_chunk_prev(mgr, chunk, before);
	_set_chunk_next(mgr, chunk, next);
	_set_chunk_next(mgr, before, chunk);
	_set_chunk_prev(mgr, next, chunk);
}

// Link `chunk` into the circular list immediately behind `prev`.
static void chunk_insert_after(chunk_mgr_t* mgr, chunk_t* chunk, chunk_t* prev)
{
	chunk_t* follower = _next(mgr, prev);

	_set_chunk_prev(mgr, chunk, prev);
	_set_chunk_next(mgr, chunk, follower);
	_set_chunk_prev(mgr, follower, chunk);
	_set_chunk_next(mgr, prev, chunk);
}

// this function need lock
// Insert the freed page range [addr, addr + pgs * PAGE_SZ) into the
// address-ordered circular chunk list, merging it with the adjacent
// chunk(s) whenever the ranges touch.
// return:
// 0 means success + without heap whole rebuild
//   (only the heap-root chunk grew, so the max-heap is still valid)
// 1 means success + w/ heap whole rebuild
// others means error
//   NOTE(review): no path below actually returns anything other than
//   0 or 1, and overlapping/double frees are not detected — confirm
// note: mgr->free_chunks also adjusted
static int merge_chunk(chunk_mgr_t *mgr, uint addr, uint pgs)
{
	uint prev_end_addr;
	uint next_start_addr;
	chunk_t *prev, *next, *new_chunk;
	uint end_addr = addr + pgs * PAGE_SZ;

	// start at the lowest-addressed chunk (the list head)
	next = _specify_chunk(mgr, mgr->chunk_list);

	// handle the first chunk
	next_start_addr = _chunk_start_addr(mgr, next);
	if (end_addr < next_start_addr)
	{
		// freed range lies strictly before the first chunk: build a new
		// chunk in the first unused descriptor slot (index free_chunks)
		new_chunk = _specify_chunk(mgr, mgr->free_chunks);
		new_chunk->size = pgs;
		new_chunk->addr = (addr - (uint)mgr) / PAGE_SZ - mgr->user_start;
		chunk_insert_before(mgr, new_chunk, next);

		// increase total chunks
		mgr->free_chunks++;

		// update the chunk list
		mgr->chunk_list = _chunk_ptr(mgr, new_chunk);
		return 1;
	}
	else if (end_addr == next_start_addr)
	{
		// merge with the chunk (grow it downwards)
		next->size += pgs;
		next->addr -= pgs;
		return (next == _first_chunk(mgr)) ? 0 : 1;
	}

	// handle all the holes between consecutive chunk pairs
	prev = next;
	next = _next(mgr, prev);
	while (_chunk_ptr(mgr, next) != mgr->chunk_list)
	{
		prev_end_addr = _chunk_end_addr(mgr, prev);
		next_start_addr = _chunk_start_addr(mgr, next);

		if (addr >= prev_end_addr && end_addr <= next_start_addr)
		{
			if (addr == prev_end_addr)
			{
				if (end_addr == next_start_addr)
				{
					// freed range exactly fills the hole:
					// merge 3 chunks into 1 chunk
					prev->size += pgs + next->size;
					chunk_del(mgr, next);

					// one descriptor is now unused; unless it already
					// was the last one, relocate the last descriptor
					// into the freed slot to keep the array dense
					if (--mgr->free_chunks > _chunk_ptr(mgr, next))
					{
						// move last chunk to pos of next
						// and full rebuild the heap later
						*next = *_specify_chunk(mgr, mgr->free_chunks);
						chunk_revise_link(mgr, next, mgr->free_chunks);

						// adjust the chunk_list if it referenced the
						// descriptor that was just relocated
						if (mgr->chunk_list == mgr->free_chunks)
							mgr->chunk_list = _chunk_ptr(mgr, next);

						return 1;
					}
					else return (prev == _first_chunk(mgr)) ? 0 : 1;
				}
				else
				{
					// touches prev only: grow prev upwards
					prev->size += pgs;
					return (prev == _first_chunk(mgr)) ? 0 : 1;
				}
			}
			else if (end_addr == next_start_addr)
			{
				// merge with next chunk (grow it downwards)
				next->size += pgs;
				next->addr -= pgs;
				return (next == _first_chunk(mgr)) ? 0 : 1;
			}
			else
			{
				// cannot merge with any one
				new_chunk = _specify_chunk(mgr, mgr->free_chunks);
				new_chunk->size = pgs;
				new_chunk->addr = (addr - (uint)mgr) / PAGE_SZ - mgr->user_start;
				chunk_insert_before(mgr, new_chunk, next);

				// increase total chunks
				mgr->free_chunks++;

				// need rebuild whole heap
				return 1;
			}
		}

		prev = next;
		next = _next(mgr, prev);
	}

	// finally see if we can merge with the prev (highest-addressed) chunk
	prev_end_addr = _chunk_end_addr(mgr, prev);
	if (prev_end_addr == addr)
	{
		prev->size += pgs;
		return (prev == _first_chunk(mgr)) ? 0 : 1;
	}
	else
	{
		new_chunk = _specify_chunk(mgr, mgr->free_chunks);
		new_chunk->size = pgs;
		new_chunk->addr = (addr - (uint)mgr) / PAGE_SZ - mgr->user_start;
		chunk_insert_after(mgr, new_chunk, prev);

		// increase total chunks
		mgr->free_chunks++;
		return 1;
	}
}

// Return `pgs` pages starting at `_addr` to the vma's chunk allocator.
// Returns 0 on success, ERR_BAD_PARAMETERS / ERR_OUT_OF_RANGE on
// invalid input, or an error code propagated from merge_chunk().
int chunk_vfree(vma_t* vma, void *_addr, uint pgs)
{
	int ret;
	chunk_t *chunk;
	chunk_mgr_t *mgr;
	uint addr = (uint)_addr;

	if (NULL == vma || NULL == _addr || !pgs)
		return ERR_BAD_PARAMETERS;

	mgr = vma2chkmgr(vma);

	// every address handed out by chunk_vmalloc is the vma base plus a
	// whole number of pages (see _chunk_start_addr), so reject anything
	// that is not page-aligned relative to the vma base
	if ((addr - vma->addr) % PAGE_SZ)
		return ERR_BAD_PARAMETERS;

	// check if the addr is in range: it must start inside the user area
	// and the whole [addr, addr + pgs) range must fit before the end of
	// the vma (division form avoids overflow of addr + pgs * PAGE_SZ)
	if (((addr - vma->addr) / PAGE_SZ) < mgr->user_start
		|| addr >= vma->addr + vma->size * PAGE_SZ
		|| pgs > (vma->addr + vma->size * PAGE_SZ - addr) / PAGE_SZ)
		return ERR_OUT_OF_RANGE;

	chunk = _first_chunk(mgr);

	// need lock
	spin_lock(&mgr->spinlock);

	// fast path: no free chunk exists at all, so the freed range
	// becomes the one and only chunk in the heap/list
	if (!mgr->free_chunks)
	{
		chunk->size = pgs;
		chunk->addr = (addr - vma->addr) / PAGE_SZ - mgr->user_start;
		_set_chunk_prev(mgr, chunk, chunk);
		_set_chunk_next(mgr, chunk, chunk);

		mgr->free_chunks++;
		mgr->chunk_list = _chunk_ptr(mgr, chunk);

		goto chunk_vfree_success;
	}

	// merge the freed range into the address-ordered chunk list;
	// ret == 1 means the max-heap property may have been broken and a
	// full rebuild is required
	ret = merge_chunk(mgr, addr, pgs);
	if (ret == 1 && mgr->free_chunks > 1)
		chunk_resched(mgr);
	else if (ret > 1) goto chunk_vfree_fail;

chunk_vfree_success:

	// increase the free user pages
	mgr->free_user_pages += pgs;
	spin_unlock(&mgr->spinlock);
	return 0;

chunk_vfree_fail:
	spin_unlock(&mgr->spinlock);
	return ret;
}

#ifdef TEST_DBG_WIN32
#include <stdio.h>
#include <stdlib.h>		// malloc/free used by chunk_test

// Dump every chunk in the free list (id, page offset, page count),
// walking the circular list once starting from the list head.
void output_chunk_list(chunk_mgr_t *mgr)
{
	chunk_t* cur;

	// 0xFFFF marks an empty free list: nothing to print
	if (0xFFFF == mgr->chunk_list)
		return;

	cur = _specify_chunk(mgr, mgr->chunk_list);
	for (;;) {
		printf("chunkid: %u, start: %u, size: %u\n",
			_chunk_ptr(mgr, cur), cur->addr, cur->size);
		cur = _next(mgr, cur);
		if (_chunk_ptr(mgr, cur) == mgr->chunk_list)
			break;
	}
}

// Host-side smoke test for the chunk allocator: a handful of fixed-size
// allocations, frees in orders that exercise the different merge paths,
// then a full single-page alloc/free sweep over the whole vma.
// Returns 0 on success, -1 when a host-side malloc fails.
int chunk_test(void)
{
	vma_t vma;
	unsigned int i, j;
	unsigned int *bufarray;
	void *a, *b, *c, *d, *e, *f, *g;
	// backing store only needs to hold the allocator metadata; the user
	// pages are never written in this test
	void *buf = malloc(PAGE_SZ * 16);

	if (NULL == buf)
		return -1;

	vma.addr = (uint)buf;
	vma.size = 32 * 1024 / 4;	// 256MB
	vma.vma_ops = NULL;

	init_chunk(&vma);
	a = chunk_vmalloc(&vma, 1);
	kassert(NULL != a);
	// larger than any free chunk: must fail
	b = chunk_vmalloc(&vma, 10000);
	kassert(NULL == b);
	b = chunk_vmalloc(&vma, 32);
	kassert(NULL != b);
	c = chunk_vmalloc(&vma, 40);
	kassert(NULL != c);
	d = chunk_vmalloc(&vma, 32);
	kassert(NULL != d);
	e = chunk_vmalloc(&vma, 50);
	kassert(NULL != e);
	f = chunk_vmalloc(&vma, 60);
	kassert(NULL != f);
	g = chunk_vmalloc(&vma, 55);
	kassert(NULL != g);

	output_chunk_list(vma2chkmgr(&vma));
	puts("\n");

	// free in an order that exercises merge-with-prev, merge-with-next
	// and the three-way merge in merge_chunk
	chunk_vfree(&vma, g, 55);
	output_chunk_list(vma2chkmgr(&vma));
	puts("\n");

	chunk_vfree(&vma, e, 50);
	output_chunk_list(vma2chkmgr(&vma));
	puts("\n");

	chunk_vfree(&vma, f, 60);
	output_chunk_list(vma2chkmgr(&vma));
	puts("\n");

	chunk_vfree(&vma, a, 1);
	output_chunk_list(vma2chkmgr(&vma));
	puts("\n");

	chunk_vfree(&vma, c, 40);
	output_chunk_list(vma2chkmgr(&vma));
	puts("\n");

	chunk_vfree(&vma, d, 32);
	output_chunk_list(vma2chkmgr(&vma));
	puts("\n");

	chunk_vfree(&vma, b, 32);
	output_chunk_list(vma2chkmgr(&vma));
	puts("\n");

	// allocate single pages until the vma is exhausted
	bufarray = (unsigned int *)malloc(sizeof(unsigned int) * vma.size);
	if (NULL == bufarray)
	{
		free(buf);
		return -1;
	}
	for (i = 0; i < vma.size; ++i)
	{
		bufarray[i] = (unsigned int)chunk_vmalloc(&vma, 1);
		if (!bufarray[i]) break;
	}
	printf("malloc %u items.\n", i);

	// free every second page first, to create maximum fragmentation
	for (j = 0; j < i; j += 2)
	{
		chunk_vfree(&vma, (void*)bufarray[j], 1);
		bufarray[j] = 0;
	}

	// then free the rest; note `j < i` (not `j <= i`): when the alloc
	// loop above ran to completion, i == vma.size and bufarray[i]
	// would be an out-of-bounds read
	for (j = 0; j < i; ++j)
	{
		if (bufarray[j])
		{
			chunk_vfree(&vma, (void*)bufarray[j], 1);
			if (j > 8000)
			{
				output_chunk_list(vma2chkmgr(&vma));
				printf("\n\n");
			}
		}
	}

	free(bufarray);
	free(buf);
	return 0;
}

#endif
/* EOF */
