#include "vma.h"
#include "page.h"
#include "heap.h"
#include "slab.h"
#include "chunk.h"
#include "init/startup.h"
#include "arch/pgd.h"

// public variables
vma_mgr_t kernel_vma_mgr;

vma_t krnl_low_premapped_area1;
vma_t kernel_area;
vma_t page_entry_table_area;
vma_t krnl_proc_preoccupied_area;
vma_t krnl_low_premapped_area2;

// for win32 test
#ifdef TEST_DBG_WIN32

static uint win32_test_idle_thread_stack;
static uint win32_test_page_root_dir;
static uint win32_test_end_of_krnl_preoccupied_area;

#define idle_thread_stack	win32_test_idle_thread_stack
#define page_root_dir	win32_test_page_root_dir
#define end_of_krnl_preoccupied_area	win32_test_end_of_krnl_preoccupied_area

#define KERNEL_SIZE		(54345)		// this value has no meaning
#define TOTAL_MEMORY	(32768)		// this value has no meaning

#else

#define KERNEL_SIZE		(sys_config_data.kernel_size)
#define TOTAL_MEMORY	(sys_config_data.total_memory)
extern uint idle_thread_stack;

#endif

static int vma_avl_insert_compare(avl_node_t *fst, avl_node_t *snd)
{
	// Order two VMAs by their address ranges for insertion.
	// Returns 0 when the ranges overlap, which makes avl_insert
	// reject the new node.
	vma_t *lhs = AVLNODE_ENTRY(vma_t, avlnode, fst);
	vma_t *rhs = AVLNODE_ENTRY(vma_t, avlnode, snd);
	uint lhs_end = lhs->addr + lhs->size * PAGE_SZ;
	uint rhs_end = rhs->addr + rhs->size * PAGE_SZ;

	if (lhs_end <= rhs->addr)
		return -1;
	if (lhs->addr >= rhs_end)
		return 1;
	return 0;
}

static int vma_avl_compare(avl_node_t *fst, avl_node_t *snd)
{
	// Lookup comparator: fst carries only an address to probe with
	// (see MAKE_FIND_OBJECT in find_vma_by_addr), snd is a stored vma.
	// Returns 0 when the probed address falls inside snd's range.
	vma_t *probe = AVLNODE_ENTRY(vma_t, avlnode, fst);
	vma_t *node = AVLNODE_ENTRY(vma_t, avlnode, snd);

	if (probe->addr < node->addr)
		return -1;
	if (probe->addr >= node->addr + node->size * PAGE_SZ)
		return 1;
	return 0;
}

static void vma_ctor(kmem_cache_t* mcache, void* p, size_t sz)
{
	// Slab constructor: every vma_t leaves the cache zeroed,
	// with its owner-list node ready for linking.
	vma_t *obj = (vma_t *)p;

	memset(obj, 0, sizeof(*obj));
	listnode_init(obj->ownerlist);
}

// Lazily create the slab cache for vma_t objects and allocate one from it.
// Returns NULL if the cache cannot be created or the allocation fails.
// NOTE(review): the lazy initialization of vma_cache is not synchronized;
// if two CPUs can reach this before the first call completes, the cache
// may be created twice (leaking one) -- confirm early callers are
// effectively single-threaded.
static vma_t* vma_alloc(void)
{
	static kmem_cache_t *vma_cache = NULL;

	if (NULL == vma_cache)
	{
		// vma_ctor zeroes each new object and inits its list node
		vma_cache = easy_kmem_cache_create("vma_cache",
			sizeof(vma_t), vma_ctor, 0);
		if (NULL == vma_cache) return NULL;
	}
	return (vma_t*)kmem_cache_alloc(vma_cache);
}

#define vma_free	kmem_cache_free

// Allocate a vma_t covering [addr, addr + pages * PAGE_SZ) and insert it
// into vmamgr.  addr must be page aligned; the range must be non-empty
// and must not wrap around the address space.  Returns the new vma on
// success, NULL on any failure (the vma is released if insertion fails).
vma_t* create_vma(vma_mgr_t* vmamgr, uint addr, uint pages)
{
	vma_t *vma;

	if (NULL == vmamgr)
		return NULL;

	// reject unaligned addresses and empty ranges
	if ((addr & (PAGE_SZ - 1)) || 0 == pages)
		return NULL;

	// reject ranges whose size or end address wraps around: the
	// multiplication pages * PAGE_SZ can itself overflow and slip a
	// small bogus value past the addition check, so test it by division
	if (pages > ((uint)-1) / PAGE_SZ)
		return NULL;
	if (addr + pages * PAGE_SZ <= addr)
		return NULL;

	vma = vma_alloc();
	if (NULL == vma) return NULL;

	vma->addr = addr;
	vma->size = pages;
	if (insert_vma(vmamgr, vma))
	{
		// range overlapped an existing vma or fell outside the manager
		vma_free(vma);
		return NULL;
	}
	return vma;
}

// Create a standalone vma manager.
// NOTE(review): not implemented yet -- always returns NULL.  Callers must
// treat NULL as "unavailable"; only the static kernel_vma_mgr exists today.
vma_mgr_t* create_vma_manager(void)
{
	return NULL;
}

// Try to place vma inside the hole [hole_start, next_hole_start),
// rounding its start address up to an _align-byte boundary.  On success
// vma->addr is set, the vma is inserted into vmamgr and 0 is returned;
// a non-zero code means the hole cannot fit the vma.
// This function takes no lock itself; callers hold the vmamgr lock.
// NOTE(review): insert_vma() acquires the vmamgr spinlock, yet every
// caller of this helper already holds it -- this only works if
// vma_spinlock is recursive or a no-op; confirm the lock semantics.
static int req_vma_from_hole(uint hole_start, uint next_hole_start,
		vma_mgr_t* vmamgr, vma_t *vma, uint _align)
{
	uint start_addr, end_addr;

	// a zero alignment would divide by zero below; treat it as "no fit"
	if (0 == _align)
		return 1;

	if (next_hole_start <= hole_start)
		return 1;

	// round hole_start up to the next _align boundary; if the round-up
	// wraps past the top of the address space the hole is unusable
	start_addr = (hole_start + _align - 1) / _align * _align;
	if (start_addr < hole_start) return 2;

	// likewise reject a vma whose byte size or end address would wrap
	if (vma->size > ((uint)-1) / PAGE_SZ) return 2;
	end_addr = start_addr + vma->size * PAGE_SZ;
	if (end_addr < start_addr) return 2;

	if (end_addr > next_hole_start) return 2;

	vma->addr = start_addr;
	return insert_vma(vmamgr, vma);
}

// Find a free hole in vmamgr large enough for vma->size pages whose start
// is aligned to _align bytes, then insert vma there.
// Returns 0 on success (vma->addr is filled in); ERR_BAD_PARAMETERS for a
// NULL argument, a zero-sized vma, or an alignment that is zero or not a
// multiple of PAGE_SZ; any other non-zero value means no hole fits.
// NOTE(review): the loop below calls vma_next() while already holding the
// vmamgr spinlock, and vma_next() locks it again -- this deadlocks unless
// vma_spinlock is recursive or a no-op; confirm the lock semantics.
int request_vma(vma_mgr_t* vmamgr, vma_t *vma, uint _align)
{
	int ret;
	uint hole_start;
	vma_t *prev = NULL;
	vma_t *vma_node = NULL;

	if (NULL == vmamgr || NULL == vma)
		return ERR_BAD_PARAMETERS;

	// _align must be a non-zero multiple of PAGE_SZ: a zero alignment
	// used to pass the mask test and divide by zero in the hole helper
	if (!_align || (_align & (PAGE_SZ - 1)) || !vma->size)
		return ERR_BAD_PARAMETERS;

	hole_start = vmamgr->addr;
	vma_node = vma_first(vmamgr);
	if (NULL == vma_node)
	{
		// the manager is empty, so its whole range is one hole
		// (uint, not int: kernel addresses exceed INT_MAX)
		uint next_hole_start;

		// need lock
		vma_spinlock(vmamgr);
		next_hole_start = hole_start + vmamgr->pages * PAGE_SZ;
		ret = req_vma_from_hole(hole_start, next_hole_start, vmamgr, vma, _align);
		vma_spinunlock(vmamgr);
		return ret;
	}

	// here we use a stupid way, todo: need optimization
	// we do a linear search over the gaps between consecutive vmas
	// looking for one that meets the size and align requirement

	// we need lock
	vma_spinlock(vmamgr);
	while (NULL != vma_node)
	{
		prev = vma_node;
		ret = req_vma_from_hole(hole_start, vma_node->addr, vmamgr, vma, _align);
		if (!ret) break;

		hole_start = vma_node->addr + vma_node->size * PAGE_SZ;
		vma_node = vma_next(vmamgr, prev);
	}

	// from the end of last vma to the end of whole scope of vmamgr
	// there is still a hole, try this
	if (ret)
	{
		kassert(NULL != prev);
		hole_start = prev->addr + prev->size * PAGE_SZ;
		ret = req_vma_from_hole(hole_start, vmamgr->addr
			+ vmamgr->pages * PAGE_SZ, vmamgr, vma, _align);
	}

	vma_spinunlock(vmamgr);
	return ret;
}

// Insert vma into vmamgr's AVL tree and owner list.  The vma must lie
// entirely within the manager's range and must not overlap any existing
// vma (avl_insert rejects overlaps through the insert comparator).
// Returns 0 on success, ERR_BAD_PARAMETERS, ERR_OUT_OF_RANGE, or the
// avl_insert error code on failure.
int insert_vma(vma_mgr_t* vmamgr, vma_t* vma)
{
	int ret;
	if (NULL == vmamgr || NULL == vma)
		return ERR_BAD_PARAMETERS;

	// check if the vma is in the range of the vma mgr
	{
		uint vma_end, vmamgr_end;

		// a vma whose end wraps past the top of the address space could
		// previously sneak under vmamgr_end; reject the wrap explicitly
		if (vma->size > ((uint)-1 - vma->addr) / PAGE_SZ)
			return ERR_OUT_OF_RANGE;

		vma_end = vma->addr + vma->size * PAGE_SZ;
		vmamgr_end = vmamgr->addr + vmamgr->pages * PAGE_SZ;
		if (vma->addr < vmamgr->addr || vma_end > vmamgr_end)
			return ERR_OUT_OF_RANGE;
	}

	vma_spinlock(vmamgr);
	ret = avl_insert(&vmamgr->vma_avl_head, &(vma->avlnode), vma_avl_insert_compare);
	if (ret)
	{
		// overlap with an existing vma (or other avl failure)
		vma_spinunlock(vmamgr);
		return ret;
	}

	// track the vma on the owner list and remember it as most recent
	listnode_add(vmamgr->vma_list_head, vma->ownerlist);
	vmamgr->latest_vma = vma;
	vmamgr->vma_count++;
	vma_spinunlock(vmamgr);

	return 0;
}

// Look up the vma containing 'address' in vmamgr; returns NULL if no vma
// covers it.  The most recently touched vma (latest_vma) is checked
// first; small managers are then scanned through the owner list, large
// ones through the AVL tree.  A successful lookup updates latest_vma.
vma_t* find_vma_by_addr(vma_mgr_t* vmamgr, uint address)
{
	vma_t *found = NULL;
	vma_t *cached;
	avl_node_t *avlnd;

	if (NULL == vmamgr)
		return NULL;

	// every lookup path runs under the manager lock
	vma_spinlock(vmamgr);

	// fast path: the last vma we handed out may cover this address too
	cached = vmamgr->latest_vma;
	if (cached && address >= cached->addr
		&& address < cached->addr + cached->size * PAGE_SZ)
	{
		found = cached;
		goto done;
	}

	if (vmamgr->vma_count < VMA_MGR_QUICK_SEARCH_CNT)
	{
		// few vmas: a linear walk of the owner list is cheaper
		listnode_t *node = vmamgr->vma_list_head.next;
		for (; node != &(vmamgr->vma_list_head); node = node->next)
		{
			vma_t *candidate = LIST_ENTRY(vma_t, ownerlist, node);
			if (address >= candidate->addr
				&& address < candidate->addr + candidate->size * PAGE_SZ)
			{
				found = candidate;
				// remember the hit for the fast path
				vmamgr->latest_vma = candidate;
				break;
			}
		}
		goto done;
	}

	// many vmas: search the AVL tree with an address-only probe object
	avlnd = avl_find(vmamgr->vma_avl_head, MAKE_FIND_OBJECT(address, vma_t, addr, avlnode),
		vma_avl_compare);
	if (avlnd)
	{
		found = AVLNODE_ENTRY(vma_t, avlnode, avlnd);
		vmamgr->latest_vma = found;
	}

done:
	vma_spinunlock(vmamgr);
	return found;
}

// Return the lowest-addressed vma in vma_mgr, or NULL if the manager is
// empty or NULL.  Takes the manager spinlock around the tree access.
vma_t* vma_first(vma_mgr_t *vma_mgr)
{
	vma_t *ret = NULL;
	avl_node_t *avlfirst;

	if (NULL == vma_mgr)
		return NULL;

	// need lock
	vma_spinlock(vma_mgr);
	avlfirst = avl_first(vma_mgr->vma_avl_head);
	if (NULL != avlfirst)
	{
		// use AVLNODE_ENTRY here: avlfirst is an AVL node, not a list
		// node.  LIST_ENTRY was used before -- presumably both macros
		// expand to the same container-of arithmetic, but the AVL macro
		// matches the file's convention (see find_vma_by_addr).
		ret = AVLNODE_ENTRY(vma_t, avlnode, avlfirst);
	}
	vma_spinunlock(vma_mgr);
	return ret;
}

// Return the vma following 'prev' in address order, or NULL at the end.
// Takes the manager spinlock around the tree traversal.
// NOTE(review): request_vma() calls this while already holding the same
// spinlock -- confirm vma_spinlock is recursive or a no-op.
vma_t* vma_next(vma_mgr_t* vma_mgr, vma_t* prev)
{
	vma_t *ret = NULL;
	avl_node_t *avlnext;

	if (NULL == vma_mgr || NULL == prev)
		return NULL;

	// need lock
	vma_spinlock(vma_mgr);
	avlnext = avl_next(&(prev->avlnode));
	if (NULL != avlnext)
	{
		// AVLNODE_ENTRY, not LIST_ENTRY: avlnext is an AVL tree node
		// (matches the convention used in find_vma_by_addr)
		ret = AVLNODE_ENTRY(vma_t, avlnode, avlnext);
	}
	vma_spinunlock(vma_mgr);
	return ret;
}

// ------- initialization code -------

// Build and register the five statically-allocated kernel vmas that
// describe the boot-time virtual layout, in ascending address order:
//   krnl_low_premapped_area1   - 640KB premapped at 0xC0000000
//   kernel_area                - kernel image plus one sys_config page
//   page_entry_table_area      - page_t array describing physical pages
//   krnl_proc_preoccupied_area - idle thread stack through the end of
//                                the preoccupied region
//   krnl_low_premapped_area2   - remaining low memory, kernel heap backing
// Returns 0 on success, or ERR_OTHER(n) identifying the failed insertion.
static int init_predefined_vma(void)
{
	extern size_t size_of_page_t;
	extern uint idle_thread_stack;

	init_vma(&krnl_low_premapped_area1);
	init_vma(&kernel_area);
	init_vma(&page_entry_table_area);
	init_vma(&krnl_proc_preoccupied_area);
	init_vma(&krnl_low_premapped_area2);

	// init krnl_low_premapped_area1 vma: 160 pages starting at the base
	// of kernel space
	krnl_low_premapped_area1.addr = 0xC0000000;
	krnl_low_premapped_area1.size = 160;			// 640KB
	krnl_low_premapped_area1.flags |=
		VMA_FLAG_WRITE | VMA_FLAG_KRNL_SPACE
		| VMA_FLAG_PRECOMMITTED;

	if (insert_vma(&kernel_vma_mgr, &krnl_low_premapped_area1))
		return ERR_OTHER(1);

	// init kernel area vma, rounding the image size up to whole pages
	kernel_area.addr = KERNEL_BASE;
	kernel_area.size = (KERNEL_SIZE + (PAGE_SZ - 1)) / PAGE_SZ;

	// add a page for sys_config
	kernel_area.size++;
	kernel_area.flags |=
		VMA_FLAG_EXECUTE | VMA_FLAG_KRNL_SPACE
		| VMA_FLAG_PRECOMMITTED;

	if (insert_vma(&kernel_vma_mgr, &kernel_area))
		return ERR_OTHER(2);

	// init page_entry_table_area, placed right after the kernel image.
	// TOTAL_MEMORY / 4 is presumably the physical page-frame count
	// (TOTAL_MEMORY in KB with 4KB pages) -- TODO confirm the unit
	page_entry_table_area.addr = kernel_area.addr
		+ kernel_area.size * PAGE_SZ;
	page_entry_table_area.size = (TOTAL_MEMORY / 4 * size_of_page_t
		+ PAGE_SZ - 1) / PAGE_SZ;

	page_entry_table_area.flags |=
		VMA_FLAG_WRITE | VMA_FLAG_KRNL_SPACE
		| VMA_FLAG_PRECOMMITTED;

	if (insert_vma(&kernel_vma_mgr, &page_entry_table_area))
		return ERR_OTHER(3);

	// the startup code is expected to have placed the idle thread stack
	// immediately after the page entry table
	kassert(page_entry_table_area.addr + page_entry_table_area.size\
		* PAGE_SZ == (uint)idle_thread_stack);

	// init kernel process preoccupied area.
	// end_of_krnl_preoccupied_area has no visible declaration in the
	// non-test build -- presumably a linker/startup symbol; TODO confirm
	krnl_proc_preoccupied_area.addr = (uint)idle_thread_stack;
	krnl_proc_preoccupied_area.size = ((uint)end_of_krnl_preoccupied_area
		- (uint)idle_thread_stack) / PAGE_SZ;

	krnl_proc_preoccupied_area.flags |=
		VMA_FLAG_WRITE | VMA_FLAG_KRNL_SPACE
		| VMA_FLAG_PRECOMMITTED;

	if (insert_vma(&kernel_vma_mgr, &krnl_proc_preoccupied_area))
		return ERR_OTHER(4);

	// kernel pre-occupied area must be lower than 16M boundary
	kassert(krnl_proc_preoccupied_area.addr + krnl_proc_preoccupied_area.size\
		* PAGE_SZ < KRNL_LOW_MEM_BOUNDARY);

	// Low area free memory block: everything left below the low-memory
	// boundary becomes the kernel heap's backing vma
	krnl_low_premapped_area2.addr = krnl_proc_preoccupied_area.addr
		+ krnl_proc_preoccupied_area.size * PAGE_SZ;
	krnl_low_premapped_area2.size = (KRNL_LOW_MEM_BOUNDARY
		- krnl_low_premapped_area2.addr) / PAGE_SZ;
	krnl_low_premapped_area2.flags |=							// todo: need check
		VMA_FLAG_WRITE | VMA_FLAG_KRNL_SPACE |
		VMA_FLAG_HEAP | VMA_FLAG_HEAP_USE_LOW_MEM;
	krnl_low_premapped_area2.vma_ops = &kernel_heap_ops;

	if (insert_vma(&kernel_vma_mgr, &krnl_low_premapped_area2))
		return ERR_OTHER(5);

	// NOTE(review): "kenrel" is misspelled, but the name must match the
	// function's external definition -- rename in both places if fixed
	return global_vma_init_kenrel_heap();
}

#if DBG_LEVEL == DBG_LEVEL3
// Dump every predefined vma in kernel_vma_mgr to the debug console, in
// address order.  Prints nothing at all when the manager is empty.
static void predefine_vma_info_output(void)
{
	vma_t *cur = vma_first(&kernel_vma_mgr);
	if (NULL == cur) return;

	dbg_output3("\n");
	dbg_output3("--- predefined vma list ---\n");
	for (; NULL != cur; cur = vma_next(&kernel_vma_mgr, cur))
	{
		dbg_output3("addr scope: 0x%08X - 0x%08X, pages: %-5u, flag: %X\n",
			cur->addr, cur->addr + cur->size * PAGE_SZ, cur->size, cur->flags);
	}
	dbg_output3("--- end ---\n\n");
}
#endif

// Initialize the vma subsystem: set up the kernel vma manager covering
// kernel address space and register the predefined kernel vmas.
// Returns 0 on success, 1 if the predefined vmas could not be set up.
int global_init_vma_module(void)
{
	int rc = 0;

	// the kernel vma manager spans 0x3FFFF pages starting at 0xC0000000
	// (i.e. 0xC0000000 .. 0xFFFFF000)
	init_vma_mgr(&kernel_vma_mgr, 0xC0000000, 0x3FFFF);

	// register the predefined kernel vma(s)
	if (init_predefined_vma())
	{
		rc = 1;
	}
#if DBG_LEVEL == DBG_LEVEL3
	else
	{
		predefine_vma_info_output();
	}
#endif

	return rc;
}

#ifdef TEST_DBG_WIN32

#include <stdio.h>
#include <string.h>
#include <assert.h>

// Win32-hosted smoke test for the vma module.  Returns 0 on success or a
// small positive code identifying the failing check.
int vma_test(void)
{
	vma_t *vma;
	vma_t req_vma_tst;
	// declare with its real type (see init_predefined_vma): the old
	// "extern size_of_page_t;" relied on implicit int, which C99 removed
	// and which mismatched the size_t definition (undefined behavior)
	extern size_t size_of_page_t;

	// lay out fake addresses for the symbols normally provided by the
	// startup code, mirroring the layout init_predefined_vma asserts
	win32_test_idle_thread_stack = 0xc0110000 + ((KERNEL_SIZE + PAGE_SZ - 1) & ~(PAGE_SZ - 1))
		+ PAGE_SZ + ((TOTAL_MEMORY / 4 * size_of_page_t + PAGE_SZ - 1) & ~(PAGE_SZ - 1));
	win32_test_page_root_dir = win32_test_idle_thread_stack + PAGE_SZ * 2;
	win32_test_end_of_krnl_preoccupied_area = win32_test_page_root_dir + PAGE_SZ * 2;

	global_init_vma_module();

	// the first premapped area must be found by its base address
	vma = find_vma_by_addr(&kernel_vma_mgr, 0xC0000000);
	if (NULL == vma) return 1;
	if (vma != &krnl_low_premapped_area1) return 2;

	// request a 256-page vma aligned to 0x3000 and verify a hole was
	// found (the result used to be silently ignored)
	init_vma(&req_vma_tst);
	req_vma_tst.size = 256;
	req_vma_tst.flags |= VMA_FLAG_WRITE | VMA_FLAG_KRNL_SPACE;
	if (request_vma(&kernel_vma_mgr, &req_vma_tst, 0x3000))
		return 3;
	return 0;
}

#endif

/* EOF */
