#include "vma.h"
#include "page.h"
#include "chunk.h"
#include "std/debug.h"
#include "init/startup.h"

#ifdef __cplusplus
extern "C" {
#endif

// global variables
size_t size_of_page_t = sizeof(page_t);		// sizeof(page_t), exported for other modules
global_page_data_t global_page_data = {0};	// shared page-allocator state; guarded by its spinlock
vma_t accessor_area_vma = {0};			// vma for the accessor area, set up in do_init_accessor_area()

// Take page p off its free list, adjust the matching zone counter and
// move it onto the dirty (in-use) list.  Caller must hold
// global_page_data.spinlock.  Returns p for convenience.
static page_t* do_alloc_page(page_t* p)
{
	uint page_index = PAGE_IDX(p);

	// detach from whichever free list currently holds the page
	listnode_del(p->ownerlist);

	// account the page against the proper zone counter
	if (page_index * PAGE_SZ < SZ_MEM_LOW_AREA)
		--global_low_area_free_page_count;
	else
		--global_hi_area_free_page_count;

	// the page now counts as in-use
	listnode_add(global_dirty_page_list, p->ownerlist);
	return p;
}

// allocate a physical page
//
// gfpmsk selects the zone and the policy:
//   __GFP_LOW / __GFP_LOWONLY  - prefer / require the low memory area
//   __GFP_HIGHONLY             - require the high memory area
//   __GFP_WAIT                 - retry instead of failing when both lists are empty
//   __GFP_LOCK                 - lock the page before returning it
//
// Returns the allocated page, or NULL when no page is available and
// __GFP_WAIT was not requested.
page_t* alloc_page(uint gfpmsk)
{
	page_t *ret;
	listnode_t *node;
	listnode_t *free_list, *oppsite;	// "oppsite" = the other zone's list (typo kept)
	// todo: rewrite needed

alloc_page_restart:

	// need lock
	spin_lock(&global_page_data.spinlock);

	// pick the preferred free list; remember the other one as a
	// fallback for requests that are not *ONLY
	if (gfpmsk & (__GFP_LOW | __GFP_LOWONLY))
	{
		free_list = &global_low_area_free_page_list;
		oppsite = &global_hi_area_free_page_list;
	}
	else
	{
		free_list = &global_hi_area_free_page_list;
		oppsite = &global_low_area_free_page_list;
	}

	// see if there are free pages
	if (listnode_isempty(*free_list))
	{
		// a *ONLY request must not fall back to the other area
		if (gfpmsk & (__GFP_LOWONLY | __GFP_HIGHONLY))
		{
			if (gfpmsk & __GFP_WAIT)
			{
				spin_unlock(&global_page_data.spinlock);
				// todo: wait for laundry
				// NOTE(review): this is currently a busy retry --
				// nothing blocks before the lock is re-acquired
				goto alloc_page_restart;
			}
			else goto alloc_page_fail;
		}

		// preferred zone is empty: try the opposite zone
		free_list = oppsite;
		if (listnode_isempty(*free_list))
		{
			if (gfpmsk & __GFP_WAIT)
			{
				spin_unlock(&global_page_data.spinlock);
				// todo: wait for laundry
				goto alloc_page_restart;
			}
			else goto alloc_page_fail;
		}
	}

	// take the first page off the chosen free list
	node = free_list->next;
	ret = LIST_ENTRY(page_t, ownerlist, node);
	do_alloc_page(ret);

	// see if we need to lock the page
	// before return
	if (gfpmsk & __GFP_LOCK) lock_page(ret);

	spin_unlock(&global_page_data.spinlock);
	return ret;

alloc_page_fail:
	spin_unlock(&global_page_data.spinlock);
	return NULL;
}

// free a physical page
//
// Unlinks p from whatever list it is on, resets its flags and puts it
// back on the free list of the memory zone it belongs to.
void free_page(page_t *p)
{
	// todo: rewrite needed
	uint page_index;

	// need lock
	spin_lock(&global_page_data.spinlock);

	page_index = PAGE_IDX(p);
	listnode_del(p->ownerlist);
	p->flags = DEFAULT_PAGE_FLAG;

	// return the page to its zone's free list
	if (page_index * PAGE_SZ >= SZ_MEM_LOW_AREA)
	{
		listnode_add(global_hi_area_free_page_list, p->ownerlist);
		++global_hi_area_free_page_count;
	}
	else
	{
		listnode_add(global_low_area_free_page_list, p->ownerlist);
		++global_low_area_free_page_count;
	}

	spin_unlock(&global_page_data.spinlock);
}

// Look up the page_t that backs a virtual address.
// Returns NULL when the address has no backing page; when `lock` is
// true the returned page is locked before the allocator lock is dropped.
page_t* virt_addr_page(uint virtaddr, bool lock)
{
	page_t *found;

	// need lock
	spin_lock(&global_page_data.spinlock);

	found = _virt_addr_page(virtaddr);
	if (found && lock)
		lock_page(found);

	spin_unlock(&global_page_data.spinlock);
	return found;
}

// test for win32
#ifdef TEST_DBG_WIN32
// reserved physical ranges for the win32 test build;
// the table is terminated by an all-zero entry (end == 0)
struct mem_reserved_area sys_mem_resv_area[] =
{
	// 640K ~ 1024K
	{ 640 * 1024, 1024 * 1024},
	{ 0, 0 },
};
#endif

// Walk sys_mem_resv_area and mark every page inside a reserved range:
// set PAGE_FLAG_RESERVED, chain it onto the reserved-page list and bump
// the reserved-page counter.  Ranges beyond the last physical page are
// clamped; empty ranges are skipped.
static void handle_reserved_pages(void)
{
	struct mem_reserved_area *cur;

	for (cur = sys_mem_resv_area; cur->end; ++cur)
	{
		uint first, last;

		// both ends of an area must be page aligned
		kassert(!(cur->end & (PAGE_SZ - 1)));
		kassert(!(cur->start & (PAGE_SZ - 1)));

		last = cur->end / PAGE_SZ;
		if (last > global_total_pages)
			last = global_total_pages;
		first = cur->start / PAGE_SZ;
		if (first >= last)
			continue;

		while (first != last)
		{
			global_page_table[first].flags |= PAGE_FLAG_RESERVED;
			listnode_init(global_page_table[first].ownerlist);
			listnode_add(global_reserved_page_list, global_page_table[first].ownerlist);
			++global_reserved_page_count;
			++first;
		}
	}
}

// Append page p (at physical index idx) to the free list of the memory
// zone it falls into, and bump that zone's free-page counter.
static void add_page_to_queue(uint idx, page_t *p)
{
	int in_low_area = (idx * PAGE_SZ < SZ_MEM_LOW_AREA);

	if (in_low_area)
	{
		++global_low_area_free_page_count;
		listnode_add(global_low_area_free_page_list, p->ownerlist);
	}
	else
	{
		++global_hi_area_free_page_count;
		listnode_add(global_hi_area_free_page_list, p->ownerlist);
	}
}

// Mark every page backing a pre-loaded vma as allocated and record the
// virtual address each page maps.
// NOTE(review): vma->size is treated as a PAGE COUNT here (one page is
// consumed per iteration) -- confirm against the vma_t declaration.
static void handle_preload_vma_memory(vma_t *vma)
{
	uint start_addr = vma->addr;
	uint i, start_idx = (start_addr - KERNEL_START) / PAGE_SZ;
	page_t *p = IDX2PAGE(start_idx);

	// (dead "++start_idx" removed from the loop: the index is only
	// needed once, to locate the first page_t; p tracks the rest)
	for (i = 0; i < vma->size; ++i, ++p)
	{
		// pull the page off its free list and account it as in-use
		do_alloc_page(p);

		// remember which virtual address maps this physical page
		set_page_virt_addr(p, start_addr);
		start_addr += PAGE_SZ;
	}
}

// initialize the page structure table
//
// Zeroes the whole page_t array, marks the reserved ranges, queues every
// remaining page on its zone's free list, then claims the pages already
// occupied by pre-loaded kernel vmas.  Runs under the allocator lock.
static void init_page_table(void)
{
	unsigned int i;

	// need lock
	spin_lock(&global_page_data.spinlock);

	memset(global_page_table, 0, sizeof(page_t) * global_total_pages);
	handle_reserved_pages();

	// every non-reserved page becomes available
	for (i = 0; i < global_total_pages; ++i)
	{
		// NOTE(review): this loop indexes global_page_table_start while
		// the rest of the file uses global_page_table -- confirm both
		// names refer to the same array
		page_t *cur_page = &global_page_table_start[i];
		if (cur_page->flags & PAGE_FLAG_RESERVED)
			continue;

		cur_page->flags |= DEFAULT_PAGE_FLAG;
		listnode_init(cur_page->ownerlist);
		add_page_to_queue(i, cur_page);
	}

	// handle all pre-loaded vmas: this memory has already been used,
	// so remove those pages from the available page lists
	handle_preload_vma_memory(&kernel_area);
	handle_preload_vma_memory(&page_entry_table_area);
	handle_preload_vma_memory(&krnl_proc_preoccupied_area);

	spin_unlock(&global_page_data.spinlock);
}

// initialize the page data structure
//
// startaddr:   where the page_t array lives (must be non-NULL)
// total_pages: number of physical pages to manage (must be non-zero)
static void init_page_data(void *startaddr, uint total_pages)
{
	kassert(NULL != startaddr && total_pages);

	memset(&global_page_data, 0, sizeof(global_page_data));

	// BUGFIX: the memset above also wiped the spinlock embedded in
	// global_page_data (it was initialized by the caller *before* this
	// function ran).  Re-initialize it here so the spin_lock below --
	// and every later use -- operates on a properly initialized lock.
	init_spinlock(&global_page_data.spinlock);

	global_page_table = (page_t *)startaddr;
	global_total_pages = total_pages;

	// init the lists
	spin_lock(&global_page_data.spinlock);
	listnode_init(global_hi_area_free_page_list);
	listnode_init(global_low_area_free_page_list);
	listnode_init(global_reserved_page_list);
	listnode_init(global_dirty_page_list);
	spin_unlock(&global_page_data.spinlock);
}

// Bring the whole page module up: lock first, then the bookkeeping
// data, then one page_t per physical page.  Prints the resulting
// zone counters when done.
static void do_global_init_page_module(void* startaddr, uint total_pages)
{
	// the spinlock must be usable before any page-data routine runs
	init_spinlock(&(global_page_data.spinlock));

	init_page_data(startaddr, total_pages);

	// init the page structure for every page
	init_page_table();

	dbg_output3("page: global avail: %u, global low: %u, global hi: %u\n",
		global_avail_page_count, global_low_area_free_page_count,
		global_hi_area_free_page_count);
}

#ifdef TEST_DBG_WIN32
// stand-ins so this module links in the win32 test build
sys_config_t sys_config_data;	// normally provided by the startup code
static uint win32_test_end_of_krnl_preoccupied_area;
#define end_of_krnl_preoccupied_area	win32_test_end_of_krnl_preoccupied_area
#endif

// Prepare the accessor area: set up its page directory entries and
// describe it with a writable, kernel-space vma.
void do_init_accessor_area(void)
{
	init_accessor_area();

	// init the vma for accessor area
	init_vma(&accessor_area_vma);
	accessor_area_vma.flags |= (VMA_FLAG_KRNL_SPACE | VMA_FLAG_WRITE);
}

// global initializor
//
// Computes where the page_t map starts (right after the kernel image,
// rounded up to a page, plus one page reserved for sys config data) and
// how many pages exist, then initializes the accessor area and the page
// module itself.
void global_init_page_module(void)
{
	uint startaddr = sys_config_data.kernel_size;
	uint total_pages = sys_config_data.total_memory;

	// round the end of the kernel image up to a page boundary.
	// BUGFIX: the mask must be ~(PAGE_SZ - 1); the previous ~PAGE_SZ
	// only cleared the single PAGE_SZ bit and yielded an address that
	// was not page aligned.
	startaddr = (startaddr + PAGE_SZ - 1) & ~(PAGE_SZ - 1);

	// this page reserved for sys config data
	startaddr += PAGE_SZ;
	startaddr += KERNEL_BASE;

	// convert total memory from KB to 4K pages
	total_pages /= 4;
	dbg_output3("page_t map start from: %X, map items: %u\n",
		startaddr, total_pages);

	// init the page dir table for accessor area
	do_init_accessor_area();

	do_global_init_page_module((void *)startaddr, total_pages);
}

// test in win32
#ifdef TEST_DBG_WIN32

// stub: the test build has no real page directories to set up
void init_accessor_area(void) {}
// stub: no virtual->page lookup in the test build
page_t* _virt_addr_page(uint virtaddr) { return NULL; }

// stub for the win32 test build: the flat test mapping always
// reports two page directories
uint flat_get_map_page_dir_count(void)
{
	return 2;
}

#include <stdio.h>
#include <stdlib.h>	/* BUGFIX: declares malloc/free -- previously missing */
#include <string.h>

// Smoke test: initialize the page module over a 1 GiB heap buffer.
// Returns 0 on success, -1 when the buffer cannot be allocated.
int page_test(void)
{
	size_t sz = 1024 * 1024 * 1024;
	void *buffer = malloc(sz);	/* no cast needed in C */

	// BUGFIX: a 1 GiB allocation can easily fail; check before use
	if (!buffer)
	{
		fprintf(stderr, "page_test: failed to allocate %zu bytes\n", sz);
		return -1;
	}

	// test
	do_global_init_page_module(buffer, sz / 4096);

	free(buffer);
	return 0;
}

#endif

#ifdef __cplusplus
}	/* BUGFIX: close the extern "C" block opened at the top of the
	 * file -- the previous code opened a second `extern "C" {` here,
	 * leaving the braces unbalanced under a C++ compiler */
#endif
/* EOF */
