#include "vma.h"
#include "page.h"
#include "heap.h"
#include "chunk.h"
#include "init/startup.h"

// for win32 test
#ifdef TEST_DBG_WIN32
#define TOTAL_MEMORY	(32768)		// this value has no meaning
static uint win32_test_page_root_dir;
#define page_root_dir	win32_test_page_root_dir
#else
#define TOTAL_MEMORY	(sys_config_data.total_memory)
#endif

// spinlock
DECLARE_PRIVATE_SPINLOCK(heap_spinlock);

// A freed-but-not-yet-released heap chunk, kept on a singly-linked
// pending list rooted at vma->userdata.  The node is stored in-place
// at the start of the freed region itself (see do_heap_vfree and
// heap_vmalloc_reap_one), so no extra allocation is needed.
typedef struct buffered_chunk
{
	size_t size_in_pages;	// length of this chunk, in pages
	uint next;		// address of the next pending chunk (0 = end of list)
}
buffered_chunk_t;

// Walk `pages` pages starting at virtual address `addr` and flag each
// backing page as discardable, so the reclaimer may drop it under
// memory pressure.  The swapable flag is cleared on each page.
static void heap_mark_page_discardable(uint addr, size_t pages)
{
	size_t remaining;

	for (remaining = pages; remaining > 0; --remaining)
	{
		page_t *pg = virt_addr_page(addr, true);
		if (NULL != pg)
		{
			if (pg->flags & PAGE_FLAG_SWAPPED)
			{
				// todo: discard the swapped page on the disk
			}

			// discardable, no longer swapable
			pg->flags = (pg->flags & ~PAGE_FLAG_SWAPABLE)
				| PAGE_FLAG_DISCARDABLE;
			unlock_page(pg);
		}
		addr += PAGE_SZ;
	}
}

// Pop one pending chunk off the vma's lazy-release list (vma->userdata)
// and actually return it to the chunk allocator.
// Returns false when the list is empty, true when a chunk was reaped.
static bool heap_vmalloc_reap_one(struct vma* vma)
{
	size_t pages;
	buffered_chunk_t *next;
	
	spin_lock(&heap_spinlock);
	next = (buffered_chunk_t*)vma->userdata;
	if (NULL == next)
	{
		spin_unlock(&heap_spinlock);
		return false;
	}

	// unlink the head node before dropping the lock; from here on this
	// thread owns the chunk exclusively
	vma->userdata = (uint)next->next;
	spin_unlock(&heap_spinlock);

	// read the size before chunk_vfree: the node lives inside the very
	// chunk that is being released
	pages = next->size_in_pages;
	chunk_vfree(vma, (void*)next, pages);

	// mark all pages of the chunk discardable so they can be
	// discarded if there is a memory shortage
	// NOTE(review): (uint)next is used here only as an address value,
	// not dereferenced, but this runs after chunk_vfree — confirm the
	// page structs for this range are still valid to touch at this point.
	heap_mark_page_discardable((uint)next, pages);
	return true;
}

// Drain every chunk queued for lazy release on this vma.
// Returns true when the pending list appeared non-empty on entry,
// false when there was nothing to reap.
static bool heap_vmalloc_reap(struct vma* vma)
{
	bool had_pending = (0 != vma->userdata);

	if (had_pending)
	{
		// reap all pending chunks
		while (heap_vmalloc_reap_one(vma))
			;
	}
	return had_pending;
}

// Clear the discardable flag on the `pages` pages backing the freshly
// allocated range at virtual address `_addr`, so the reclaimer will not
// drop them while the caller is using the memory.
// NOTE(review): the swapable flag cleared in heap_mark_page_discardable
// is not restored here — confirm heap pages are never meant to be swapped.
static void mark_allocated_pages(uint _addr, size_t pages)
{
	uint va = _addr;
	size_t n;

	for (n = 0; n < pages; ++n)
	{
		page_t *pg = virt_addr_page(va, true);
		if (NULL != pg)
		{
			pg->flags &= ~PAGE_FLAG_DISCARDABLE;
			unlock_page(pg);
		}
		va += PAGE_SZ;
	}
}

// vmalloc for the default kernel heap.
// Tries a straight chunk allocation first; on failure, and only when
// the vma uses lazy release, reaps any pending freed chunks and retries
// exactly once.  Returns NULL on failure.
static void* do_heap_vmalloc(struct vma* vma, size_t pages)
{
	void *block;

	if (0 == pages)
		return NULL;

	block = chunk_vmalloc(vma, pages);
	if (NULL == block)
	{
		// nothing to reclaim unless lazy release is enabled
		if (!(vma->flags & VMA_FLAG_HEAP_LAZY_RELEASE))
			return NULL;

		// there may be space waiting for release;
		// release (reap) it now and retry
		if (!heap_vmalloc_reap(vma))
			return NULL;

		block = chunk_vmalloc(vma, pages);
		if (NULL == block)
			return NULL;
	}

	// freshly allocated pages must not be discarded
	mark_allocated_pages((uint)block, pages);
	return block;
}

// vfree for the default kernel heap (lazy release).
// The freed region itself is reused as a buffered_chunk_t node and
// pushed onto the vma's pending list under the heap spinlock; the real
// release happens later in heap_vmalloc_reap().
// Returns 0 on success, ERR_BAD_PARAMETERS on invalid arguments.
static int do_heap_vfree(struct vma* vma, void* addr, size_t pages)
{
	buffered_chunk_t *node;

	if (NULL == vma || NULL == addr || 0 == pages)
		return ERR_BAD_PARAMETERS;

	node = (buffered_chunk_t*)addr;

	spin_lock(&heap_spinlock);

	// build the node in-place and push it onto the list head
	node->size_in_pages = pages;
	node->next = vma->userdata;
	vma->userdata = (uint)node;

	spin_unlock(&heap_spinlock);

	// todo: wake up the reaper thread
	return 0;
}

// Verify that the CPU mode which raised the fault matches the vma's
// address space: kernel-space vmas must fault from kernel mode and
// user-space vmas from user mode.
// Returns 0 on a valid combination, HEAP_PF_MODE_ERROR otherwise.
static int page_fault_mode_check(uint errcode, struct vma* vma)
{
	if (vma->flags & VMA_FLAG_KRNL_SPACE)
	{
		// kernel-space vma: only kernel-mode faults are legal
		return PAGE_ERROR_KERNEL(errcode) ? 0 : HEAP_PF_MODE_ERROR;
	}

	// user-space vma: only user-mode faults are legal
	return PAGE_ERROR_USER(errcode) ? 0 : HEAP_PF_MODE_ERROR;
}

// Reject write faults against a vma that does not permit writes.
// Returns 0 when the access is allowed, HEAP_PF_ACCESS_ERROR otherwise.
static int page_fault_access_check(uint errcode, struct vma* vma)
{
	bool vma_writable = (vma->flags & VMA_FLAG_WRITE) != 0;

	if (PAGE_ERR_IN_WRITE(errcode) && !vma_writable)
		return HEAP_PF_ACCESS_ERROR;
	return 0;
}

// Allocate one physical page to back a faulting heap address.
// Heaps flagged VMA_FLAG_HEAP_USE_LOW_MEM are restricted to low memory
// (__GFP_LOWONLY); all others prefer high memory (__GFP_HIGH).
// The __GFP_LOCK flag is requested, so callers unlock the page after use.
static page_t* page_fault_alloc_heap_page(struct vma* vma)
{
	uint gfp = __GFP_WAIT | __GFP_LOCK;

	gfp |= (vma->flags & VMA_FLAG_HEAP_USE_LOW_MEM)
		? __GFP_LOWONLY
		: __GFP_HIGH;

	return alloc_page(gfp);
}

// handle_page_fault for kernel heap: demand-allocate a physical page
// and map it at the faulting address.
// regs - trap frame; regs->u.error_code carries the page-fault error code
// vma  - the vma covering the faulting address
// addr - the faulting virtual address
// Returns 0 on success or a HEAP_PF_* error code.
static int do_heap_handle_page_fault(regs_t* regs, struct vma* vma, uint addr)
{
	int ret;
	page_t *p;
	uint errcode = regs->u.error_code;

	// this handler only serves heap vmas
	if (!(vma->flags & VMA_FLAG_HEAP))
		return HEAP_PF_VMA_NOT_A_HEAP;

	// see if this is some user mode code accessing
	// the kernel memory or vice versa
	ret = page_fault_mode_check(errcode, vma);
	if (ret) return ret;

	// a protection fault means the page is present but the access
	// violated its permissions - nothing to demand-allocate here
	if (PAGE_PROTECTION_FAULT(errcode))
		return HEAP_PF_PAGE_PROTECTION_FAULT;

	// reject writes into a read-only vma
	ret = page_fault_access_check(errcode, vma);
	if (ret) return ret;

	// allocate the backing physical page (requested with __GFP_LOCK,
	// hence the unlock_page calls below)
	p = page_fault_alloc_heap_page(vma);
	if (NULL == p) return HEAP_PF_SHORTAGE_OF_PAGE;

	if (krnl_map_page(addr, p, 0))
	{
		// mapping failed: release the page we just took
		unlock_page(p);
		free_page(p);
		return HEAP_PF_FAIL_TO_MAP_PAGE;
	}

	unlock_page(p);
	return 0;
}

// vma operations for the kernel heap.
// NOTE(review): positional initializer - the order must match the field
// layout of struct vma_operations (vmalloc, vfree, then presumably the
// page-fault handler; confirm against the struct declaration).
struct vma_operations kernel_heap_ops =
{
	do_heap_vmalloc,
	do_heap_vfree,
	do_heap_handle_page_fault,
};

// this is the global kernel heap vma; configured and registered in
// global_vma_init_kenrel_heap(), chunk-initialized in global_init_heap()
vma_t default_kernel_heap;

// Compute the default kernel heap size, returned in pages.
// Policy: a quarter of total RAM (in MB), clamped to
// [MIN_KRNL_DEFAULT_HEAP_SZ, MAX_KRNL_DEFAULT_HEAP_SZ] MB.
// NOTE(review): assumes TOTAL_MEMORY is expressed in KB — confirm.
static size_t calc_default_kernel_heap_size(void)
{
	size_t heap_mb = (TOTAL_MEMORY / 1024) / 4;

	if (heap_mb < MIN_KRNL_DEFAULT_HEAP_SZ)
		heap_mb = MIN_KRNL_DEFAULT_HEAP_SZ;
	else if (heap_mb > MAX_KRNL_DEFAULT_HEAP_SZ)
		heap_mb = MAX_KRNL_DEFAULT_HEAP_SZ;

	// MB -> bytes -> pages
	return heap_mb * 1024 * 1024 / PAGE_SZ;
}

// Virtual malloc for pages: allocate `pages` pages from the default
// kernel heap via its operations table.
// Returns NULL when the heap has no vmalloc op or the allocation fails.
void* vmalloc(size_t pages)
{
	struct vma_operations *ops = default_kernel_heap.vma_ops;

	if (NULL == ops || NULL == ops->vmalloc)
		return NULL;

	return ops->vmalloc(&default_kernel_heap, pages);
}

// Allocate `pages` pages from an arbitrary heap vma via its ops table.
// Returns NULL on a missing vma/op or on allocation failure.
void* heap_vmalloc(struct vma* vma, size_t pages)
{
	if (NULL == vma)
		return NULL;
	if (NULL == vma->vma_ops || NULL == vma->vma_ops->vmalloc)
		return NULL;

	return vma->vma_ops->vmalloc(vma, pages);
}

// Free pages previously allocated with vmalloc, returning them to the
// default kernel heap via its operations table.
// Returns the vfree op's result, or ERR_BAD_PARAMETERS if no op exists.
int vfree(void* addr, size_t pages)
{
	struct vma_operations *ops = default_kernel_heap.vma_ops;

	if (NULL == ops || NULL == ops->vfree)
		return ERR_BAD_PARAMETERS;

	return ops->vfree(&default_kernel_heap, addr, pages);
}

// Free `pages` pages at `addr` back to an arbitrary heap vma via its
// ops table.  Returns ERR_BAD_PARAMETERS on a missing vma or op.
int heap_vfree(struct vma* vma, void* addr, size_t pages)
{
	if (NULL == vma)
		return ERR_BAD_PARAMETERS;
	if (NULL == vma->vma_ops || NULL == vma->vma_ops->vfree)
		return ERR_BAD_PARAMETERS;

	return vma->vma_ops->vfree(vma, addr, pages);
}

int global_vma_init_kenrel_heap(void)
{
	init_vma(&default_kernel_heap);

	// default kernel heap
	default_kernel_heap.addr = KRNL_LOW_MEM_BOUNDARY;
	default_kernel_heap.size = calc_default_kernel_heap_size();
	default_kernel_heap.flags |=
		VMA_FLAG_WRITE | VMA_FLAG_KRNL_SPACE |
		VMA_FLAG_HEAP | VMA_FLAG_HEAP_LAZY_RELEASE;
	default_kernel_heap.vma_ops = &kernel_heap_ops;

	return insert_vma(&kernel_vma_mgr, &default_kernel_heap);
}

// Initialize the chunk allocators for:
//  1) the low-area free memory block (kernel low premapped area2):
//     this area lies below 16M; DMA requires that memory addresses
//     be lower than 16M.
//  2) the default kernel heap, where the important kernel data
//     structures reside.
// Always returns 0.
int global_init_heap(void)
{
	init_chunk(&krnl_low_premapped_area2);
	init_chunk(&default_kernel_heap);
	return 0;
}

/* EOF */
