#include "arch/pgd.h"
#include "std/debug.h"
#include "init/startup.h"
#include "kernel/vma.h"

// manager for the "page accessor" window: a kernel vma whose ptes are
// rewritten on demand so physical pages can be reached through a
// temporary virtual mapping (see page_accessor_bind_phy_page)
typedef struct pg_accessor_mgr
{
	vma_t vma_area;			// virtual range used as the accessor window
	spinlock_t spinlock;	// guards freelist and the accessor pte array

	// relative index of first free page
	int freelist;
}
page_accessor_mgr_t;

// single global accessor manager instance (set up in init_accessor_area)
page_accessor_mgr_t page_accessor_mgr;
// base of the pte array backing the accessor window; 0 until init
uint* page_accessor_pte_start_addr = 0;

// reserved sys memory area description
// each entry is a [start, end) byte range of physical memory to keep
// out of general use; the list ends with a { 0, 0 } sentinel
// NOTE(review): presumably consumed by the physical-memory setup code —
// confirm against the code that iterates this table
struct mem_reserved_area sys_mem_resv_area[] =
{
	// 640K ~ 1024K
	{ 640 * 1024, 1024 * 1024},
	{ 0, 0 },
};

// we make use of high 16 bits to store chunk size (in page count)
// for a free accessor slot that same field holds the index of the next
// free slot (see page_accessor_get_page / init_accessor_area)
#define PAGE_ACCESSOR_GET_PAGE_COUNT(v)		((v) >> 16)
#define PAGE_ACCESSOR_PAGE_CNT_SHIFT		(16)
#define PAGE_ACCESSOR_MAX_PAGE_CNT			(0xFFFF)
#define INVALID_ACCESSOR_PG_CNT				(PAGE_ACCESSOR_MAX_PAGE_CNT)

// [start, end) virtual address bounds of the accessor window
#define page_accessor_vma_start				(page_accessor_mgr.vma_area.addr)
#define page_accessor_vma_end				(page_accessor_mgr.vma_area.addr	\
	+ page_accessor_mgr.vma_area.size * PAGE_SZ)

// protects the linked list of page directories rooted at page_root_dir
static spinlock_t pgt_list_spinlock;

// broadcast a kernel-space pgd entry into every page directory on the
// pgd list rooted at page_root_dir
// idx:   pgd slot to write (must be at or above the kernel base)
// pgidx: page frame index to install
// flag:  extra pgd flags
// returns 0 on success, ERR_BAD_PARAMETERS for a user-space slot
int set_kernel_pgd(uint idx, uint pgidx, uint flag)
{
	uint* pgd_tbl;

	// only kernel-space slots may be replicated across directories
	if (idx < GET_PGD_OFFSET(KERNEL_BASE))
		return ERR_BAD_PARAMETERS;

	// walk the singly linked pgd list under the list lock;
	// word 0 of each directory stores the address of the next one
	spin_lock(&pgt_list_spinlock);

	for (pgd_tbl = page_root_dir; pgd_tbl != 0;
		pgd_tbl = (uint*)((*pgd_tbl) & ~(PAGE_SZ - 1)))
	{
		// mirror the mapping into this directory
		_set_pgd(pgd_tbl, idx, pgidx, flag);

		// the link word must never look like a present mapping
		kassert(!PAGE_PRESENT(*pgd_tbl));
	}

	spin_unlock(&pgt_list_spinlock);
	return 0;
}

// insert a task's page directory at the head of the global pgd list
// (word 0 of each pgd stores the address of the next pgd — see
// set_kernel_pgd which walks this list)
// tsk_pgd: page-aligned address of the task's page directory
// returns true on success, false if tsk_pgd is not page aligned
bool link_task_pgd(uint* tsk_pgd)
{
	// the low bits of a list link must be free for flag/link reuse
	if (((uint)tsk_pgd) & (PAGE_SZ - 1))
		return false;

	// need lock
	spin_lock(&pgt_list_spinlock);

	// classic head insertion: new node points at old head
	*tsk_pgd = *page_root_dir;
	*page_root_dir = ((uint)tsk_pgd);

	// unlock
	spin_unlock(&pgt_list_spinlock);

	// BUG FIX: the function is declared bool but previously fell off the
	// end without returning a value — undefined behavior when the caller
	// uses the result
	return true;
}

// resolve the pte entry that maps virt_addr, or NULL if any level of
// the walk is absent or invalid
static uint* get_pte_entry(uint virt_addr)
{
	uint pgd_ent, frame_idx;
	uint *pte_base;

	// level 1: page directory entry covering this address
	pgd_ent = page_root_dir[GET_PGD_OFFSET(virt_addr)];
	if (!PAGE_PRESENT(pgd_ent))
		return NULL;

	// translate the entry into a page frame index
	frame_idx = PGD_GET_IDX(pgd_ent);
	if (!PAGE_IDX_VALID(frame_idx))
		return NULL;

	// NOTE: virt_addr(...) here invokes the project's function-like
	// virt_addr() macro — the uint parameter of the same name does not
	// suppress a function-like macro expansion
	pte_base = (uint*)virt_addr(IDX2PAGE(frame_idx));
	if (NULL == pte_base)
		return NULL;

	// level 2: index into the page table page
	return pte_base + GET_PTE_OFFSET(virt_addr);
}

// translate a virtual address into the page_t describing the physical
// page mapped there, or NULL if the address is not mapped
page_t* _virt_addr_page(uint virt_addr)
{
	uint frame_idx;
	uint* pte_ent = get_pte_entry(virt_addr);

	if (NULL == pte_ent)
		return NULL;

	frame_idx = PTE_GET_IDX(*pte_ent);

	return PAGE_IDX_VALID(frame_idx) ? IDX2PAGE(frame_idx) : NULL;
}

// given the pte entry 'prev' for address 'prevaddr', return the pte
// entry for the next page (prevaddr + PAGE_SZ)
static uint* page_accessor_get_next_pgd_entry(uint prevaddr, uint *prev)
{
	uint curaddr = prevaddr + PAGE_SZ;

	// crossing a (PAGE_SZ * 1024)-aligned boundary means a new page
	// table: re-resolve from the top of the walk
	if (((curaddr) & (PAGE_SZ * 1024 - 1)) == 0)
		return get_pte_entry(curaddr);

	// still inside the same page table: the next pte is adjacent
	return prev + 1;
}

// pop the head of the accessor freelist and return its slot index, or
// INVALID_ACCESSOR_PG_CNT when the list is empty
// lock: if true, mark the slot locked before handing it out
// note: caller must hold page_accessor_mgr.spinlock — this function
// itself takes no lock
static uint page_accessor_get_page(bool lock)
{
	uint head = page_accessor_mgr.freelist;

	if (PAGE_ACCESSOR_MAX_PAGE_CNT == head)
		return INVALID_ACCESSOR_PG_CNT;

	// pin the slot before it becomes visible to anyone, if requested
	if (lock)
		page_accessor_pte_start_addr[head] |= PFG_ACCESSOR_LOCK;

	// a free slot stores the index of the next free slot in its high
	// 16 bits; advance the list head to it
	page_accessor_mgr.freelist = PAGE_ACCESSOR_GET_PAGE_COUNT(\
		page_accessor_pte_start_addr[head]);

	return head;
}

// after this call, the phy page is able to be accessed
// via the virtual address stored in page_t
// p:    descriptor of the physical page to map into the accessor window
// lock: if true, the accessor slot is handed out already locked so the
//       mapping cannot be torn down until page_accessor_unlock()
// returns the new virtual address, or 0 if no accessor slot is free
uint page_accessor_bind_phy_page(page_t *p, bool lock)
{
	uint idx, virt_addr;
	
	// need lock
	spin_lock(&page_accessor_mgr.spinlock);

	// grab a free slot from the accessor freelist
	idx = page_accessor_get_page(lock);
	if (INVALID_ACCESSOR_PG_CNT == idx)
	{
		// window exhausted: report failure with address 0
		spin_unlock(&page_accessor_mgr.spinlock);
		return 0;
	}
	// slot index -> virtual address inside the accessor vma
	virt_addr = page_accessor_mgr.vma_area.addr + idx * PAGE_SZ;

	// save the virtual address
	set_page_virt_addr(p, virt_addr);

	// update the pte; TLB flush makes the new mapping effective
	_set_pte(page_accessor_pte_start_addr, idx, PAGE_IDX(p), 0);
	__flush_tlb();

	spin_unlock(&page_accessor_mgr.spinlock);
	return virt_addr;
}

// pin an accessor-window mapping so it cannot be torn down
// returns false if virt_addr lies outside the window or is not mapped
bool page_accessor_lock(uint virt_addr)
{
	uint slot;
	bool ok = false;

	// reject addresses outside the accessor window
	if (virt_addr < page_accessor_vma_start
		|| virt_addr >= page_accessor_vma_end)
		return false;

	// need lock
	spin_lock(&page_accessor_mgr.spinlock);

	slot = (virt_addr - page_accessor_vma_start) / PAGE_SZ;

	// only a present mapping can be pinned
	if (PAGE_PRESENT(page_accessor_pte_start_addr[slot]))
	{
		page_accessor_pte_start_addr[slot] |= PFG_ACCESSOR_LOCK;
		ok = true;
	}

	spin_unlock(&page_accessor_mgr.spinlock);
	return ok;
}

// drop the accessor lock bit taken by page_accessor_lock() or
// page_accessor_bind_phy_page(..., true)
// virt_addr: address inside the accessor window; addresses outside the
// window are silently ignored
void page_accessor_unlock(uint virt_addr)
{
	uint idx;
	if (virt_addr < page_accessor_vma_start
		|| virt_addr >= page_accessor_vma_end)
		return;

	// FIX: take the manager spinlock around this read-modify-write.
	// page_accessor_lock() and the freelist code mutate the same pte
	// array only under page_accessor_mgr.spinlock, so the previous
	// unlocked '&=' here could race with those updates
	spin_lock(&page_accessor_mgr.spinlock);

	idx = (virt_addr - page_accessor_vma_start) / PAGE_SZ;
	page_accessor_pte_start_addr[idx] &= ~PFG_ACCESSOR_LOCK;

	spin_unlock(&page_accessor_mgr.spinlock);
}

// return a virtual address through which page p can be accessed,
// binding p into the accessor window if it has no mapping yet;
// returns 0 if p is NULL or no accessor slot could be obtained
// lock:
// [in]: indicate if we need lock the virtual address if the
// address is in range of accessor area
// [out]: indicate if we need to do unlock operation
// note: this function is not locked
static uint page_accessor_get_virt_addr(page_t* p, bool* lock)
{
	uint ret;
	bool need_lock;

	// no page: no address, and nothing for the caller to unlock
	if (!p)
	{
		if (lock) *lock = false;
		return 0;
	}

	// virt_addr(p) expands the project's virt_addr() macro on the page
	ret = virt_addr(p);
	// lock is both optional and in/out; only honor it when non-NULL
	need_lock = (lock && *lock) ? true : false;

	// if no virtual address recorded in page_t
	// temporary map it for access
	if (!ret)
	{
		ret = page_accessor_bind_phy_page(p, need_lock);
		if (lock) *lock = (ret) ? true : false;
		return ret;
	}

	// if a lock is requested, that means user want to make sure
	// the validity of the virtual address
	// for a non-page accessor area page, there is no problem since
	// these page are alway accessable. we omit this case
	// for a page accessor area page, the accessibility is not guaranteed
	// since the map of page may destroyed at any time
	if (ret >= page_accessor_vma_start && ret < page_accessor_vma_end && need_lock)
	{
		// this is for page accessor area
		// try to lock this virtual address
		if (!page_accessor_lock(ret))
		{
			// fail to lock, we rebuild the map with locked return
			ret = page_accessor_bind_phy_page(p, true);
			if (lock) *lock = (ret) ? true : false;
		}
	}
	else
	{
		// result: not locked
		if (lock) *lock = false;
	}
	return ret;
}

// map physical page p at virt_addr inside the page table tree rooted
// at pgdt, allocating a page table page on demand
// returns 0 on success, or:
//   1 - could not allocate a page table page
//   2 - existing pgd entry holds an invalid page index
//   3 - the page table page could not be made accessible
// NOTE(review): on the error-3 path a freshly allocated pte_pg is not
// released — confirm whether the allocator reclaims it elsewhere
int map_page(uint* pgdt, uint virt_addr, page_t *p, uint flags)
{
	uint pte, pgd;
	page_t* pte_pg;
	bool locked = true;	// in: request a pin; out: whether to unpin below

	pgd = pgdt[GET_PGD_OFFSET(virt_addr)];
	if (!PAGE_PRESENT(pgd))
	{
		// allocate a page for pte
		pte_pg = alloc_page(__GFP_HIGH | __GFP_WAIT | __GFP_LOCK);
		if (NULL == pte_pg) return 1;

		// reset the page's flag
		// a page table page must never be swapped out
		pte_pg->flags &= ~PAGE_FLAG_SWAPABLE;
		pte_pg->flags |= PAGE_FLAG_PGDTE;
		unlock_page(pte_pg);
	}
	else
	{
		// reuse the page table page already installed in this pgd slot
		uint idx = PGD_GET_IDX(pgd);
		if (!PAGE_IDX_VALID(idx)) return 2;
		pte_pg = IDX2PAGE(idx);
	}

	// obtain a usable virtual address for the page table page, pinning
	// it if it lives in the accessor window
	pte = page_accessor_get_virt_addr(pte_pg, &locked);
	if (!pte) return 3;

	// see if we need to update pgdt
	if (!PAGE_PRESENT(pgd))
	{
		// brand-new page table: clear it before installing in the pgd
		memset((void *)pte, 0, PAGE_SZ);
		_set_pgd(pgdt, GET_PGD_OFFSET(virt_addr), PAGE_IDX(pte_pg), 0);
	}

	// update the pte
	_set_pte(pte, GET_PTE_OFFSET(virt_addr), PAGE_IDX(p), flags);
	__flush_tlb();

	// set its virtual address
	set_page_virt_addr(p, virt_addr);

	// unlock (only if the accessor pin actually happened)
	if (locked) page_accessor_unlock(pte);

	//	dbg_output3("page %u mapped to virt_addr 0x%08X\n",
	//		PAGE_IDX(p), virt_addr);
	return 0;
}

#ifndef TEST_DBG_WIN32
// set up the page accessor window: reserve a kernel vma, back it with
// a pte array carved from the kernel's preoccupied area, and thread
// every slot onto the freelist
void init_accessor_area(void)
{
	int i;
	uint pgd_cnt = flat_get_accessor_pgd_count();
	uint sz = pgd_cnt * PAGE_SZ / sizeof(uint);			// number of pte entries (= accessor pages)
	// the accessor pte array sits at the top of the preoccupied area
	uint start_addr = (uint)end_of_krnl_preoccupied_area - pgd_cnt * PAGE_SZ;
	page_accessor_pte_start_addr = (uint*)start_addr;

	init_vma(&page_accessor_mgr.vma_area);
	page_accessor_mgr.vma_area.vma_ops = NULL;

	page_accessor_mgr.vma_area.size = sz;
	page_accessor_mgr.vma_area.flags |= VMA_FLAG_WRITE | VMA_FLAG_KRNL_SPACE;

	// NOTE(review): the third argument is PAGE_SZ * PAGE_SZ / sizeof(uint)
	// rather than being derived from pgd_cnt like sz — confirm this is
	// intentional against the request_vma contract
	if (request_vma(&kernel_vma_mgr, &page_accessor_mgr.vma_area,
		PAGE_SZ * PAGE_SZ / sizeof(uint)))
		goto fail_init_accessor_area;

	// init the spinlock
	init_spinlock(&page_accessor_mgr.spinlock);

	// init the freelist
	// each free slot stores the index of the next free slot in its high
	// 16 bits; the last slot is then overwritten with the end marker
	spin_lock(&page_accessor_mgr.spinlock);
	for (i = 0; i < sz; ++i)
		((uint*)start_addr)[i] = (i + 1) << PAGE_ACCESSOR_PAGE_CNT_SHIFT;
	((uint*)start_addr)[sz - 1] = PAGE_ACCESSOR_MAX_PAGE_CNT << PAGE_ACCESSOR_PAGE_CNT_SHIFT;
	page_accessor_mgr.freelist = 0;		// point to the first one

	// update the page_dir
	// entry 0 must still be zero — presumably because it doubles as the
	// pgd list link word (see set_kernel_pgd) — TODO confirm
	kassert(page_root_dir[0] == 0);
	_set_pgd(page_root_dir, GET_PGD_OFFSET(page_accessor_mgr.vma_area.addr),\
		(start_addr - KERNEL_START) / PAGE_SZ, 0);
	__flush_tlb();

	spin_unlock(&page_accessor_mgr.spinlock);

	dbg_output3("config %u items for accessor pte at: %X\n",
		sz, start_addr);
	dbg_output3("page accessor area start from: %X\n",
		page_accessor_mgr.vma_area.addr);

	return;

fail_init_accessor_area:

	printk(">PANIC<: unable to request page_accessor_vma_area.\n");
	go32_die_endless();
}
#endif

#ifdef TEST_DBG_WIN32

// win32 unit-test stub: exercises the pgd chunk manager in isolation
// NOTE(review): page_accessor_chunkmgr and init_pgd_chunkmgr are not
// declared in this file — presumably supplied by the test build
void pgd_chunk_mgr_test(void)
{
	init_spinlock(&pgt_list_spinlock);
	init_pgd_chunkmgr(&page_accessor_chunkmgr, 0xC0000000, 128);
}

#endif
/* EOF */
