#include <types.h>
#include <kern/proc.h>
#include <kern/monitor.h>
#include <kern/mmu.h>
#include <kern/vm.h>
#include <string.h>
#include <pool.h>
#include <x86.h>
#include <kern/memlayout.h>
#include <kern/multiboot.h>
#include <error.h>
#include <kern/debug.h>

// Allocate a kernel array during vm_init by carving it out of the
// directly-mapped region starting at _va.
//  _elmt_type: element type of the array
//  _arr:       lvalue that receives the array base pointer
//  _nr_elmts:  number of elements to reserve
//  _va:        lvalue holding the next free virtual address; it is
//              aligned up to the element size, then advanced past the
//              array so successive calls allocate back to back
// do { } while (0) makes the macro expand to a single statement, so it
// is safe inside an unbraced if/else (a bare { } block is not).
#define ARR_ALLOC(_elmt_type, _arr, _nr_elmts, _va)	\
do {							\
	(_va) = ROUNDUP((_va), sizeof(_elmt_type));	\
	(_arr) = (_elmt_type*)(_va);			\
	(_va) += (_nr_elmts) * sizeof(_elmt_type);	\
} while (0)

// Bookkeeping for one physical page frame; pages[] holds one entry per
// frame and page <-> physical address translation is done by index.
struct page_t {
	struct link_t link;	// chains the entry into the free_pages pool
	int ref;		// number of mappings/holders; 0 means free
};

// Bookkeeping for one kernel virtual page (the [KERNBASE, 4GB) range);
// kvpages[] holds one entry per kernel virtual page, translated by index.
struct kvpage_t {
	struct link_t link;	// chains the entry into the free_kvpages pool
	int ref;		// number of holders of this kernel va; 0 means free
};

/* globals */
static struct pool_t free_pages;	// pool of free physical page frames
static struct pool_t free_kvpages;	// pool of free kernel virtual pages
static struct page_t* pages;		// one entry per physical page frame
static int nrpages;			// number of entries in pages[]
static struct kvpage_t* kvpages;	// one entry per kernel virtual page
static int nrkvpages;			// number of entries in kvpages[]
static unsigned int memsize; // amount of physical mem in KB

/* static functions */
// map an element of one equally-spaced array (domain) to the matching
// element of another (range); used for page<->pa and kvpage<->kva
static uintptr_t domain2range(uintptr_t d_start, size_t d_elmt_sz, uintptr_t d_elmt, uintptr_t r_start, size_t r_elmt_sz) __attribute__((const));
static physaddr_t page2pa(struct page_t* page);
static struct page_t* pa2page(physaddr_t pa);
static uintptr_t kvpage2kva(struct kvpage_t* kvpage);
static struct kvpage_t* kva2kvpage(uintptr_t va);
static void release_page(struct page_t* page);
static void retain_page(struct page_t* page);
static struct page_t* alloc_page();
static void swtch_pgdir_if_needed(pde_t* pgdir);
static physaddr_t alloc_kpgtbls(uintptr_t va);

/* tests */
static void test(void);

// Given an element d_elmt of an array starting at d_start with elements
// of size d_elmt_sz, return the address of the corresponding element of
// a parallel array starting at r_start with elements of size r_elmt_sz.
// This is the single translation primitive behind page2pa/pa2page and
// kvpage2kva/kva2kvpage.
static uintptr_t domain2range(
	uintptr_t d_start, size_t d_elmt_sz, uintptr_t d_elmt,
	uintptr_t r_start, size_t r_elmt_sz)
{
	uintptr_t i;

	// use uintptr_t for the index: an int could truncate/overflow for
	// large address-range translations (e.g. high physical addresses)
	i = (d_elmt - d_start) / d_elmt_sz;

	return r_start + (i * r_elmt_sz);
}

// Translate a pages[] entry to the physical address of the frame it
// describes (frame i lives at physical address i * PGSIZE).
static physaddr_t page2pa(struct page_t* page)
{
	return (physaddr_t)domain2range((uintptr_t)pages, sizeof *pages, (uintptr_t)page, 0, PGSIZE);
}

// Translate a page-aligned physical address to its pages[] entry
// (inverse of page2pa).
static struct page_t* pa2page(physaddr_t pa)
{
	ASSERT((pa & (PGSIZE-1)) == 0, "pa is not aligned");
	return (struct page_t*)domain2range(0, PGSIZE, pa, (uintptr_t)pages, sizeof *pages);
}

// Translate a kvpages[] entry to the kernel virtual address it tracks
// (kvpage i covers KERNBASE + i * PGSIZE).
static uintptr_t kvpage2kva(struct kvpage_t* kvpage)
{
	return (uintptr_t)domain2range((uintptr_t)kvpages, sizeof *kvpages, (uintptr_t)kvpage, KERNBASE, PGSIZE);
}

// Translate a page-aligned kernel virtual address to its kvpages[]
// entry (inverse of kvpage2kva). va must be >= KERNBASE.
static struct kvpage_t* kva2kvpage(uintptr_t va)
{
	ASSERT((va & (PGSIZE-1)) == 0, "va is not aligned");
	return (struct kvpage_t*)domain2range(KERNBASE, PGSIZE, va, (uintptr_t)kvpages, sizeof *kvpages);
}

extern void loadsegregs(void);

// Build and load this CPU's GDT: flat kernel/user code and data
// segments, the TSS descriptor, and a per-CPU data segment that gs
// points at. Must run on each CPU during early init.
void vm_init_gdt(void)
{
	struct cpu_t* c;

	c = &cpus[cpunum()];
	memset(c->gdt, 0, sizeof c->gdt);

	// flat 4GB segments; privilege level selects kernel vs user
	c->gdt[SEG_KCODE] = SEG(STA_X | STA_R, 0, 0xffffffff, DPL_KERN);
	c->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, DPL_KERN);
	c->gdt[SEG_UCODE] = SEG(STA_X | STA_R, 0, 0xffffffff, DPL_USER);
	c->gdt[SEG_UDATA] = SEG(STA_W, 0, 0xffffffff, DPL_USER);

	// task state segment; sd_s = 0 marks it as a system descriptor
	c->gdt[SEG_TSS] = SEG16(STS_T32A, &c->ts, sizeof(c->ts)-1, 0);
	c->gdt[SEG_TSS].sd_s = 0;

	// map cpu local data
	c->gdt[SEG_KCPU] = SEG(STA_W, &c->cpu, sizeof c->cpu, DPL_KERN);

	// self-pointer so code can find its cpu_t through %gs
	c->cpu = c;
	// load the new gdt
	lgdt(c->gdt, sizeof(c->gdt));

	loadsegregs();

	// loading the per-cpu data segment into gs
	loadgs(SEG_KCPU << 3);
}

// Drop one reference to a physical page; once the count reaches zero
// the page is returned to the free pool.
static void release_page(struct page_t* page)
{
	if (--page->ref == 0)
		pool_add(&free_pages, page);
}

// Take an extra reference to a physical page.
static void retain_page(struct page_t* page)
{
	page->ref += 1;
}

// Pop a page off the free pool and hand it out with ref == 1.
// Returns NULL when physical memory is exhausted.
static struct page_t* alloc_page(void)
{
	struct page_t* page;
	if(!(page = (struct page_t*)pool_pop(&free_pages)))
		return NULL;
	// pages only enter the pool when their refcount drops to zero
	ASSERT(page->ref == 0, "free page with ref != 0");
	retain_page(page);
	return page;
}

// Drop one reference to a kernel virtual page; when the count reaches
// zero the va is returned to the free pool.
// NOTE(review): the debug print below suggests kernel VAs are expected
// to be retained forever once allocated - confirm whether reaching
// ref == 0 here is a real code path or a latent leak check.
static void release_kvpage(struct kvpage_t* page)
{
	--page->ref;
	if (page->ref == 0) {
		monitor_write("I SHOULD NEVER BE HERE\n");
		pool_add(&free_kvpages, page);
	}
}

// Take an extra reference to a kernel virtual page.
static void retain_kvpage(struct kvpage_t* page)
{
	page->ref += 1;
}

// Pop a kernel virtual page off the free pool and hand it out with
// ref == 1. Returns NULL when the kernel virtual address space is
// exhausted.
static struct kvpage_t* alloc_kvpage(void)
{
	struct kvpage_t* page;
	if(!(page = (struct kvpage_t*)pool_pop(&free_kvpages)))
		return NULL;
	// kvpages only enter the pool when their refcount drops to zero
	ASSERT(page->ref == 0, "free page with ref != 0");
	retain_kvpage(page);
	return page;
}

// Switch cr3 to the physical frame backing pgdir unless it is already
// the active page directory. pgdir == NULL means "current address
// space" and is a no-op, so callers can pass NULL for kernel mappings.
static void swtch_pgdir_if_needed(pde_t* pgdir)
{
	physaddr_t pgdir_pa;
	// no switch if null
	if (!pgdir)
		return;
	// resolve pgdir's physical address through the VPT self-mapping
	pgdir_pa = vpt[VPN(pgdir)] & ~PTE_MASK;
	if (rcr3() != pgdir_pa)
		lcr3(pgdir_pa);
}

// allocates all page tables and init mapping for kernel arrays
// va is the first available virtual address (after the arrays)
// return the first available physical page address
// Every kernel page table is allocated up front so later kernel
// mappings never need to allocate a page table, regardless of which
// page directory is active.
static physaddr_t alloc_kpgtbls(uintptr_t va)
{
	extern pde_t bootpgdir[];
	physaddr_t pa;
	uintptr_t i, j;

	ASSERT((va & (PGSIZE-1)) == 0, "page is not aligned");
	// drop pde 0 (presumably the boot-time identity mapping - confirm)
	bootpgdir[0] = 0;
	// create the vpt loop
	bootpgdir[PDX(VPT)] = rcr3() | PTE_W | PTE_P;
	// set pa to be out first available physical page
	pa = va - KERNBASE;

	// each outer iteration consumes one physical page (pa) as the page
	// table for one 4MB pde slot
	for (i = KERNBASE + (PGSIZE * NPTENTRIES); // start at 1 pde after the boot page table
	     i; // stop when we overflow
	     i += PGSIZE * NPTENTRIES, pa += PGSIZE) { // skip to the next pde
		vpd[VPD(i)] = pa | PTE_W | PTE_P;
		// populate the page table with either the kernel address
		// or 0 if we passed the top kernel va
		// NOTE(review): on the topmost pde the bound i + PGSIZE*NPTENTRIES
		// wraps to 0, so this inner loop body never runs and that page
		// table is left unwritten - confirm the top 4MB is intentional
		for (j = i; j < i + (PGSIZE * NPTENTRIES); j += PGSIZE)
			vpt[VPN(j)] = va <= j ? 0 : ((j - KERNBASE) | PTE_W | PTE_P);
	}
	return pa;
}

// Return nonzero iff va lies in the kernel half of the address space.
int vm_is_kern_addr(uintptr_t va)
{
	if (va < KERNBASE)
		return 0;
	return 1;
}

// Initialize the virtual memory subsystem: size physical memory from
// the multiboot info, carve the kernel arrays (pages, kvpages, procs,
// cpus) out of the memory directly after the kernel image, allocate
// every kernel page table, and seed the free physical / kernel-virtual
// page pools.
void vm_init(void)
{
	extern char end[];			// end of the kernel image (linker symbol)
	extern multiboot_info_t* mb_info;	// saved by early boot code
	uintptr_t va;
	physaddr_t pa;
	struct page_t* page;
	struct kvpage_t* kvpage;

	// mem_upper is the bootloader-reported upper memory size, in KB
	memsize = mb_info->mem_upper;

	monitor_write("physical memory size: ");
	monitor_write_hex((uint32_t)mb_info->mem_upper);
	monitor_write(" KB\n");

	pool_init(&free_pages, offsetof(struct page_t, link));
	pool_init(&free_kvpages, offsetof(struct kvpage_t, link));

	/*
	Here we allocate all the kernel arrays. Since our kmalloc can only
	allocate 1 page, we need to save place for arrays which are longer
	*/
	va = ROUNDUP(end, PGSIZE);

	// one page_t per physical frame; PGSIZE/1024 converts KB -> frames
	nrpages = memsize / (PGSIZE/1024);
	ARR_ALLOC(struct page_t, pages, nrpages, va);

	// one kvpage_t per virtual page in [KERNBASE, 4GB)
	nrkvpages = ((0xFFFFFFFF - KERNBASE) + 1) / PGSIZE;
	ARR_ALLOC(struct kvpage_t, kvpages, nrkvpages, va);

	ARR_ALLOC(struct proc_t, procs, NRPROCS, va);

	// NOTE(review): cpu count is hard-coded to 8; presumably the real
	// count comes from MP/ACPI enumeration - confirm
	ncpus = 8;
	ARR_ALLOC(struct cpu_t, cpus, ncpus, va);

	/*
	After we have finished allocating all the kernel arrays, we need to
	allocate page tables for kernel address space. Allocating these
	page tables allows us to kalloc memory for kernel use no matter what
	is our current page dir.
	In contrast to the arrays, the page tables do not require direct virtual
	address mapping (va = pa + KERNBASE). The global vars vpt and vpd already
	map them.
	*/
	// set va to be our first available kernel virtual page
	va = ROUNDUP(va, PGSIZE);
	pa = alloc_kpgtbls(va);

	monitor_write("\n");

	monitor_write("total memory consumed by kernel: ");
	monitor_write_hex(pa / 1024);
	monitor_write(" KB\n");

	// clear all kernel arrays
	memset(pages, 0, nrpages * sizeof *pages);
	memset(kvpages, 0, nrkvpages * sizeof *kvpages);
	memset(procs, 0, NRPROCS * sizeof *procs);
	memset(cpus, 0, ncpus * sizeof *cpus);

	// init free pages, starting with pa (the first free page)
	for (page = pa2page(pa); page != &pages[nrpages]; ++page)
		pool_add(&free_pages, page);

	// init free kernel virtual pages, starting with va (the first free virtual address)
	for (kvpage = kva2kvpage(va); kvpage != &kvpages[nrkvpages]; ++kvpage)
		pool_add(&free_kvpages, kvpage);

	monitor_write("done init vm\n");

	TEST(test());
}



int vm_insert_page(pte_t* pgdir, uintptr_t va, int perm)
{
	struct page_t* page;
	int rc;

	if (!(page = alloc_page()))
		return -ENOMEM;

	rc = vm_map(pgdir, va, page2pa(page), perm);
	release_page(page);
	return rc;
}


// Return 1 iff va has both a present page directory entry and a
// non-empty page table entry under pgdir (NULL = current space).
int vm_is_mapped(pde_t* pgdir, uintptr_t va)
{
	ASSERT((va & (PGSIZE-1)) == 0, "va is not aligned");
	// fixed assertion message: this checks pgdir, not va
	ASSERT(((uintptr_t)pgdir & (PGSIZE-1)) == 0, "pgdir is not aligned");

	swtch_pgdir_if_needed(pgdir);
	// the pde must be checked first: touching vpt[VPN(va)] with a
	// missing page table would itself fault
	if (!vpd[VPD(va)] || !vpt[VPN(va)])
		return 0;
	return 1;
}

// Initialize a fresh page directory: copy the kernel mappings from
// bootpgdir, then point its VPT slot at itself so vpt/vpd work when
// this directory is active.
void vm_init_pgdir(pde_t* pgdir)
{
	extern pde_t bootpgdir[];
	// TODO assert pgdir is in kernel va
	memmove(pgdir, bootpgdir, PGSIZE);

	// create the vpt loop
	pgdir[PDX(VPT)] = vm_va2pa(NULL, (uintptr_t)pgdir) | PTE_W | PTE_P;
}

// Allocate one page of kernel memory: grab a physical page and a
// kernel virtual page, map the former at the latter (writable), and
// return the kernel virtual address. Returns NULL when either resource
// is exhausted.
// Fixes the failure paths of the original: the ref-count ASSERTs ran
// unconditionally, dereferencing NULL when either allocation failed,
// and asserting ref == 1 on a page that had just been released to the
// pool (ref == 0) when only the kvpage allocation failed.
void* vm_kalloc_page()
{
	struct page_t* page;
	struct kvpage_t* kvpage;
	int rc;

	if (!(page = alloc_page()))
		return NULL;

	if (!(kvpage = alloc_kvpage())) {
		// give the physical page back (ref 1 -> 0, returns to pool)
		release_page(page);
		return NULL;
	}

	// vm_map only failed when it could not allocate a page table
	// this is never the case in the kernel since we have already
	// allocated all the kernel page tables in vm_init
	rc = vm_map(NULL, kvpage2kva(kvpage), page2pa(page), PTE_W);
	ASSERT(rc == 0, "vm_map failed when mapping a kernel page");

	// vm_map retained the page, so drop our allocation reference; the
	// mapping now holds the single reference to the physical page
	release_page(page);
	ASSERT(page->ref == 1, "invalid page ref");
	ASSERT(kvpage->ref == 1, "invalid page ref");
	return (void*)kvpage2kva(kvpage);
}

// Take an extra reference on the kernel virtual page containing kpg.
void vm_retain_kvpage(void* kpg)
{
	struct kvpage_t* kvpage = kva2kvpage((uintptr_t)kpg);
	retain_kvpage(kvpage);
}

// Drop a reference on the kernel virtual page containing kpg.
void vm_release_kvpage(void* kpg)
{
	struct kvpage_t* kvpage = kva2kvpage((uintptr_t)kpg);
	release_kvpage(kvpage);
}

// Map physical page pa at virtual address va in pgdir (NULL = current
// address space) with permission bits perm (PTE_P is added here).
// Allocates a page table if the pde is empty. Takes a reference on the
// physical page. Returns 0 on success, -ENOMEM if a page table could
// not be allocated. Asserts that va is not already mapped.
int vm_map(pde_t* pgdir, uintptr_t va, physaddr_t pa, int perm)
{
	struct page_t* page;
	void* pgtbl;

	ASSERT((va & (PGSIZE-1)) == 0, "va is not aligned");
	ASSERT((pa & (PGSIZE-1)) == 0, "pa is not aligned");
	ASSERT((perm & ~PTE_MASK) == 0, "invalid permissions");

	swtch_pgdir_if_needed(pgdir);
	// check if we need to allocate a page table
	if (!(vpd[VPD(va)])) {
		if (!(page = alloc_page()))
			return -ENOMEM;
		// PTE_U on the pde so user pages can be mapped; the pte
		// still controls actual user access
		vpd[VPD(va)] = page2pa(page) | PTE_U | PTE_W | PTE_P;
		// calculate the address of the begining of the page table
		// that maps va
		pgtbl = (void*)((uintptr_t)(&vpt[VPN(va)]) & ~(PGSIZE-1));
		memset(pgtbl, 0, PGSIZE);
	}

	ASSERT(vpt[VPN(va)] == 0, "va is already mapping a page !");
	vpt[VPN(va)] = pa | perm | PTE_P;
	retain_page(pa2page(pa));
	return 0;
}

// Translate a page-aligned virtual address to the physical address it
// maps under pgdir (NULL = current address space). va must be mapped.
physaddr_t vm_va2pa(pde_t* pgdir, uintptr_t va)
{
	pte_t pte;

	ASSERT(vm_is_mapped(pgdir, va), "va is not mapped");
	ASSERT((va & (PGSIZE-1)) == 0, "va is not aligned");

	swtch_pgdir_if_needed(pgdir);

	// the frame address is the pte with the flag bits masked off
	pte = vpt[VPN(va)];
	return pte & ~PTE_MASK;
}

// Remove the mapping at va in pgdir (NULL = current address space) and
// drop the reference on the backing physical page (freeing it if that
// was the last reference).
// NOTE(review): the TLB entry for va is not invalidated here (no invlpg
// / cr3 reload) - confirm callers tolerate a stale translation.
void vm_unmap(pde_t* pgdir, uintptr_t va)
{
	ASSERT((va & (PGSIZE-1)) == 0, "va is not aligned");
	ASSERT(vm_is_mapped(pgdir, va), "va is not mapped");
	swtch_pgdir_if_needed(pgdir);

	release_page(pa2page(vpt[VPN(va)] & ~PTE_MASK));
	vpt[VPN(va)] = 0;
}

// Replace the permission bits of the mapping at va with perm (PTE_P is
// re-added). va must already be mapped; perm must fit in PTE_MASK.
// NOTE(review): no TLB invalidation after the pte change - confirm a
// stale cached translation is acceptable here.
void vm_set_perm(pde_t* pgdir, uintptr_t va, int perm)
{
	ASSERT((va & (PGSIZE-1)) == 0, "va is not aligned");
	ASSERT(vm_is_mapped(pgdir, va), "va is not mapped");
	ASSERT((perm & ~PTE_MASK) == 0, "invalid permissions");
	
	swtch_pgdir_if_needed(pgdir);

	// clear the old flag bits, keep the frame address, set new flags
	vpt[VPN(va)] &= ~PTE_MASK;
	vpt[VPN(va)] |= perm | PTE_P;
}

// Return the permission bits of the mapping at va under pgdir
// (NULL = current address space). va must be mapped and page-aligned.
int vm_get_perm(pde_t* pgdir, uintptr_t va)
{
	ASSERT(vm_is_mapped(pgdir, va), "va is not mapped");
	ASSERT((va & (PGSIZE-1)) == 0, "va is not aligned");
	
	swtch_pgdir_if_needed(pgdir);
	// BUG FIX: was `& ~PTE_MASK`, which selects the physical frame
	// address, not the flags. vm_set_perm stores the permissions in
	// the PTE_MASK bits, so that is what must be returned here.
	return vpt[VPN(va)] & PTE_MASK;
}

// Switch to the current process's address space and set up the TSS so
// ring-0 entry (interrupt/syscall) lands on this process's kernel
// stack. Must be called with a valid cpu->currproc.
void vm_swtch_uvm()
{
	// TODO: assert interrupts disabled
	swtch_pgdir_if_needed(cpu->currproc->pgdir);
	cpu->ts.ss0 = SEG_KDATA << 3;
	// top of the process's kernel stack (stacks grow down)
	cpu->ts.esp0 = (uintptr_t)cpu->currproc->kstack + KSTKSIZE;
	ltr(SEG_TSS << 3);
}


/* TESTS */

// Exercises vm_insert_page / vm_map / vm_unmap reference counting and
// aliasing: two VAs mapped to one PA must observe each other's writes,
// the page ref must track the number of live mappings, and separate
// insertions must receive distinct physical pages.
static void test_it_should_insert_new_page()
{
	uintptr_t va1, va2;
	struct page_t* page1, *page2;
	physaddr_t pa1, pa2;
	int rc;

	va1 = 0x800000;
	va2 = 0x900000;

	rc = vm_insert_page(NULL, va1, PTE_W);
	ASSERT(rc == 0, "unable to insert page");

	pa1 = vm_va2pa(NULL, va1);
	page1 = pa2page(pa1);

	ASSERT(page1->ref == 1, "allocated page has wrong ref count");

	// alias the same physical page at a second va (read-only perms)
	rc = vm_map(NULL, va2, pa1, 0);
	ASSERT(rc == 0, "unable to map page");
	ASSERT(page1->ref == 2, "allocated page has wrong ref count after vm_map");

	// a write through va1 must be visible through va2
	*(int*)va1 = 0xB00B5;
	ASSERT(*(int*)va2 == 0xB00B5, "mapping failed to map same pa to different va");

	vm_unmap(NULL, va2);
	ASSERT(page1->ref == 1, "allocated page has wrong ref count after unmap va2");

	vm_unmap(NULL, va1);
	ASSERT(page1->ref == 0, "allocated page has wrong ref count after unmap va1");

	// two independent insertions must get two distinct physical pages
	rc = vm_insert_page(NULL, va1, PTE_W);
	ASSERT(rc == 0, "unable to insert page");
	*(int*)va1 = 0xABC;

	rc = vm_insert_page(NULL, va2, PTE_W);
	ASSERT(rc == 0, "unable to insert page");
	*(int*)va2 = 0xEDF;

	ASSERT(*(int*)va1 == 0xABC, "failed to allocate different pa");

	pa1 = vm_va2pa(NULL, va1);
	page1 = pa2page(pa1);
	ASSERT(page1->ref == 1, "page has invalid ref cound");
	
	pa2 = vm_va2pa(NULL, va2);
	page2 = pa2page(pa2);
	ASSERT(page2->ref == 1, "page has invalid ref cound");
	
	vm_unmap(NULL, va1);
	vm_unmap(NULL, va2);

	ASSERT(pa1 != pa2, "did not allocate different pa");
	ASSERT(page1->ref == 0, "page has invalid ref cound");
	ASSERT(page2->ref == 0, "page has invalid ref cound");
}

// Exercises vm_kalloc_page and page-directory switching: kernel pages
// allocated from one address space must stay valid and keep their
// contents after switching to another page directory (because all
// kernel page tables are shared).
static void it_should_allocate_memory_in_kernel()
{

	pde_t* kpgdir;
	extern pde_t bootpgdir[];
	int* kpg;
	uintptr_t cr3;

	kpgdir = vm_kalloc_page();
	kpg = vm_kalloc_page();

	ASSERT(kpgdir != NULL, "kalloc failed");
	ASSERT(kpg != NULL, "kalloc failed");
	ASSERT((uintptr_t)kpg != (uintptr_t)kpgdir, "kalloc allocated the same page twice");

	// build a fresh page directory and switch into it
	vm_init_pgdir(kpgdir);
	kpg[0] = 0xBADA55;
	cr3 = rcr3();
	swtch_pgdir_if_needed(kpgdir);
	ASSERT(rcr3() != cr3, "did not switch page dirs");
	ASSERT(kpg[0] == 0xBADA55, "swithcing page dirs caused invliad data");
	
	// and switch back to the boot page directory
	kpg[1] = 0xBAD1;
	cr3 = rcr3();
	swtch_pgdir_if_needed(bootpgdir);
	ASSERT(rcr3() != cr3, "did not switch page dirs");
	ASSERT(kpg[1] == 0xBAD1, "swithcing page dirs caused invliad data");

}

// Run the vm unit tests (invoked via TEST() at the end of vm_init).
// Removed the unused local `int rc;` from the original.
static void test(void)
{
	monitor_write("testing: vm\n");

	monitor_write("\ttest_it_should_insert_new_page ... ");
	test_it_should_insert_new_page();
	monitor_write("OK\n");

	monitor_write("\ttest_it_should_allocate_memory_in_kernel ... ");
	it_should_allocate_memory_in_kernel();
	monitor_write("OK\n");
}

