#include <inc/types.h>
#include <inc/x86.h>
#include <inc/mmu.h>
#include <inc/string.h>
#include <inc/memlayout.h>
#include <inc/error.h>
#include <inc/assert.h>

#include <kern/cpu.h>
#include <kern/vm.h>
#include <kern/kalloc.h>
#include <kern/console.h>
#include <kern/proc.h>
#include <kern/cpu.h>

// #define DEBUG_VM

// Defined by kern/kernel.ld.
extern char data[];
// Kernel's page table directory.
pde_t *kpgdir;
// GDT. in struct CpuInfo

void
seg_init(void)
{
	// Map "logical" addresses to virtual addresses using an identity map.
	// A single code descriptor cannot serve both kernel and user: it
	// would need DPL_USR, and the CPU forbids an interrupt from CPL=0
	// into a DPL=3 segment.
	struct CpuInfo *c = thiscpu;

	// Kernel segments at privilege 0, user segments at DPL_USER; every
	// segment spans the full 4GB (base 0, limit 0xffffffff).
	c->gdt[SEG_KCODE] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 0);
	c->gdt[SEG_KDATA] = SEG(STA_W, 0x0, 0xffffffff, 0);
	c->gdt[SEG_UCODE] = SEG(STA_X | STA_R, 0x0, 0xffffffff, DPL_USER);
	c->gdt[SEG_UDATA] = SEG(STA_W, 0x0, 0xffffffff, DPL_USER);

	// Per-CPU task-state segment; clearing 's' marks it a system
	// descriptor rather than an application segment.
	c->gdt[SEG_TSS] = SEG(STS_T32A, &(c->cpu_ts), sizeof(struct taskstate) - 1, 0);
	c->gdt[SEG_TSS].s = 0;
#ifdef DEBUG_VM
	cprintf("seg_init: cpu %d ts ptr 0x%x\ngdt ptr 0x%x\nSEG_TSS: 0x%x\n", cpunum(), &(c->cpu_ts), c->gdt, c->gdt[SEG_TSS]);
#endif
	lgdt(c->gdt, sizeof(c->gdt));
}

// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and alloc == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with kalloc.
// 		- If the allocation fails, pgdir_walk returns NULL.
// 		- Otherwise, the new page is cleared, and pgdir_walk returns
// a pointer into the new page table page.
//
// Hint 1: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// directory more permissive than strictly necessary.
//
// Hint 2: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
static pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int32_t alloc)
{
	// Return a pointer to the PTE for 'va' in 'pgdir', allocating the
	// intermediate page table if 'alloc' is set.  Returns NULL when the
	// table is absent and alloc is false, or when kalloc() fails.
	pde_t *pde = &pgdir[PDX(va)];
	pte_t *pgtab;

	if (*pde & PTE_P) {
		// PTE_ADDR strips the flag bits; P2V turns the physical
		// address into the kernel virtual address we can dereference.
		pgtab = (pte_t *)P2V(PTE_ADDR(*pde));
	} else {
		// BUG FIX: the old test was `PTE_ADDR(pde) || pde & PTE_P`,
		// which treated any PDE with nonzero address bits as present;
		// only PTE_P decides presence.
		if (!alloc || (pgtab = (pte_t *)kalloc()) == NULL)
			return NULL;
		memset(pgtab, 0, PGSIZE);
		// Leave the PDE permissive (PTE_W | PTE_U): the MMU checks
		// both levels, so the PTE bits are what actually restrict.
		*pde = (pde_t)((uint32_t)V2P(pgtab) | PTE_P | PTE_W | PTE_U);
	}
	return &pgtab[PTX(va)];
}

// Create PTEs for virtual addresses starting at va that refer to
// physical addresses starting at pa. va and size might **NOT**
// be page-aligned.
// Use permission bits perm|PTE_P for the entries.
//
// Hint: the TA solution uses pgdir_walk
static int
map_region(pde_t *pgdir, void *va, uint32_t size, uint32_t pa, int32_t perm)
{
	// Create PTEs mapping [va, va+size) to physical [pa, pa+size) with
	// perm|PTE_P.  Neither va nor size need be page-aligned.  Returns
	// the number of PTEs installed (0 for an empty region); panics if
	// a page table cannot be allocated.
	//
	// BUG FIX: the old code rounded va UP and pa DOWN, which skipped
	// the first partial page and desynchronized the va->pa offset for
	// unaligned inputs.  Round both DOWN (xv6 mappages style).
	pte_t *pte;
	uint32_t a, last;
	int32_t cnt = 0;

	if (size == 0)
		return 0;
	a = PGROUNDDOWN((uint32_t)va);
	last = PGROUNDDOWN((uint32_t)va + size - 1);
	pa = PGROUNDDOWN(pa);
	for (;;) {
		pte = pgdir_walk(pgdir, (void *)a, true);
		if (!pte)
			panic("pgdir_walk in map_region: failure");
		*pte = pa | perm | PTE_P;
		cnt++;
		// Compare before advancing so a region ending at the top of
		// the 32-bit address space does not wrap around.
		if (a == last)
			break;
		a += PGSIZE;
		pa += PGSIZE;
	}
	return cnt;
}

// This table defines the kernel's mappings, which are present in
// every process's page table.
// The example code here marks all physical as writable.
// However this is not truly the case.
// kvm_init() should set up page table like this:
//
//	KERNBASE..KERNBASE+EXTMEM: mapped to 0..EXTMEM
// 									(for I/O space)
// 	KERNBASE+EXTMEM..data: mapped to EXTMEM..V2P(data)
//					for the kernel's instructions and r/o data
// 	data..KERNBASE+PHYSTOP: mapped to V2P(data)..PHYSTOP,
//					rw data + free physical memory
//  DEVSPACE..0: mapped direct (devices such as ioapic)
static struct kmap {
	void *virt;		// start of the virtual range
	uint32_t phys_start;	// first physical address mapped
	uint32_t phys_end;	// one past the last physical address (0 = wraps to 4GB)
	int perm;		// PTE permission bits (PTE_P is added by map_region)
} kmap[] = {
	// I/O space: low memory below EXTMEM (VGA, BIOS area), writable.
	{ (void *)KERNBASE, 0, EXTMEM, PTE_W},
	// Kernel text + read-only data: mapped without PTE_W.
	{ (void *)KERNBASE + EXTMEM, EXTMEM, V2P(data), 0},
	// Kernel rw data and all remaining free physical memory.
	{ (void *)data, V2P(data), PHYSTOP, PTE_W},
	// Devices (e.g. ioapic): identity-mapped from DEVSPACE up to the
	// top of the 32-bit address space (phys_end 0 relies on wraparound).
	{ (void *)DEVSPACE, DEVSPACE, 0, PTE_W}
};

// Set up kernel part of a page table.
// Return a pointer of the page table.
// Return 0 on failure.
//
// In general, you need to do the following things:
// 		1. kalloc() memory for page table;
// 		2. use map_region() to create corresponding PTEs
//			for each item of kmap[];
//
// Hint: You may need ARRAY_SIZE.
pde_t *
kvm_init(void)
{
	// DONE: your code here
	// DONE: debug
	pde_t *pde_ptr = (pde_t *)kalloc();
	if (!pde_ptr)
		return 0;
	memset(pde_ptr, 0, PGSIZE);
	for (int32_t i = 0; i < ARRAY_SIZE(kmap); i++) {
		map_region(pde_ptr,
				kmap[i].virt,
				kmap[i].phys_end - kmap[i].phys_start,
				kmap[i].phys_start,
				kmap[i].perm);
	}
	return pde_ptr;
}

// Switch h/w page table register to the kernel-only page table.
void
kvm_switch(void)
{
	// Point CR3 at the kernel-only page directory (CR3 takes a
	// physical address, hence V2P).
	lcr3(V2P(kpgdir));
}

void check_vm(pde_t *pgdir);

// Allocate one page table for the machine for the kernel address
// space.
void
vm_init(void)
{
	// Build the kernel page table, activate it, then sanity-check the
	// installed mappings against kmap[].
	if ((kpgdir = kvm_init()) == 0)
		panic("vm_init: failure");
	kvm_switch();
	check_vm(kpgdir);
}

void
uvm_free(pde_t *pgdir)
{
	// Free every physical page mapped in the user half of pgdir
	// (below KERNBASE) and clear the corresponding PTEs.  The page
	// table pages themselves are released later by vm_free().
	pde_t *pde;
	pte_t *pgtab, *pte;

	for (pde = pgdir; pde < pgdir + PDX(KERNBASE); pde++) {
		if (!(*pde & PTE_P))
			continue;
		pgtab = (pte_t *)P2V(PTE_ADDR(*pde));
		// BUG FIX: a page table has NPTENTRIES entries, not
		// NPDENTRIES (equal on x86, but the wrong constant); also
		// dropped the unused 'temp' local.
		for (pte = pgtab; pte < pgtab + NPTENTRIES; pte++)
			if (*pte & PTE_P) {
				kfree((char *)P2V(PTE_ADDR(*pte)));
				*pte = 0;
			}
	}
}

// Free a page table.
//
// Hint: You need to free all existing PTEs for this pgdir.
void
vm_free(pde_t *pgdir)
{
	// Free all page table pages referenced by pgdir and clear the
	// PDEs.  Leaf data pages must already have been freed (uvm_free);
	// pgdir itself is NOT freed here.
	// Cleanup: removed the unused 'pte_ptr' local and the spurious
	// (uint32_t *) cast of a pde_t pointer.
	pde_t *pde;

	for (pde = pgdir; pde < pgdir + NPDENTRIES; pde++)
		if (*pde & PTE_P) {
			kfree((char *)P2V(PTE_ADDR(*pde)));
			*pde = 0;
		}
}

void
check_vm(pde_t *pgdir)
{
	// Verify that every kmap[] range is mapped in pgdir at the expected
	// physical address; panics/asserts on any missing or wrong PTE.
	pte_t *pte;

	for (uint32_t i = 0; i < ARRAY_SIZE(kmap); i++) {
#ifdef DEBUG_VM
		cprintf("Test kmap[%d]\n", i);
#endif
		// BUG FIX: addresses are now uint32_t; the old int32_t loop
		// variables relied on implementation-defined signed conversion
		// for ranges above 2GB (KERNBASE, DEVSPACE).  The unsigned
		// subtraction also handles the DEVSPACE entry, whose phys_end
		// of 0 wraps past the top of the address space.
		for (uint32_t pa = PGROUNDUP(kmap[i].phys_start),
				va = PGROUNDUP((uint32_t)kmap[i].virt);
				pa - kmap[i].phys_start < kmap[i].phys_end - kmap[i].phys_start;
				pa += PGSIZE, va += PGSIZE) {
			pte = pgdir_walk(pgdir, (void *)va, false);
			if (!pte) {
				cprintf("NULL PTE Address for Virtual Address: 0x%x\n", va);
				panic("check_vm: Failed to get PTE!\n");
			}
			assert(PTE_ADDR(*pte) == pa);
		}
	}
}

//
// Allocate len bytes of physical memory for proc,
// and map it at virtual address va in the proc's address space.
// Does not zero or otherwise initialize the mapped pages in any way.
// Pages should be writable by user and kernel.
// Panic if any allocation attempt fails.
//
void
region_alloc(struct proc *p, void *va, size_t len)
{
	// Allocate len bytes of physical memory and map them at va in p's
	// address space, writable by user and kernel.  Pages are not
	// zeroed or otherwise initialized.  Panics on any failure.
	//
	// va is rounded down and va+len rounded up, so callers may pass
	// unaligned values.
	void *start = (void *)PGROUNDDOWN((uint32_t)va);
	void *end = (void *)PGROUNDUP((uint32_t)va + len);
	char *mem;
	pte_t *pte;

	for (void *a = start; a < end; a += PGSIZE) {
		// BUG FIX: the old code never checked kalloc() for failure
		// (contract says panic), and recovered the new page's physical
		// address by walking the kernel mapping inside p->pgdir;
		// V2P() on the kernel virtual address is direct and correct.
		mem = kalloc();
		if (mem == NULL)
			panic("region_alloc: out of memory");
		pte = pgdir_walk(p->pgdir, a, true);
		if (pte == NULL)
			panic("region_alloc failed to get pte in upgdir");
		*pte = V2P(mem) | PTE_U | PTE_W | PTE_P;
	}
}

void
pushcli(void)
{
	// Disable interrupts, recording at the outermost push whether they
	// were enabled so popcli() can restore them later.  eflags must be
	// read BEFORE cli(), since cli() clears FL_IF.
	int32_t flags = read_eflags();

	cli();
	if (thiscpu->ncli == 0)
		thiscpu->intena = flags & FL_IF;
	++thiscpu->ncli;
}

void
popcli(void)
{
	// Matching pop for pushcli(): re-enable interrupts only once the
	// outermost level unwinds and they were enabled to begin with.
	if (read_eflags() & FL_IF)
		panic("popcli - interruptible");
	thiscpu->ncli--;
	if (thiscpu->ncli < 0)
		panic("popcli");
	if (thiscpu->ncli == 0 && thiscpu->intena)
		sti();
}

//
// Switch TSS and h/w page table to correspond to process p.
// EDITED - load the TSS and switch the page table
void
uvm_switch(struct proc *p)
{
	// Load this CPU's TSS with p's kernel stack and switch CR3 to p's
	// page table, so traps from user mode land on the right stack and
	// the process runs in its own address space.
	struct CpuInfo *cpu;
	struct taskstate *ts;

	pushcli();
	// BUG FIX: cpu/ts are assigned before the DEBUG_VM prints; the old
	// code read both uninitialized in the first debug block.
	cpu = thiscpu;
	ts = &(cpu->cpu_ts);
#ifdef DEBUG_VM
	cprintf("uvm_switch: taskstate ptr 0x%x\npgdir ptr 0x%x\n", ts, p->pgdir);
	check_vm(p->pgdir);
	cprintf("uvm_switch: gdt tss: 0x%x 0x%x\n",
			((uint32_t *) (cpu->gdt + SEG_TSS))[1], ((uint32_t *) (cpu->gdt + SEG_TSS))[0]);
#endif
	// Rebuild the TSS descriptor (SEG16: 16-bit limit suffices for a
	// taskstate) and mark it a system segment.
	cpu->gdt[SEG_TSS] = SEG16(STS_T32A, ts, sizeof(*ts)-1, 0);
	cpu->gdt[SEG_TSS].s = 0;
	cpu->gdt[SEG_TSS].type = STS_T32A;
	// Kernel stack used on privilege-level transitions into ring 0.
	ts->esp0 = (uintptr_t) p->kstack + KSTACKSIZE;
	ts->ss0 = SEG_KDATA << 3;
	// I/O map base past the TSS limit: forbid user-mode port I/O.
	ts->iomb = 0xFFFF; // sizeof(struct taskstate);
	ltr(SEG_TSS << 3);
#ifdef DEBUG_VM
	cprintf("uvm_switch: after ltr gdt tss: 0x%x 0x%x\n",
			((uint32_t *) (cpu->gdt + SEG_TSS))[1], ((uint32_t *) (cpu->gdt + SEG_TSS))[0]);
#endif
	lcr3(V2P(p->pgdir)); // switch to the user page table
#ifdef DEBUG_VM
	cprintf("uvm_switch: after switch pg gdt tss: 0x%x 0x%x\n",
			((uint32_t *) (cpu->gdt + SEG_TSS))[1], ((uint32_t *) (cpu->gdt + SEG_TSS))[0]);
#endif
	popcli();
}

void
pte_copy(pte_t *t, const pte_t *s)
{
	// Deep-copy one page table: for each present entry of s, allocate
	// a fresh physical page, copy its contents, and install it in t
	// with the same flag bits.  Non-present entries are left untouched
	// (callers pass a zeroed table).  Panics if allocation fails.
	void *mem;

	// BUG FIX: a page table has NPTENTRIES entries, not NPDENTRIES
	// (same value on x86, but the wrong constant).
	for (uint32_t i = 0; i < NPTENTRIES; i++)
		if (s[i] & PTE_P) {
			mem = (void *)kalloc();
			if (mem == NULL)
				panic("kalloc failed");
			memcpy(mem, (void *)P2V(PTE_ADDR(s[i])), PGSIZE);
			t[i] = (pte_t)((uint32_t)V2P(mem) | PTE_FLAGS(s[i]));
		}
}

void
uvm_copy(pde_t *dst, const pde_t *src)
{
	// Deep-copy the user half (below KERNBASE) of src into dst: each
	// present PDE gets a freshly allocated, zeroed page table whose
	// entries are duplicated by pte_copy().  Panics if allocation
	// fails.  Cleanup: removed the unused 'pte_t *t' local and use an
	// unsigned cast for the physical address.
	void *pgtab;

	for (uint32_t i = 0; i < PDX(KERNBASE); i++)
		if (src[i] & PTE_P) {
			pgtab = (void *)kalloc();
			if (pgtab == NULL)
				panic("kalloc failed");
			memset(pgtab, 0, PGSIZE);
			dst[i] = (pde_t)((uint32_t)V2P(pgtab) | PTE_FLAGS(src[i]));
			pte_copy((pte_t *)pgtab, (pte_t *)P2V(PTE_ADDR(src[i])));
		}
}
