/*
	Copyright (C) 2014 Salil Bhagurkar

	This file is part of illusion

	illusion is free software: you can redistribute it and/or modify
	it under the terms of the GNU Lesser General Public License as published by
	the Free Software Foundation, either version 3 of the License, or
	(at your option) any later version.

	illusion is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU Lesser General Public License for more details.

	You should have received a copy of the GNU Lesser General Public License
	along with illusion.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <arch/page.h>
#include <arch/arch.h>
#include <arch/power.h>
#include <klib/lib.h>
#include <klib/memory.h>
#include <kernel/console.h>
#include <kernel/page.h>
#include <kernel/errors.h>
#include <kernel/vmpage.h>
#include <kernel/init.h>

#define PAGE_ADDRMASK 0xfffff000
#define PAGE_FLAGMASK 0xfff


//TODO Need to improve error handling here

/*
 * TODO Right now we are enabling access to the page directory and
 * then immediately to the page table, which is a big loss of performance.
 * We should possibly have the page directory already permanenly mapped in and
 * not require enabling access. There could also possibly be a mechanism which
 * allows caching of the meta access mappings in order to enable access to
 * deeper page structures in case of long mode.
 */

/* Extract the page-directory index (top 10 bits) of a virtual address. */
static int get_dir_index(unsigned long vaddr)
{
	return (int)((vaddr & 0xffc00000) >> 22);
}

/* Extract the page-table index (middle 10 bits) of a virtual address. */
static int get_table_index(unsigned long vaddr)
{
	return (int)((vaddr & 0x003ff000) >> 12);
}

/* Reassemble a virtual address from a directory index and a table index. */
static unsigned long address_from_index(unsigned long dir_index, unsigned long table_index)
{
	unsigned long vaddr = dir_index << 22;
	vaddr |= table_index << 12;
	return vaddr;
}


/*
 * Load 'pgdir' as the active page directory base by writing it to CR3.
 * On x86 a CR3 write also flushes the non-global TLB entries, which is
 * what makes remappings (e.g. in enable_kernel_meta_access) take effect.
 */
static void update_page_directory(unsigned long *pgdir)
{
	__asm__ volatile ("mov %0, %%cr3" : : "r" (pgdir));
}

//Tracks whether CR0.PG is currently set; maintained only by
//enable_page_translation()/disable_page_translation()
static int page_translating = false;
/*
 * The current page system pointer is used to enable kernel meta access
 * within a page system which is online, but we are performing  mapping
 * in some other page system, while sitting in another page system.
 * This is specifically required when creating mappings for a new process.
 */
static struct page_sys *current_page_sys;

/*
 * Bring a page system online: load its directory into CR3 and record it
 * as the current one used for kernel meta accesses.
 */
void implement_page_sys(struct page_sys *page_sys)
{
	unsigned long *base = page_sys->pg_dir;
	update_page_directory(base);
	current_page_sys = page_sys;
	debug("Implementing page system base = 0x%x\n", base);
}

/*
 * Record 'page_sys' as the current page system for meta accesses without
 * actually loading its directory into CR3.
 */
void prepare_implement_page_sys(struct page_sys *page_sys)
{
	current_page_sys = page_sys;
	debug("Prepare implement page system base = 0x%x\n", current_page_sys->pg_dir);
}

/*
 * Turn paging on by setting CR0.PG (bit 31). A valid page directory must
 * already be loaded into CR3 (see implement_page_sys) before calling this.
 */
void enable_page_translation()
{
	unsigned long cr0;
	__asm__ volatile ("mov %%cr0, %0" : "=r" (cr0));
	//Set the PG bit (bit 31)
	cr0 |= 0x80000000;
	__asm__ volatile ("mov %0, %%cr0" : : "r" (cr0));
	page_translating = true;
	debug("Page translation now enabled\n");
}

/*
 * Turn paging off by clearing CR0.PG (bit 31); addresses are then
 * interpreted as physical again.
 */
void disable_page_translation()
{
	unsigned long cr0;
	__asm__ volatile ("mov %%cr0, %0" : "=r" (cr0));
	//Clear the PG bit (bit 31)
	cr0 &= ~0x80000000;
	__asm__ volatile ("mov %0, %%cr0" : : "r" (cr0));
	page_translating = false;
	debug("Page translation now disabled\n");
}

/* Report whether page translation (CR0.PG) is currently enabled. */
bool is_page_translating()
{
	bool translating = page_translating;
	return translating;
}

//Number of levels in the i386 two-level hierarchy, plus the directory
//base slot itself (level 0); see fetch_hierarchy
#define NR_H_LEVELS 3
//Human-readable name per hierarchy level, for debug output
static char *h_level_name[] = {"Page Directory Base", "Page Directory",
		"Page Table"};

/*
 * Resolve the paging structure and entry index that cover 'vaddr' at a
 * given hierarchy level: 0 = the slot holding the directory base itself,
 * 1 = the page directory, 2 = the page table.
 */
static void fetch_hierarchy(struct page_sys *page_sys, unsigned long **page,
		int *index, unsigned long vaddr, int h_level)
{
	switch(h_level) {
	case 0:
		*page = (unsigned long *)&(page_sys->pg_dir);
		*index = 0;
		break;
	case 1:
		*page = page_sys->pg_dir;
		*index = get_dir_index(vaddr);
		break;
	case 2: {
		unsigned long *pg_dir = (unsigned long *)page_sys->pg_dir;
		*page = (unsigned long *)(pg_dir[get_dir_index(vaddr)] & PAGE_ADDRMASK);
		*index = get_table_index(vaddr);
		break;
	}
	}
}

/*
 * Walk the paging hierarchy for 'vaddr' and create or update the entry at
 * each level according to 'trans'. 'flag_set' supplies per-level flags
 * (indexed by hierarchy level, see fetch_hierarchy).
 * Returns 0 on success, ESTINV on an invalid transaction.
 *
 * Fixes: a successful MAP_CREATE previously fell straight through into
 * the assertv(false) failure path; the failure is now taken only when no
 * create was requested. The freshly allocated page also no longer
 * shadows the 'phys' parameter.
 */
static int map_address2(struct page_sys *page_sys,  unsigned long phys,
		unsigned long vaddr, unsigned long *flag_set,
		unsigned long trans)
{
	unsigned long *page;
	int index, h_level;
	for(h_level = 0; h_level < NR_H_LEVELS; h_level++) {
		fetch_hierarchy(page_sys, &page, &index, vaddr, h_level);
		if(page[index] == 0) {
			//Allow a create only if intended
			if(trans & MAP_CREATE) {
				unsigned long new_page =
					(unsigned long)phys_get_pages(1);
				//Zero the new page
				memset((void *)new_page, 0, PAGE_SIZE);
				//TODO at the leaf level (h_level == NR_H_LEVELS - 1)
				//'phys' should arguably be installed instead of a
				//fresh page — confirm the intended semantics
				page[index] = new_page | flag_set[h_level];
				//Descend to the next hierarchy level
				continue;
			}
			assertv(false,
					"Error: trans = %u, page = 0x%x, index = %u\n",
					trans, page, index);
			return ESTINV;
		} else {
			char done = 0;
			//Do the update specified
			if(trans & MAP_UPDATE_PHYS) {
				page[index] = phys | (page[index] &
						PAGE_FLAGMASK);
				done = 1;
			}
			if(trans & MAP_UPDATE_FLAGS) {
				page[index] = (page[index] & PAGE_ADDRMASK) |
						flag_set[h_level];
				done = 1;
			}
			if(!done) {
				assertv(done,
						"Error: trans = %u, page = 0x%x, index = %u\n",
						trans, page, index);
				return ESTINV;
			}
		}
	}
	return 0;
}

//#define MAP_HIERARCHY_LEVEL 2
//
//static void *fetch_next_hierarchy(struct page_sys *page_sys, void *vaddr,
//			int *h_level, int *index)
//{
//	if((*h_level) == 2) {
//		(*h_level)--;
//		*index = get_dir_index((unsigned long)vaddr);
//		return page_sys->pg_dir;
//	} else if((*h_level) == 1) {
//		(*h_level)--;
//		*index = get_table_index((unsigned long)vaddr);
//	}
//}
//
//static int map_address_no_pgtrans(struct page_sys *page_sys, void *phys,
//		void *vaddr, unsigned long pde_flags, unsigned long pte_flags,
//		unsigned long trans)
//{
//	int h_level = MAP_HIERARCHY_LEVEL;
//	int index = 0;
//	void *meta = fetch_next_hierarchy(page_sys, vaddr, &h_level, &index);
//}

//Fatal assertion used for impossible/invalid page map states; the rest of
//this file assumes it does not return
#define bad_map() assertv(false, "Invalid map state!\n")


//TODO BUG: When we update the PDE flags for the PTE, we must not change them back to kernel if they are changed to user once

//This function is used when we need to create/update page mappings while
//page translation is off, so all paging structures are reachable directly
//through their physical addresses.
//'trans' selects the transaction: MAP_CREATE installs a new mapping (and
//any missing intermediate structures); MAP_UPDATE_PHYS/MAP_UPDATE_FLAGS
//modify an existing PTE.
//Fixes: a freshly allocated page directory is now zeroed (stale memory
//used to be left in the other 1023 entries), and MAP_UPDATE_PHYS masks
//the old entry with PAGE_FLAGMASK instead of OR-ing the mask in, which
//set every flag bit on the entry.
static void map_address_no_pgtrans(struct page_sys *page_sys, void *phys,
		void *vaddr, unsigned long pde_flags, unsigned long pte_flags,
		unsigned long trans)
{
	debug("map[0x%x] [0x%x->0x%x] PTE_F[0x%x]PDE_F[0x%x]TRANS[0x%x]\n",
			page_sys->pg_dir, vaddr, phys, pte_flags, pde_flags, trans);
	int dir_index = get_dir_index((unsigned long)vaddr);
	int table_index = get_table_index((unsigned long)vaddr);
	if(page_sys->pg_dir == null) {
		//Only create the page directory, if we intended a create
		if(trans & MAP_CREATE) {
			page_sys->pg_dir = (unsigned long *)phys_get_pages(1);
			//Zero the new directory so stale memory cannot be
			//interpreted as present entries
			memset(page_sys->pg_dir, 0, PAGE_SIZE);
			unsigned long *pg_table = (unsigned long *)phys_get_pages(1);
			page_sys->pg_dir[dir_index] = (((unsigned long)pg_table) & PAGE_ADDRMASK) | pde_flags;
			memset(pg_table, 0, PAGE_SIZE);
			pg_table[table_index] = ((unsigned long)phys & PAGE_ADDRMASK) | pte_flags;
			return;
		}
		bad_map();
	} else {
		if(page_sys->pg_dir[dir_index] == 0) {
			//Only create the page table if a create was intended
			if(trans & MAP_CREATE) {
				unsigned long *pg_table =
					(unsigned long *)phys_get_pages(1);
				page_sys->pg_dir[dir_index] = (((unsigned long)pg_table) & PAGE_ADDRMASK) | pde_flags;
				memset(pg_table, 0, PAGE_SIZE);
				pg_table[table_index] = ((unsigned long)phys & PAGE_ADDRMASK) | pte_flags;
				return;
			}
			bad_map();
		} else {
			unsigned long *pg_table = (unsigned long *)(page_sys->pg_dir[dir_index] & PAGE_ADDRMASK);
			if(pg_table[table_index] == 0) {
				//Only create the entry if a create was intended
				if(trans & MAP_CREATE) {
					//Update the PDE flags if required
					if(trans & MAP_UPDATE_FLAGS) {
						page_sys->pg_dir[dir_index] = ((unsigned long)page_sys->pg_dir[dir_index] & PAGE_ADDRMASK) | pde_flags;
					}
					pg_table[table_index] = ((unsigned long)phys & PAGE_ADDRMASK) | pte_flags;
					return;
				}
				bad_map();
			} else if(trans & MAP_CREATE) {
				//Creating over an existing mapping is invalid
				bad_map();
			} else {
				char done = 0;
				//We are now clear that an update is to be done
				//Figure out what kind and do it
				if(trans & MAP_UPDATE_FLAGS) {
					pg_table[table_index] =
					((unsigned long)pg_table[table_index] & PAGE_ADDRMASK) | pte_flags;
					done = 1;
				}
				if(trans & MAP_UPDATE_PHYS) {
					//Keep the old flag bits (mask, don't OR the mask in)
					pg_table[table_index] = ((unsigned long)phys & PAGE_ADDRMASK) | ((unsigned long)pg_table[table_index] & PAGE_FLAGMASK);
					done = 1;
				}
				if(trans & MAP_UPDATE_FLAGS) {
					//Update the PDE flags too
					page_sys->pg_dir[dir_index] = ((unsigned long)page_sys->pg_dir[dir_index] & PAGE_ADDRMASK) | pde_flags;
				}
				if(!done)
					assertv(done, "Invalid map flags\n");
				return;
			}
		}
	}
}


//TODO Need some mechanism to free parent structures when all child entries are unused
/*
 * Remove the PTE for 'vaddr' while page translation is off. The physical
 * page that was mapped is returned through 'was_phys'; the page itself is
 * not freed here. bad_map() (which does not return) fires if any level of
 * the hierarchy is missing.
 */
static void unmap_address_no_pgtrans(struct page_sys *page_sys, void **was_phys, void *vaddr)
{
	debug("unmap [0x%x]\n", vaddr);
	int pde_idx = get_dir_index((unsigned long)vaddr);
	int pte_idx = get_table_index((unsigned long)vaddr);
	if(page_sys->pg_dir == null)
		bad_map();
	if(page_sys->pg_dir[pde_idx] == 0)
		bad_map();
	unsigned long *pg_table =
		(unsigned long *)(page_sys->pg_dir[pde_idx] & PAGE_ADDRMASK);
	if(pg_table[pte_idx] == 0)
		bad_map();
	*was_phys = (void *)(pg_table[pte_idx] & PAGE_ADDRMASK);
	//Set the mapping to not present
	pg_table[pte_idx] = 0;
	debug("--was_phys = 0x%x\n", *was_phys);
}


/*
 * Given a virtual address, this function will give the mapped physical address
 * inside a page mapping
 */
static void get_phys_address_no_pgtrans(struct page_sys *page_sys, void **phys,
		unsigned long *flags, void *vaddr)
{
	if(page_sys->pg_dir == null)
		bad_map();
	int dir_index = get_dir_index((unsigned long)vaddr);
	unsigned long *pg_table = (unsigned long *)(page_sys->pg_dir[dir_index] & PAGE_ADDRMASK);
	if(pg_table == null)
		bad_map();
	int table_index = get_table_index((unsigned long)vaddr);
	*phys = (void *)(pg_table[table_index] & PAGE_ADDRMASK);
	*flags = pg_table[table_index] & 0xfff;
}

//This enables access to a kernel page structure (page table or page directory)
//by using the address 'mav'.
//NOTE(review): 'address' is assumed to be page-aligned; if it were not,
//the masked comparison below could spuriously match — confirm callers.
static void enable_kernel_meta_access(void *address)
{
	//We enable access to an address by mapping the address in temporarily
	//into the currently online page system
	struct page_sys *page_sys = current_page_sys;
	//Skip the CR3 reload (and TLB flush) if 'address' is already the page
	//mapped at the mav window
	if(((unsigned long)page_sys->mava[page_sys->mav_index] & PAGE_ADDRMASK) != ((unsigned long)address)) {
		page_sys->mava[page_sys->mav_index] = (((unsigned long)address) & PAGE_ADDRMASK) | PTE_PRESENT | PTE_WRITE;
		update_page_directory(page_sys->pg_dir);
	}
}

//This function is used when we need to do mapping for the kernel
//while the mapping itself is active. Hence, in this case, the kernel
//page directory or a page table within it may not be mapped in.
//So we need to map it in using the meta data accessor 'mav':
//enable_kernel_meta_access() maps the requested structure at the mav
//window, after which page_sys->mav[i] addresses its entries.
//This function is used when we need to create/update page mappings.
//Fixes: 'done' is now initialized (it was read uninitialized for
//update-only transactions — undefined behavior), a freshly created page
//directory is zeroed before use, and MAP_UPDATE_PHYS masks the old entry
//with PAGE_FLAGMASK instead of OR-ing the mask in (which set every flag
//bit).
static void map_address_pgtrans(struct page_sys *page_sys, void *phys, void *vaddr,
	unsigned long pde_flags, unsigned long pte_flags, unsigned long trans)
{
	debug("map[0x%x] [0x%x->0x%x] PTE_F[0x%x]PDE_F[0x%x]TRANS[0x%x]\n",
			page_sys->pg_dir, vaddr, phys, pte_flags, pde_flags, trans);
	int dir_index = get_dir_index((unsigned long)vaddr);
	int table_index = get_table_index((unsigned long)vaddr);
	if(page_sys->pg_dir == null) {
		//Only create the page directory, if we intended a create
		if(trans & MAP_CREATE) {
			page_sys->pg_dir = (unsigned long *)phys_get_pages(1);
			unsigned long *pg_table = (unsigned long *)phys_get_pages(1);
			enable_kernel_meta_access(page_sys->pg_dir);
			//Zero the new directory before installing the first entry
			memset(page_sys->mav, 0, PAGE_SIZE);
			page_sys->mav[dir_index] = (((unsigned long)pg_table) & PAGE_ADDRMASK) | pde_flags;
			enable_kernel_meta_access(pg_table);
			memset(page_sys->mav, 0, PAGE_SIZE);
			page_sys->mav[table_index] = ((unsigned long)phys & PAGE_ADDRMASK) | pte_flags;
			return;
		}
		bad_map();
	} else {
		enable_kernel_meta_access(page_sys->pg_dir);
		if(page_sys->mav[dir_index] == 0) {
			//Only create the page table if a create was intended
			if(trans & MAP_CREATE) {
				unsigned long *pg_table = (unsigned long *)phys_get_pages(1);
				page_sys->mav[dir_index] = (((unsigned long)pg_table) & PAGE_ADDRMASK) | pde_flags;
				enable_kernel_meta_access(pg_table);
				memset(page_sys->mav, 0, PAGE_SIZE);
				page_sys->mav[table_index] = ((unsigned long)phys & PAGE_ADDRMASK) | pte_flags;
				return;
			}
			bad_map();
		} else {
			//The window already shows the directory; read the PDE,
			//then remap the window onto the page table
			unsigned long *pg_table = (unsigned long *)(page_sys->mav[dir_index] & PAGE_ADDRMASK);
			enable_kernel_meta_access(pg_table);
			if(page_sys->mav[table_index] == 0) {
				//Only create the entry if a create was intended
				if(trans & MAP_CREATE) {
					page_sys->mav[table_index] = ((unsigned long)phys & PAGE_ADDRMASK) | pte_flags;
					//Update the PDE flags if required
					if(trans & MAP_UPDATE_FLAGS) {
						enable_kernel_meta_access(page_sys->pg_dir);
						page_sys->mav[dir_index] = ((unsigned long)page_sys->mav[dir_index] & PAGE_ADDRMASK) | pde_flags;
					}
					return;
				}
				bad_map();
			} else if (trans & MAP_CREATE) {
				//Creating over an existing mapping is invalid
				bad_map();
			} else {
				char done = 0;
				//We are now clear that an update is to be done
				//Figure out what kind and do it
				if(trans & MAP_UPDATE_FLAGS) {
					page_sys->mav[table_index] = ((unsigned long)page_sys->mav[table_index] & PAGE_ADDRMASK) | pte_flags;
					done = 1;
				}
				if(trans & MAP_UPDATE_PHYS) {
					//Keep the old flag bits (mask, don't OR the mask in)
					page_sys->mav[table_index] = ((unsigned long)phys & PAGE_ADDRMASK) | ((unsigned long)page_sys->mav[table_index] & PAGE_FLAGMASK);
					done = 1;
				}
				if(trans & MAP_UPDATE_FLAGS) {
					//Update the PDE flags too
					enable_kernel_meta_access(page_sys->pg_dir);
					page_sys->mav[dir_index] = ((unsigned long)page_sys->mav[dir_index] & PAGE_ADDRMASK) | pde_flags;
				}
				if(!done)
					assertv(done, "Invalid map flags\n");
				return;
			}
		}
	}
}

//TODO Need a way to free parent structure when all child entries have been freed
/*
 * Remove the PTE for 'vaddr' while paging is on, reaching the directory
 * and the page table through the mav window (see enable_kernel_meta_access).
 * The physical page that was mapped is returned through 'was_phys'; the
 * page itself is not freed here.
 */
static void unmap_address_pgtrans(struct page_sys *page_sys, void **was_phys, void *vaddr)
{
	debug("unmap [0x%x]\n", vaddr);
	int dir_index = get_dir_index((unsigned long)vaddr);
	int table_index = get_table_index((unsigned long)vaddr);
	if(page_sys->pg_dir == null) {
		bad_map();
	} else {
		//Map the page directory at the mav window
		enable_kernel_meta_access(page_sys->pg_dir);
		if(page_sys->mav[dir_index] == 0) {
			bad_map();
		} else {
			unsigned long *pg_table = (unsigned long *)(page_sys->mav[dir_index] & PAGE_ADDRMASK);

			//Remap the window onto the page table itself
			enable_kernel_meta_access(pg_table);
			if(page_sys->mav[table_index] == 0) {
				bad_map();
			} else {
				*was_phys = (void *)(page_sys->mav[table_index] & PAGE_ADDRMASK);
				//Set to not present
				page_sys->mav[table_index] = 0;
				debug("--was_phys = 0x%x\n", *was_phys);
				return;
			}
		}
	}
}



/*
 * Given a virtual address, this function will give the mapped virtual address
 * inside a page mapping.
 * This enables access to a page table when paging is enabled
 */
static void get_phys_address_pgtrans(struct page_sys *page_sys, void **phys, unsigned long *flags, void *vaddr)
{
	int dir_index = get_dir_index((unsigned long)vaddr);
	int table_index = get_table_index((unsigned long)vaddr);
	if(page_sys->pg_dir == null)
		bad_map();
	enable_kernel_meta_access(page_sys->pg_dir);
	unsigned long *pg_table = (unsigned long *)(page_sys->mav[dir_index] & PAGE_ADDRMASK);
	if(pg_table == null)
		bad_map();
	enable_kernel_meta_access(pg_table);
	*phys = (void *)(page_sys->mav[table_index] & PAGE_ADDRMASK);
	*flags = page_sys->mav[table_index] & 0xfff;
}


/*
 * Wrappers to above functions based on the page translation status
 */

/*
 * Map 'phys' at 'vaddr' in 'page_sys', dispatching on whether page
 * translation is currently active.
 */
void map_address(struct page_sys *page_sys, void *phys, void *vaddr,
	unsigned long pde_flags, unsigned long pte_flags, unsigned long trans)
{
	if(!page_translating) {
		map_address_no_pgtrans(page_sys, phys, vaddr, pde_flags, pte_flags, trans);
		return;
	}
	map_address_pgtrans(page_sys, phys, vaddr, pde_flags, pte_flags, trans);
}


/*
 * Look up the physical address and PTE flags for 'vaddr', dispatching on
 * whether page translation is currently active.
 */
void get_phys_address(struct page_sys *page_sys, void **phys, unsigned long *flags, void *vaddr)
{
	if(!page_translating) {
		get_phys_address_no_pgtrans(page_sys, phys, flags, vaddr);
		return;
	}
	get_phys_address_pgtrans(page_sys, phys, flags, vaddr);
}

/*
 * Unmap 'vaddr' and report the old physical page through 'was_phys',
 * dispatching on whether page translation is currently active.
 */
void unmap_address(struct page_sys *page_sys, void **was_phys, void *vaddr)
{
	if(!page_translating) {
		unmap_address_no_pgtrans(page_sys, was_phys, vaddr);
		return;
	}
	unmap_address_pgtrans(page_sys, was_phys, vaddr);
}

/*
 * Build a virtual address from a directory index and a table index.
 * Fix: the indices are widened to unsigned long before shifting —
 * 'dir_index << 22' on a signed int shifts into the sign bit for any
 * dir_index >= 512, which is undefined behavior.
 */
static unsigned long construct_vaddr(int dir_index, int table_index)
{
	return ((unsigned long)dir_index << 22) | ((unsigned long)table_index << 12);
}


/*
 * Print the decoded PTE flag bits.
 * Fix: the format string has 9 specifiers but 10 arguments were passed —
 * PTE_ACCESSED appeared twice, so D printed the accessed bit, PAT printed
 * the dirty bit, G printed the PAT bit and PTE_GLOBAL was never shown.
 */
static void debug_pte_flags(unsigned long flags)
{
	console_printf("{P%u W%u U%u WT%u CD%u A%u D%u PAT%u G%u}",
			!!(flags & PTE_PRESENT), !!(flags & PTE_WRITE), !!(flags & PTE_USER),
			!!(flags & PTE_WRITE_THROUGH), !!(flags & PTE_CACHE_DISABLE),
			!!(flags & PTE_ACCESSED), !!(flags & PTE_DIRTY),
			!!(flags & PTE_PAT), !!(flags & PTE_GLOBAL));
}

static void debug_pde_flags(unsigned long flags)
{
	console_printf("{P%u W%u U%u WT%u CD%u A%u PS%u}",
			!!(flags & PDE_PRESENT), !!(flags & PDE_WRITE), !!(flags & PDE_USER),
			!!(flags & PDE_WRITE_THROUGH), !!(flags & PDE_CACHE_DISABLE),
			!!(flags & PDE_PS), !!(flags & PDE_PS));
}


/*
 * Must be called only when page translation is disabled
 */
/*
 * Dump the PDE and 'nr_pages' consecutive PTEs covering 'vaddr' from the
 * page directory at 'base'.
 * NOTE(review): assumes the whole range lies within one page table
 * (table_index + nr_pages <= 1024) — confirm callers.
 */
void debug_page_map_vaddr(void *base, void *vaddr, unsigned long nr_pages)
{
	unsigned long *pgdir = (unsigned long *)base;
	int dir_index = get_dir_index((unsigned long)vaddr);
	int table_index = get_table_index((unsigned long)vaddr);
	console_printf("Displaying Page Map\n");
	console_printf("pgdir @ 0x%x\n", pgdir);
	console_printf("pgdir[%u] = 0x%x", dir_index, pgdir[dir_index]);
	debug_pde_flags(pgdir[dir_index] & PAGE_FLAGMASK);
	console_printf("\n");
	unsigned long *pgtbl = (unsigned long *)(pgdir[dir_index] & PAGE_ADDRMASK);
	unsigned long i;
	for(i = table_index; i < (table_index + nr_pages); i++) {
		console_printf("   pgtbl[%u] = 0x%x -> 0x%x ", i, vaddr, pgtbl[i]);
		debug_pte_flags(pgtbl[i] & PAGE_FLAGMASK);
		console_printf("\n");
		vaddr += PAGE_SIZE;
	}
}


/*
 * Must be called only when page translation is disabled
 */
/*
 * Walk the whole page directory at 'base' and print every present PTE
 * along with its decoded flags.
 */
void debug_page_map(void *base)
{
	unsigned long *pgdir = (unsigned long *)base;
	console_printf("pgd [0x%x]\n", pgdir);
	int i;
	for(i = 0; i < 1024; i++) {
		if(pgdir[i] == 0)
			continue;
		console_printf("   ptb [0x%x]\n", pgdir[i]);
		unsigned long *pt = (unsigned long *)(pgdir[i] & PAGE_ADDRMASK);
		int j;
		for(j = 0; j < 1024; j++) {
			if(pt[j] == 0)
				continue;
			console_printf("      [0x%x] -> [0x%x] ", construct_vaddr(i, j), pt[j]);
			debug_pte_flags(pt[j] & PAGE_FLAGMASK);
			console_printf("\n");
		}
	}
}


//Setup page mapping initially to enable access to the meta data
//accessors (mav)
//Creates the kernel page directory, pre-populates page tables covering
//the kernel zone [kz_start, kz_end), then maps 'mav' (the meta access
//window) and 'mava' (the PTE that controls the window) so that
//enable_kernel_meta_access() can retarget the window once paging is on.
//Fix: the page tables used to be zeroed via memset((void *)pg_dir[i], ...)
//AFTER the flag bits were OR'd into the entry, so the memset started 3
//bytes into the page (PDE_PRESENT | PDE_WRITE) and overran its end; the
//table is now zeroed before its address is combined with the flags.
void init_page_sys(struct page_sys *page_sys, void *kz_start, void *kz_end)
{
	//Create page directory, and first 256 page tables
	debug("Creating kernel page_sys 0x%x - 0x%x\n", kz_start, kz_end);
	page_sys->pg_dir = phys_get_pages(1);
	memset(page_sys->pg_dir, 0, PAGE_SIZE);
	int kz_start_i = get_dir_index((unsigned long)kz_start);
	int kz_end_i = get_dir_index((unsigned long)kz_end);
	int i;
	for(i = kz_start_i; i < kz_end_i; i++) {
		void *pg_table = phys_get_pages(1);
		//Zero the page table before publishing it in the directory
		memset(pg_table, 0, PAGE_SIZE);
		//Set the kernel flags for all page directories
		page_sys->pg_dir[i] = (unsigned long)pg_table | PDE_PRESENT | PDE_WRITE;
	}

	//Get two pages for 'mav', and 'mava'.
	page_sys->mav = phys_get_pages(1);
	debug("Page sys mav = 0x%x\n", page_sys->mav);
	//We use map_address now as we are not executing with paging enabled as of now
	map_address(page_sys, page_sys->mav, page_sys->mav, PDE_PRESENT | PDE_WRITE, PTE_PRESENT | PTE_WRITE, MAP_CREATE);
	int dir_index = get_dir_index((unsigned long)page_sys->mav);
	page_sys->mav_index = get_table_index((unsigned long)page_sys->mav);
	unsigned long *pte = (unsigned long *)(page_sys->pg_dir[dir_index] & PAGE_ADDRMASK);
	page_sys->mava = phys_get_pages(1);
	debug("Page sys mava = 0x%x\n", page_sys->mava);
	//Map mava to access the pte of ma.
	map_address(page_sys, pte, page_sys->mava, PDE_PRESENT | PDE_WRITE, PTE_PRESENT | PTE_WRITE, MAP_CREATE);
}

/*
 * Copies one page directory into another, given an address range
 * The destination page_sys is not online
 */
/*
 * The kernel directory entries in [start, end) are shared (copied by
 * value), so both page systems point at the same kernel page tables.
 * NOTE(review): the loop condition 'i < end_dir_index' excludes the
 * directory slot containing 'end' itself — confirm 'end' is exclusive
 * and directory-aligned.
 */
static void copy_top_level(struct page_sys *dest, void *start, void *end)
{
	//Get a vm page for the page directory, and then later free the virtual space for it
	dest->pg_dir = vm_get_pages(&kernel_vmmap, 1, PAGE_PRESENT | PAGE_WRITE);
	debug("New pg_dir (virt) = 0x%x\n", dest->pg_dir);
	memset(dest->pg_dir, 0, PAGE_SIZE);
	int start_dir_index = get_dir_index((unsigned long)start);
	int end_dir_index = get_dir_index((unsigned long)end);
	int i;
	if(page_translating) {
		//Reach the kernel directory through the mav window
		enable_kernel_meta_access(kernel_vmmap.page_sys.pg_dir);
		for(i = start_dir_index; i < end_dir_index; i++) {
			dest->pg_dir[i] = kernel_vmmap.page_sys.mav[i];
		}
	} else {
		for(i = start_dir_index; i < end_dir_index; i++) {
			dest->pg_dir[i] = kernel_vmmap.page_sys.pg_dir[i];
		}
	}
	//Free the virtual space for the page directory
	//and assign the physical address instead
	void *phys_pg_dir;
	vm_unmap_address(&kernel_vmmap, &phys_pg_dir, dest->pg_dir);
	vm_free_virt(&kernel_vmmap, dest->pg_dir, 1);
	dest->pg_dir = phys_pg_dir;
	debug("Moving pg_dir to phys = 0x%x\n", dest->pg_dir);
}


/*
 * Creates a new page system: the meta access window (mav/mava) is shared
 * with the kernel page system and the kernel directory entries in
 * [kern_area_start, kern_area_end) are copied in. Returns 'dest'.
 */
struct page_sys *create_page_sys(struct page_sys *dest, void *kern_area_start, void *kern_area_end)
{
	debug("Creating page_sys from 0x%x - 0x%x\n", kern_area_start, kern_area_end);
	struct page_sys *kern_sys = &kernel_vmmap.page_sys;
	dest->mav = kern_sys->mav;
	dest->mava = kern_sys->mava;
	dest->mav_index = kern_sys->mav_index;
	copy_top_level(dest, kern_area_start, kern_area_end);
	return dest;
}

void destroy_page_sys(struct page_sys *page_sys, void *kern_area_start, void *kern_area_end)
{
	//Free all the page tables that are linked to this page directory first
	enable_kernel_meta_access(page_sys->pg_dir);
	unsigned long kern_start_idx = get_dir_index((unsigned long)kern_area_start);
	unsigned long kern_end_idx = get_dir_index((unsigned long)kern_area_end);
	unsigned long i;
	for(i = 0; i < kern_start_idx; i++) {
		if(current_page_sys->mav[i] != 0) {
			phys_free_pages((void *)current_page_sys->mav[i], 1);
		}
	}
	for(i = kern_end_idx + 1; i < 1024; i++) {
		if(current_page_sys->mav[i] != 0) {
			phys_free_pages((void *)current_page_sys->mav[i], 1);
		}
	}
	//Free the page directory
	phys_free_pages(page_sys->pg_dir, 1);
}
