/** 
* @file vmu.cpp
* Virtual Memory Unit (implementation).
* Unit for virtual memory management: page directory, page table,
* page allocation.
*
* Copyright 2010 Michal Saman, m.saman@designplus.cz.
* This source code is released under the Apache License 2.0.
* http://www.apache.org/licenses/LICENSE-2.0
*/

#include <env.h>
#include <string.h>
#include <stdint.h>
#include <mem/vmu.h>
#include <mem/vmu_pde.h>
#include <mem/vmu_pte.h>
#include <mem/pmu.h>
#include <mem/kheap.h>
#include <main/kprintf.h>

/** Page directory currently active on the CPU (set by vmu_switch_page_dir). */
struct page_directory *cur_directory = 0;

/** Kernel page directory, created once by vmu_init(). */
struct page_directory *_kpage_dir;

/** Current page directory base register (physical address).
 *  NOTE(review): never written in this file — confirm it is still used. */
physical_addr	cur_pdbr = 0;

/** 
* Looks up the page directory entry (PDE) that covers a virtual address.
* @param pd	Pointer to the page directory.
* @param addr	Virtual address whose PDE index is extracted.
* @returns Pointer to the matching page directory entry (one page table),
*          or 0 when pd is null.
*/
inline pde * vmu_page_dir_lookup(struct page_directory *pd, virtual_addr addr) {
	if (!pd)
		return 0;
	return &pd->entries[PAGE_DIRECTORY_INDEX(addr)];
}

/**
* Looks up the page table entry (PTE) that covers a virtual address.
* @param pt	Pointer to the page table.
* @param addr	Virtual address whose PTE index is extracted.
* @returns Pointer to the matching page table entry (one page),
*          or 0 when pt is null.
*/
inline pte * vmu_page_table_lookup(struct page_table *pt, virtual_addr addr) {
	if (!pt)
		return 0;
	return &pt->entries[PAGE_TABLE_INDEX(addr)];
}


/** 
* Makes pd the current page directory and loads it into the processor's PDBR.
* @param pd	Page directory to activate.
* @returns true on success, false when pd is null.
*/
inline bool vmu_switch_page_dir(struct page_directory *pd) {
	if (!pd)
		return false;
	cur_directory = pd;
	vmu_load_PDBR(pd);
	return true;
}

/**
 * Duplicates page table including all present page contents and returns
 * physical address of new table.
 */
struct page_table* vmu_clone_page_table(struct page_table *spt, uint32_t *phys_addr) {
	//struct page_table *new_pt = vmu_create_page_dir();
	struct page_table *new_pt = (struct page_table*) kmalloc_p(sizeof(struct page_table), true, phys_addr);

	int i;
	for (i=0; i < PAGES_PER_TABLE; i++) {
		pte *e = &spt->entries[i];
		if (!e) continue;
		if (pte_is_present(*e)) {
			new_pt->entries[i] = *e;					// set same value as source (to copy attribs)
			vmu_alloc_page(&new_pt->entries[i]);		// alloc new page frame and set frame address
			clone_page( (void *)pte_get_frame(*e), new_pt->entries[i]*0x1000 );			// copy data from source frame(page) to new frame(page)
		}
	}

	return new_pt;
}

/**
 * Duplicates page directory including page contents and returns
 * physical address of new directory.
 */
struct page_directory* vmu_clone_page_dir(struct page_directory *spd) {
	uint32_t phys_addr;
	struct page_directory * new_dir = vmu_create_page_dir();
	kprintf("OLD PD at addr: 0x%x\n", spd);

	int i;
	for (i=0; i < PAGES_PER_DIR; i++) {
		struct page_table *pt = spd->entries_virt[i];
		if (!pt) continue;
		/*
		if (_kpage_dir->entries_virt[i] == pt) {
			// kernel table, so map it only
			new_dir->entries_virt[i] = pt;
			new_dir->entries[i] = spd->entries[i];
		} else {
		*/
			// copy page table
			new_dir->entries_virt[i] = vmu_clone_page_table( (struct page_table*) spd->entries_virt[i], &phys_addr );
			// change frame address of page table to cloned table
			pde_set_frame(&new_dir->entries[i], (physical_addr) phys_addr );
		//}
	}

	kprintf("NEW PD at virt. addr: 0x%x, phys. addr: 0x%x\n", new_dir, new_dir->pbdr);

	return new_dir;
}

/**
 * Creates a new, empty page directory and allocates physical memory for it.
 * All entries are zero-initialized (not present).
 * @returns Virtual address of the new directory (pdir->pbdr holds the
 *          physical address), or NULL when the allocation fails.
 */
inline struct page_directory* vmu_create_page_dir() {
	uint32_t pbdr;
	struct page_directory* pdir;
	pdir = (struct page_directory*) kmalloc_p(sizeof(struct page_directory), true, &pbdr);
	if (!pdir)
		return NULL;

	// clear the whole directory — the original used sizeof(pdir), which is
	// only the size of the pointer and left almost all entries uninitialized
	memset(pdir, 0, sizeof(*pdir));

	pdir->pbdr = pbdr;

	// NOTE(review): pdir printed here is the virtual address, not physical
	kprintf("Creating new PD stored at phys.: 0x%x (size etrs.: %u, size: %u)\n", pdir, sizeof(pdir->entries), sizeof(*pdir));

	return pdir;
}

/**
* Flushes a cached translation lookaside buffer (TLB) entry.
* Interrupts are disabled around the invlpg instruction.
* NOTE(review): the trailing sti unconditionally re-enables interrupts even
* if the caller had them disabled — confirm callers expect this.
* @param addr Virtual address to flush.
*/
void vmu_flush_tlb_entry(virtual_addr addr) {
	__asm__ volatile (
		"cli\n\t"
		"invlpg	%0\n\t"
		"sti"
		:
		: "m"(addr)
	);
}

/** 
* Returns the page directory currently loaded into the processor's PDBR.
* @returns Pointer to the active page directory (may be 0 before vmu_init).
*/
struct page_directory * vmu_get_current_page_dir() {
	struct page_directory *active = cur_directory;
	return active;
}

/** Prints whether the given page table entry has its present bit set. */
void vmu_show_page_info(pte *e) {
	pte entry = *e;
	kprintf("Page present: %i\n", entry & PTE_PRESENT);
}

/**
* Backs a page with physical memory and marks it present.
* A page that is already present is left untouched.
* @param e Pointer to the page table entry.
* @returns true when the page is (now) backed by a frame, false on error.
*/
bool vmu_alloc_page(pte *e) {
	// reject an invalid entry pointer
	if (!e)
		return false;

	// nothing to do if the page is already allocated and present
	if (pte_is_present(*e))
		return true;

	// grab a free physical block to serve as the page frame
	void *frame = pmu_alloc_block();
	if (!frame)
		return false;	// out of physical memory (in future we must do some magic here and cache some allocated pages...)

	pte_set_frame(e, (physical_addr)frame);
	pte_add_attrib(e, PTE_PRESENT);

	return true;
}

/**
 * Manually maps a virtual address to the specified physical address.
 * @param phys	Physical address to map to.
 * @param virt	Virtual address to map from.
 */
void vmu_map_page(void* phys, void* virt) {
	pte *p = vmu_get_page((virtual_addr)virt);
	// vmu_get_page returns NULL when it cannot allocate a page table;
	// dereferencing that here would fault
	if (!p)
		return;
	pte_set_frame(p, (physical_addr)phys);
	pte_add_attrib(p, PTE_PRESENT);
}

/**
* Releases the physical frame backing a page and clears its present flag.
* @param e Pointer to the page table entry; null is ignored.
*/
void vmu_free_page(pte *e) {
	if (!e)
		return;				// same null-guard convention as vmu_alloc_page()

	// release the physical memory block backing this page, if any
	void *p = (void *)pte_get_frame(*e);
	if (p)
		pmu_free_block(p);

	pte_del_attrib(e, PTE_PRESENT);  // page flag "is present" set to 0
}

/**
* Returns the page table entry for a virtual address in the CURRENT page
* directory, allocating (and wiring up) the covering page table on demand.
* @param virt Virtual address to resolve.
* @returns Pointer to the PTE, or NULL when a new page table cannot be
*          allocated. Note: callers must check for NULL.
*/
pte * vmu_get_page(virtual_addr virt) {
	physical_addr pt_phys;
	// get current page directory
	struct page_directory * pd = vmu_get_current_page_dir();
	// get index of page_table
	uint32_t table_idx = PAGE_DIRECTORY_INDEX((uint32_t)virt);
	// get page_table pointer (virtual-address mirror of the PDE array)
	struct page_table * pt = pd->entries_virt[table_idx];
	if (!pt) {
		// page table is not present, allocate it (kmalloc_p also reports the
		// physical address, needed for the PDE frame below)
		pt = (struct page_table*) kmalloc_p(sizeof(struct page_table), true, &pt_phys);
		if (!pt)
			return NULL;
		kprintf("Creating new PT #%i stored at address: 0x%x for virt. addr.: 0x%x \n", PAGE_DIRECTORY_INDEX((uint32_t)virt), pt, virt);
		// clear page table (all 1024 pages)
		memset(pt, 0, sizeof(struct page_table));
		// create a new page directory entry: mark present + writable, then
		// point its frame at the table's physical address
		pde_add_attrib((pde*)&pd->entries[table_idx], PDE_PRESENT);
		pde_add_attrib((pde*)&pd->entries[table_idx], PDE_WRITABLE);
		pde_set_frame((pde*)&pd->entries[table_idx], pt_phys);
		// remember the table's virtual address for later lookups
		pd->entries_virt[table_idx] = pt;
	}

	return (pte *) &pt->entries[PAGE_TABLE_INDEX(virt)];
}

/** 
* Routine to enable/disable memory paging mode for IA32 by toggling
* CR0 bit 31 (PG).
* @param p True - enable, False - disable
*/
void vmu_do_paging(bool p) {
	uint32_t cr0;
	// Read CR0, modify the PG bit in C, write it back. The original asm
	// branched through global labels ("enable"/"disable"/"done"), which
	// become duplicate symbols if the function body is emitted twice.
	__asm__ volatile ("mov %%cr0, %0" : "=r"(cr0));
	if (p)
		cr0 |= 0x80000000;		// set bit 31 (PG)
	else
		cr0 &= 0x7FFFFFFF;		// clear bit 31 (PG)
	__asm__ volatile ("mov %0, %%cr0" : : "r"(cr0));
}

/**
 * Loads the directory's physical base address into the processor's PDBR.
 * PDBR is the cr3 register on IA-32.
 * @param p Page directory whose pbdr field (physical base) is loaded.
 */
void vmu_load_PDBR(struct page_directory *p) {
	// "r" forces the value into a general register: `mov %0, %%cr3` only
	// accepts a register operand; the original "p" (address) constraint can
	// emit a memory operand and fail to assemble
	__asm__ volatile ("mov	%0, %%cr3" : : "r"(p->pbdr) );
}

/**
* Initializes the Virtual Memory Unit.
* Creates the kernel page directory, identity-maps the first 4 MB of RAM
* (1024 pages of 4 KB each), and enables paging. The mapping of kernel
* space (3 GB virtual to 1 MB physical) is present below but disabled.
*/
void vmu_init() {
	// create the default (kernel) directory table
	struct page_directory *pdir = vmu_create_page_dir();
	if (!pdir)
		return;
	_kpage_dir = pdir;

	// make it the active directory (also loads the PDBR)
	vmu_switch_page_dir(pdir);

	// identity-map the first 4 MB: virtual address == physical address
	int i;
	for (i = 0; i < 1024; i++) {
		uint32_t addr = i * 4096;
		vmu_map_page((void*)addr, (void*)addr);
	}

	/* kernel-space mapping (3 GB virt -> 1 MB phys) — currently disabled:
	int virt, phys;
	for(virt=0xC0000000, phys=0x100000, i=0; i < 1024; i++, virt+=4096, phys+=4096) {
		if (i==0)
			kprintf(">>> Maping virtual: 0x%x to phys: 0x%x\n", virt, phys);
		vmu_map_page((void*)phys, (void*)virt);
	}
	*/

	// flip CR0.PG on
	vmu_do_paging(true);
}
