#include "vmm.h"
#include "pmm.h"
#include "kmm.h"
#include "vmm_ptm.h"
#include "dbgio.h"
/* Bump-allocator cursor for page-table memory used before the PTM exists
 * (see vmm_initialize / vmm_create_pt / vmm_map_page_p). */
uint32_t addr_pool = 0;
/* Start of the pre-paging pool; used to detect overflow of the 0x40000 region. */
uint32_t addr_base = 0;
/* Page directory currently active (mirrors what was loaded into CR3). */
pdirectory *vmm_cur_directory;
/* The kernel's master page directory, built in vmm_initialize(). */
pdirectory *kernel_directory;

/* Store a physical frame address in a page-table entry,
 * keeping the entry's flag bits untouched. */
void pt_set_frame(pt_entry *page, physical_addr frame){
	pt_entry flags = (*page) & ~PAGE_ADDRESS_MASK;
	*page = flags | (frame & PAGE_ADDRESS_MASK);
}

/* OR the given flag bit(s) into a page-table entry. */
void pt_set_flag(pt_entry *page,uint32_t flag){
	*page |= flag;
}

/* Clear the given flag bit(s) in a page-table entry. */
void pt_clear_flag(pt_entry *page,uint32_t flag){
	*page &= ~flag;
}

/* Store the physical address of a page table in a page-directory entry,
 * keeping the entry's flag bits untouched. */
void pd_set_ptr(pd_entry *page, physical_addr ptr){
	pd_entry flags = (*page) & ~PAGE_ADDRESS_MASK;
	*page = flags | (ptr & PAGE_ADDRESS_MASK);
}

/* OR the given flag bit(s) into a page-directory entry. */
void pd_set_flag(pd_entry *page,uint32_t flag){
	*page |= flag;
}

/* Clear the given flag bit(s) in a page-directory entry. */
void pd_clear_flag(pd_entry *page,uint32_t flag){
	*page &= ~flag;
}

/* Return the address of the PTE covering addr within table p,
 * or NULL when p is NULL. */
inline pt_entry* pt_lookup_entry (ptable* p,virtual_addr addr) {
	if (!p)
		return 0;
	uint32_t index = (addr >> VPTR_PAGE_OFFSET) & VPTR_PAGE_MASK;
	return &p->pages[index];
}

/* Return the address of the page-directory entry covering addr,
 * or NULL when p is NULL. */
inline pd_entry* pd_lookup_entry (pdirectory* p,virtual_addr addr) {
	if (!p)
		return 0;
	uint32_t index = (addr >> VPTR_DIRECTORY_OFFSET) & VPTR_DIRECTORY_MASK;
	return &p->tables[index];
}

/* Return the virtual address of the page table covering addr,
 * or NULL when p is NULL. */
inline ptable* pd_lookup_table (pdirectory* p,virtual_addr addr) {
	if (!p)
		return 0;
	uint32_t index = (addr >> VPTR_DIRECTORY_OFFSET) & VPTR_DIRECTORY_MASK;
	return p->tables_virt[index];
}
/* Enable (b == 1) or disable paging by toggling CR0.PG (bit 31).
 * Implemented only for the MSVC inline assembler; a no-op on
 * other compilers. */
void	vmm_paging_enable (char b) {

#ifdef _MSC_VER
	_asm {
		mov	eax, cr0
		cmp [b], 1
		je	enable
		jmp disable
enable:
		or eax, 0x80000000		//set bit 31
		mov	cr0, eax
		jmp done
disable:
		and eax, 0x7FFFFFFF		//clear bit 31
		mov	cr0, eax
done:
	}
#endif
}

/* Report whether paging is currently enabled by inspecting CR0.PG
 * (bit 31). On compilers without the MSVC inline assembler `res`
 * stays 0 and false is returned.
 * Fix: the original returned the INVERSE (0 when the PG bit was set). */
bool vmm_is_paging () {

	uint32_t res=0;

#ifdef _MSC_VER
	_asm {
		mov	eax, cr0
		mov	[res], eax
	}
#endif

	return (res & 0x80000000) ? 1 : 0;
}

/* Load the Page Directory Base Register (CR3 on i86) with the physical
 * address of a page directory. MSVC inline assembler only; a no-op on
 * other compilers. */
void vmm_load_PDBR (physical_addr addr) {

#ifdef _MSC_VER
	_asm {
		mov	eax, [addr]
		mov	cr3, eax		// PDBR is cr3 register in i86
	}
#endif
}
/* Make dir the active page directory: remember it in vmm_cur_directory
 * and load its physical address into CR3. Returns false for NULL. */
inline bool vmm_switch_pdirectory (pdirectory* dir) { 
	if (dir) {
		vmm_cur_directory = dir;
		vmm_load_PDBR (dir->directory_ptr);
		return true;
	}
	return false;
}
/* Invalidate the TLB entry for a single virtual address (interrupts are
 * disabled around the invlpg). MSVC inline assembler only.
 * NOTE(review): `invlpg addr` appears to operate on the stack slot of
 * the parameter `addr` itself, not on the address it holds — verify;
 * the usual form is `mov eax, addr` / `invlpg [eax]`. */
void vmm_flush_tlb_entry (virtual_addr addr) {
 
#ifdef _MSC_VER
	_asm {
		cli
		invlpg	addr
		sti
	}
#endif
}
/* Accessor for the page directory currently in use. */
pdirectory *vmm_get_current_directory(){
	pdirectory *active = vmm_cur_directory;
	return active;
}

/* Back the given PTE with a freshly allocated physical frame and mark
 * it present and user-accessible; writability follows `writable`.
 * When `phys` is non-NULL the frame's physical address is stored
 * through it. Returns 1 on success, 0 when physical memory is exhausted. */
char vmm_alloc_page(pt_entry *page,char writable,physical_addr *phys){
	void *frame = pmm_alloc_block();
	if (frame == 0)
		return 0;
	physical_addr frame_addr = (physical_addr) frame;
	pt_set_frame(page,frame_addr);
	pt_set_flag (page,PAGE_PRESENT);
	if (phys)
		*phys = frame_addr;
	pt_set_flag(page,PAGE_USERMODE);
	if (writable)
		pt_set_flag(page,PAGE_WRITABLE);
	else
		pt_clear_flag(page,PAGE_WRITABLE);
	return 1;
}

/**************************************************
 * Hacky way of creating a page table before the  *
 * page-table manager (PTM) is set up.            *
 **************************************************/
/* Ensure a page table exists for the directory slot covering `base`,
 * carving the table out of the pre-paging bump pool (addr_pool) since
 * the PTM allocator is not available yet. Does nothing if the PDE is
 * already present.
 * NOTE(review): 0xBFF00000 converts the pool's virtual address to a
 * physical one — presumably the kernel's fixed virt->phys offset at
 * boot; confirm against the memory map in vmm_initialize. */
void vmm_create_pt(virtual_addr base){
   pd_entry* e = pd_lookup_entry(vmm_cur_directory,base);
   ptable *table = pd_lookup_table(vmm_cur_directory,base);
   if ( (*e & PAGE_PRESENT) != PAGE_PRESENT) {
		physical_addr table_phys;
		/* Bump-allocate the table; overflow of the 0x40000 pool is only
		 * reported, not prevented. */
		table = (ptable *) addr_pool;//kmalloc_ap(sizeof(ptable), &table_phys);
		addr_pool += sizeof(ptable);
		if ((addr_pool-addr_base) > 0x40000)
			dprintf("ERROR overflow of pre paging PT ram %x %x\n",addr_pool,addr_base);
		table_phys = (physical_addr)(((uint32_t)table) - 0xBFF00000);
		memset((void *)table,0,sizeof(ptable));
		/* Record the table's virtual address alongside the physical PDE. */
		vmm_cur_directory->
			tables_virt[(((uint32_t)base) >> VPTR_DIRECTORY_OFFSET) & VPTR_DIRECTORY_MASK]
			= table;
		pd_set_ptr(e, table_phys);
		pd_set_flag(e, PAGE_PRESENT);
	    pd_set_flag(e,PAGE_WRITABLE);
	    pd_set_flag(e,PAGE_USERMODE);	
   }
}
void vmm_map_page(void* phys, void* virt,char writable,char user);

/* Allocate one page of kernel virtual address space from the PTM pool,
 * back it with a physical block, and map it writable + usermode.
 * The physical address is returned through `phys`.
 * NOTE(review): neither ptm_alloc_block() nor pmm_alloc_block() failure
 * is checked — a NULL return would be mapped and handed out as-is. */
void *vmm_ptm_malloc(physical_addr *phys){
	virtual_addr virt = (virtual_addr) ptm_alloc_block();
	*phys = (physical_addr) pmm_alloc_block();
	vmm_map_page((void*)*phys,(void *)virt,1,1);
	return (void *) virt;
}

/* Allocate a physical frame and map it at the given virtual address,
 * creating the covering page table (via vmm_ptm_malloc) if needed.
 * `writable`/`user` select the PTE flags; the TLB entry is flushed.
 * NOTE(review): vmm_ptm_malloc calls vmm_map_page, which may itself
 * need a new page table — this relies on the PTM region's tables being
 * pre-created (see vmm_initialize) to avoid unbounded recursion. */
void vmm_alloc_page_v(void* virt,char writable,char user){
   pd_entry* e = pd_lookup_entry(vmm_cur_directory,(virtual_addr) virt);
   ptable *table = pd_lookup_table(vmm_cur_directory,(virtual_addr) virt);
   //dprintf("ap virt:0x%x pde 0x%x\n",virt,*e);
   if ( (*e & PAGE_PRESENT) != PAGE_PRESENT) {
		physical_addr table_phys;
		//dprintf("ap mk virt:0x%x pde 0x%x ptet 0x%x, ptep 0x%x\n",virt,*e,table,table_phys);
		table = (ptable *) vmm_ptm_malloc(&table_phys);
		//dprintf("ap mk2 virt:0x%x pde 0x%x ptet 0x%x, ptep 0x%x\n",virt,*e,table,table_phys);
		memset((void *)table,0,sizeof(ptable));
		/* Record the table's virtual address alongside the physical PDE. */
		vmm_cur_directory->
			tables_virt[(((uint32_t)virt) >> VPTR_DIRECTORY_OFFSET) & VPTR_DIRECTORY_MASK]
			= table;
		pd_set_ptr(e, table_phys);
		pd_set_flag(e, PAGE_PRESENT);
	    pd_set_flag(e,PAGE_WRITABLE);
	    pd_set_flag(e,PAGE_USERMODE);
   }
   pt_entry* p = pt_lookup_entry(table,(virtual_addr) virt);
   /* vmm_alloc_page sets PRESENT/USERMODE; the flags below then apply
    * the caller's exact writable/user choice on top. */
   vmm_alloc_page(p,writable,0);
   if (writable)
       pt_set_flag(p,PAGE_WRITABLE);
   else
	   pt_clear_flag(p,PAGE_WRITABLE);
   if (user)
	   pt_set_flag(p,PAGE_USERMODE);
   else
	   pt_clear_flag(p,PAGE_USERMODE);
   //dprintf("ap virt:0x%x pte:0x%x pde 0x%x\n",virt,*p,*e);
   vmm_flush_tlb_entry ((virtual_addr) virt); 
}


/* Map an existing physical page at the given virtual address in the
 * current directory, creating the covering page table (via
 * vmm_ptm_malloc) if needed, then flush the TLB entry.
 * NOTE(review): vmm_ptm_malloc re-enters this function; the PTM
 * region's page tables must already exist to terminate the recursion. */
void vmm_map_page(void* phys, void* virt,char writable,char user){
   pd_entry* e = pd_lookup_entry(vmm_cur_directory,(virtual_addr) virt);
   ptable *table = pd_lookup_table(vmm_cur_directory,(virtual_addr) virt);
  // dprintf("mp virt:0x%x phys:0x%x pde 0x%x\n",virt,phys,*e);
   if ( (*e & PAGE_PRESENT) != PAGE_PRESENT) {
		physical_addr table_phys;
	//	dprintf("mp mk virt:0x%x pde 0x%x ptet 0x%x, ptep 0x%x\n",virt,*e,table,table_phys);
		table = (ptable *) vmm_ptm_malloc(&table_phys);
		//dprintf("mp mk2 virt:0x%x pde 0x%x ptet 0x%x, ptep 0x%x\n",virt,*e,table,table_phys);
		memset((void *)table,0,sizeof(ptable));
		/* Record the table's virtual address alongside the physical PDE. */
		vmm_cur_directory->
			tables_virt[(((uint32_t)virt) >> VPTR_DIRECTORY_OFFSET) & VPTR_DIRECTORY_MASK]
			= table;
		pd_set_ptr(e, table_phys);
		pd_set_flag(e, PAGE_PRESENT);
	    pd_set_flag(e,PAGE_WRITABLE);
	    pd_set_flag(e,PAGE_USERMODE);
   }
   pt_entry* p = pt_lookup_entry(table,(virtual_addr) virt);
   pt_set_frame(p,(physical_addr) phys);
   pt_set_flag(p,PAGE_PRESENT);
   if (writable)
       pt_set_flag(p,PAGE_WRITABLE);
   else
	   pt_clear_flag(p,PAGE_WRITABLE);
   if (user)
	   pt_set_flag(p,PAGE_USERMODE);
   else
	   pt_clear_flag(p,PAGE_USERMODE);
   vmm_flush_tlb_entry ((virtual_addr) virt); 
}

/* Pre-paging variant of vmm_map_page: maps phys at virt, but creates
 * any missing page table from the bump pool (addr_pool) instead of the
 * PTM allocator, since paging/PTM are not active yet.
 * NOTE(review): unlike vmm_map_page there is no TLB flush here —
 * presumably fine because this runs before paging is enabled; confirm. */
void vmm_map_page_p(void* phys, void* virt,char writable,char user){
   pd_entry* e = pd_lookup_entry(vmm_cur_directory,(virtual_addr) virt);
   ptable *table = pd_lookup_table(vmm_cur_directory,(virtual_addr) virt);
   if ( (*e & PAGE_PRESENT) != PAGE_PRESENT) {
		physical_addr table_phys;
		/* Bump-allocate the table; overflow of the 0x40000 pool is only
		 * reported, not prevented. */
		table = (ptable *) addr_pool;//kmalloc_ap(sizeof(ptable), &table_phys);
		addr_pool += sizeof(ptable);
		if ((addr_pool-addr_base) > 0x40000)
			dprintf("ERROR overflow of pre paging PT ram %x %x\n",addr_pool,addr_base);
		/* 0xBFF00000 = kernel virt->phys offset at boot (see vmm_create_pt). */
		table_phys = (physical_addr)(((uint32_t)table) - 0xBFF00000);
		memset((void *)table,0,sizeof(ptable));
		vmm_cur_directory->
			tables_virt[(((uint32_t)virt) >> VPTR_DIRECTORY_OFFSET) & VPTR_DIRECTORY_MASK]
			= table;
		//	dprintf(" creating ptable: 0x%x 0x%x 0x%x\n",(uint32_t) e,(uint32_t)table,(uint32_t)table_phys);
		pd_set_ptr(e, table_phys);
		pd_set_flag(e, PAGE_PRESENT);
	    pd_set_flag(e,PAGE_WRITABLE);	
	    pd_set_flag(e,PAGE_USERMODE);
   }
   pt_entry* p = pt_lookup_entry(table,(virtual_addr) virt);
   pt_set_frame(p,(physical_addr) phys);
   pt_set_flag(p,PAGE_PRESENT);
   if (writable)
       pt_set_flag(p,PAGE_WRITABLE);
   else
	   pt_clear_flag(p,PAGE_WRITABLE);	 
   if (user)
	   pt_set_flag(p,PAGE_USERMODE);
   else
	   pt_clear_flag(p,PAGE_USERMODE);
}

/* Release the physical frame backing a PTE (if any) and mark the entry
 * not-present. The stale frame address is left in the entry. */
void vmm_free_page(pt_entry *page){
	pt_entry entry = *page;
	physical_addr frame = (physical_addr)(entry & PAGE_ADDRESS_MASK);
	if (frame != 0)
		pmm_free_block((void *)frame);
	pt_clear_flag(page,PAGE_PRESENT);
}

void vmm_alloc_area(void* virt_s,void *virt_e,char writable,char user){
   //dprintf("aa virt_s:0x%x virt_e:0x%x\n",virt_s,virt_e);
	for (uint32_t page = (uint32_t) virt_s;page < (uint32_t) virt_e;page+=4096)       //Map 1MB..5MB to 3GB region
		vmm_alloc_page_v((void *)page,writable,user);
}

/* Grow or shrink the kernel heap mapping at current_end by `size` bytes
 * (callers pass a negative byte count as a wrapped size_t to shrink).
 * Growing maps fresh physical blocks page by page starting at
 * current_end; shrinking frees and unmaps whole pages below it, flushing
 * each TLB entry. Shrinks smaller than one page are ignored.
 * Fix: `size_t` is unsigned, so the original `size > 0` / `size < 0` /
 * `size > -4096` tests made the shrink path unreachable and sent wrapped
 * negative sizes into the grow loop; work on a signed copy instead. */
void vmm_kbrk(void* current_end,size_t size){
	int32_t delta = (int32_t) size;	/* reinterpret the argument as signed */
	if (delta > 0) {
		for (int32_t s = 0;s < delta;s+=4096)
			vmm_map_page(pmm_alloc_block(),(void *)(((uint32_t)current_end)+s),1,1);
	} else {
		if (delta > -4096)
			return;
		for (;delta < 0;delta+=4096){
			virtual_addr vad = (virtual_addr)(((int)current_end)+delta);
			vmm_free_page(pt_lookup_entry(pd_lookup_table(vmm_cur_directory,vad),vad));
			vmm_flush_tlb_entry (vad);
		}
	}
}

/* Bring up the virtual memory manager: build the kernel page directory,
 * identity-map the first megabyte, map 1MB..7MB at 0xC0000000, pre-create
 * the page tables for the PTM region, and load the directory into CR3.
 * Assumes KMM and PMM are already running. */
void vmm_initialize() {
	//Assume that KMM and PMM are running
	physical_addr phys;
	tprintf("[0x%X] - _vmm_initialize() \n",(void *)vmm_initialize);
	/* Reserve the pre-paging bump pool and page-align its cursor. */
	addr_base = addr_pool = (uint32_t) kmalloc(0x40000);
	addr_pool +=  0xFFF;
	addr_pool &= ~0xFFF;
	/* Carve the kernel directory itself out of the pool, page-aligned. */
	kernel_directory = (pdirectory *)addr_pool;//kmalloc_ap(sizeof(pdirectory),&phys);
	addr_pool += sizeof(pdirectory)+0xFFF;
	addr_pool &= ~0xFFF;
	/* 0xBFF00000 = kernel virt->phys offset at boot (see vmm_create_pt). */
	phys = (physical_addr)(((uint32_t)kernel_directory) - 0xBFF00000);
	tprintf ("   _kernel_directory = 0x%x\n",(uint32_t)kernel_directory);
	memset(kernel_directory,0,sizeof(pdirectory));
	kernel_directory->directory_ptr = phys;
	tprintf ("   _kernel_directory->directory_ptr = 0x%x\n",(uint32_t)phys);
	tprintf ("   print: identity mapping now...\n");
	vmm_cur_directory = kernel_directory;
	for (int page = 0x0;page < 0x100000;page+=4096)	      //Identity map first megabyte of RAM/ROM
		vmm_map_page_p((void *) page, (void *) page,1,1);
	for (int page = 0x0;page < 0x600000;page+=4096)       //Map 1MB..5MB to 3GB region
		vmm_map_page_p((void *) (page + 0x100000),(void *) (page + 0xC0000000),1,1);
	/* Pre-create page tables for the PTM window so later vmm_map_page
	 * calls never recurse through vmm_ptm_malloc for a missing table.
	 * NOTE(review): the bound `pt < VPTR_DIRECTORY_MASK` skips the last
	 * directory slot, and `pt << VPTR_DIRECTORY_OFFSET` shifts a signed
	 * int into bit 31 for high slots — verify both are intended. */
	for (int pt = (PTM_PTS_START >> VPTR_DIRECTORY_OFFSET) & VPTR_DIRECTORY_MASK;pt < VPTR_DIRECTORY_MASK;pt++)
		vmm_create_pt(pt << VPTR_DIRECTORY_OFFSET);
	tprintf ("   critical phase: loading page table...\n");
	vmm_switch_pdirectory (vmm_cur_directory);
	tprintf (" vmm_initialize: return\n");

}