/*
 *  virtual.c — i386 virtual memory management: initial page-table setup,
 *  page mapping/unmapping primitives, kernel heap growth (morecore),
 *  and per-process address-space creation/destruction.
 */


#include <vmm.h>
#include <klib.h>
#include <panic.h>
#include <i386_arch.h>
#include <globals.h>
#include <spinlock.h>


// Local includes
#include "include/i386_paging.h"
#include "include/allocator.h"
#include "include/vmm_const.h"

// Implement functions here

spinlock_t paging_mutex;

/* Private data structures */

dword_t PageDirectory;
dword_t end_of_kernel_page;

/* These two variables keep track of the dynamic memory allocated by the kernel */
dword_t core_start,core_end;

/* Private functions prototypes */

void paging_enable(dword_t diraddr);
extern void paging_enable_asm(void);
void *morecore(long increment);
void virtual_to_pde_pte(dword_t virtual_address,dword_t *pde_idx,dword_t *pte_idx);
void virtual_to_pde_pte_ptr(dword_t virtual_address,PDE_T **pde,PTE_T **pte);

int memory_map(dword_t virt_address,dword_t phys_address,byte_t access);
int memory_unmap(dword_t virt_address,dword_t *phys_address);

void allocate_page(dword_t virt_address,byte_t access);
void release_page(dword_t virt_address);

/* Private functions implementation */

/*
 * Load the page directory's physical address into CR3 and switch paging
 * on (CR0.PG) via the assembly stub. Also initializes the global paging
 * mutex, since this is the earliest point at which it is needed.
 *
 * diraddr: physical, page-aligned address of the page directory.
 */
void paging_enable(dword_t diraddr)
  {
    /* The paging lock must be usable before any map/unmap call runs */
    lock_initialize(&paging_mutex);

    /* Point the MMU at the new directory... */
    write_cr3(diraddr);

    /* ...then set the PG bit in CR0 (done in assembly) */
    paging_enable_asm();
  }


/* Public functions implementation */

/*
 * virtual_init: build the initial page tables and turn paging on.
 *
 * Address-space layout created here:
 *   - PDE 0    (virtual 0-4 MB):  identity map of the first 4 MB of RAM
 *   - PDE 512  (virtual 2 GiB):   kernel image, backed by physical 1 MB up
 *   - PDE 1023 (top 4 MB):        fractal self-map of the page directory
 *
 * Runs before paging is enabled, so physical table pages are written
 * through their RELOC_PATCH-adjusted addresses. Must be called before any
 * memory_map/allocate_page use.
 */
void virtual_init(void)
  { 
    PDE_T *dir;
    dword_t tmpaddr;
    unsigned int i,j;

    // Initialize low-level physical frame allocator
    ivmmAllocator_initialize();

    /*
     *  Get some low memory pages to store our memory tables.
     *  To cover up to 16MB we need 4 PDEs (covering 4MB each) moreover we have to store the Page Directory itself
     *  Therefore we need 4 pages to store each Page Table plus 1 page for the Page Directory
     *  That's 5 Pages altogether ie 20KB.
     */
    
    // Get a page in low-memory to store the Page Directory
    PageDirectory=ivmmAllocator_AllocatePage();        // it's a phys address
    dir=(PDE_T *)(PageDirectory+RELOC_PATCH);          // same page seen through the current (pre-paging) view

    // Initialize all PDEs to NOT PRESENT
    for (i=0;i<PAGE_DIR_SIZE;i++)
      ivmmPDE_setAccess(dir+i,PG_NOT_PRESENT);
      
    // 0 - 4 MB    
    tmpaddr=ivmmAllocator_AllocatePage();                  // Get a page to store the PTEs
    ivmmPDE_setAccess(dir,PG_PRESENT|PG_WRITE|PG_SUPER);   // Set access to page present,superuser,write
    ivmmPDE_setOsBits(dir,0);                              // Set OS reserved bits to 0
    ivmmPDE_setAddress(dir,tmpaddr);                       // Store the Page Table's address into the PDE
    // Set all the PTEs to NOT PRESENT (4 = PTE size in bytes)
    for (j=0;j<PAGE_TAB_SIZE;j++)
      ivmmPTE_setAccess((PTE_T *)(tmpaddr+4*j+RELOC_PATCH),PG_NOT_PRESENT);  // Set frame to NOT PRESENT (other bits will be ignored)
    

    dir+=512;  // PDE index 512 covers virtual 2 GiB (512 * 4 MB)

    // Get another Page Table for the Kernel's PDE
    tmpaddr=ivmmAllocator_AllocatePage();                  // Get a page to store the PTEs
    ivmmPDE_setAccess(dir,PG_PRESENT|PG_WRITE|PG_SUPER);   // Set access to page present,superuser,write
    ivmmPDE_setOsBits(dir,0);                              // Set OS reserved bits to 0
    ivmmPDE_setAddress(dir,tmpaddr);                       // Store the Page Table's address into the PDE
    // Set all the PTEs to NOT PRESENT
    for (j=0;j<PAGE_TAB_SIZE;j++)
      ivmmPTE_setAccess((PTE_T *)(tmpaddr+4*j+RELOC_PATCH),PG_NOT_PRESENT);  // Set frame to NOT PRESENT (other bits will be ignored)


    // Map the first 4 MB of memory 1:1 with virtual memory
    // Note: GDT is in the Kernel, we need to reload the GDTR with a higher address later on after paging is enabled;

    dir=(PDE_T *)(PageDirectory+RELOC_PATCH);    
    for (j=0;j<PAGE_TAB_SIZE;j++)  
      {	
	PTE_T *page;
	
	page=(PTE_T *)(ivmmPDE_getAddress(dir)+RELOC_PATCH);
	page+=j;                                                  // j-th PTE
	
	tmpaddr=j*0x1000;	                                 // Physical address from 0x00000000 (j*4KB): identity mapping
	ivmmPTE_setAccess(page,PG_PRESENT|PG_WRITE|PG_SUPER);      // Page present, superuser write
	ivmmPTE_setOsBits(page,0);                                 // OS reserved bits set to 0
	ivmmPTE_setAddress(page,tmpaddr);                          // physical address	  
	
	// Deliberately NOT marking these frames busy in the allocator:
	// this identity map is temporary and is torn down in
	// virtual_post_init; marking them busy would prevent reuse later.
	// ivmmAllocator_setPageBusy(tmpaddr);	  
      }
    
    dir+=512;
    // Number of PTEs needed for the kernel image = size in bytes rounded
    // up to whole 4 KB pages
    j=Kernel_End-Kernel_Start;
    i= j % 4096 ? 1 : 0;       // one extra page if the size isn't page-aligned
    j/=4096;
    i+=j;
    end_of_kernel_page=i;


    // Map the kernel image at 2 GiB, backed by physical memory from 1 MB
    for (j=0;j<end_of_kernel_page;j++)  
      {	
	PTE_T *page;
	
	page=(PTE_T *)(ivmmPDE_getAddress(dir)+RELOC_PATCH);
	page+=j;                                                  // j-th PTE
	
	tmpaddr=0x100000+j*0x1000;                                // Physical address from 1MB+j*4KB; 1MB is the Kernel load address in physical memory
	ivmmPTE_setAccess(page,PG_PRESENT|PG_WRITE|PG_SUPER);      // Page present, superuser write
	ivmmPTE_setOsBits(page,0);                                 // OS reserved bits set to 0
	ivmmPTE_setAddress(page,tmpaddr);                          // physical address	  
	
	// These frames hold the kernel permanently: mark them busy so the
	// allocator never hands them out
	ivmmAllocator_setPageBusy(tmpaddr);	  
      }
 
    // Initialize kernel heap bounds (heap is empty: start == end)
    core_start=core_end=0x90000000;

    // Use Fractal representation:
    // map the page directory as the last PDE in itself,
    // so the page tables appear in the top 4 MB of the address space
    // and the page directory itself in the top 4 KB.

    dir=(PDE_T *)(PageDirectory+RELOC_PATCH);    
    dir+=(PAGE_DIR_SIZE-1);
    ivmmPDE_setAccess(dir,PG_PRESENT|PG_WRITE|PG_SUPER);
    ivmmPDE_setOsBits(dir,0);
    ivmmPDE_setAddress(dir,PageDirectory);                          

    // Enable paging 
    paging_enable(PageDirectory);
  }

/*
 * Split a 32-bit virtual address into its page-directory index
 * (top 10 bits) and page-table index (middle 10 bits).
 */
void virtual_to_pde_pte(dword_t virtual_address,dword_t *pde_idx,dword_t *pte_idx)
  {
    dword_t dir_index,tab_index;

    dir_index=(virtual_address>>22)&0x03FF;   /* bits 31..22 */
    tab_index=(virtual_address>>12)&0x03FF;   /* bits 21..12 */

    *pte_idx=tab_index;
    *pde_idx=dir_index;
  }

/*
 * Resolve a virtual address to pointers to its PDE and PTE, using the
 * fractal mapping: the page directory is visible at PAGE_DIR_START and
 * all page tables are visible contiguously at PAGE_TAB_START.
 */
void virtual_to_pde_pte_ptr(dword_t virtual_address,PDE_T **pde,PTE_T **pte)
  {
    dword_t dir_idx,tab_idx;

    /* Break the address into its two table indices */
    virtual_to_pde_pte(virtual_address,&dir_idx,&tab_idx);

    /* PDE: dir_idx-th entry of the self-mapped directory */
    *pde=(PDE_T *)(PAGE_DIR_START+PT_INCREMENT*dir_idx);
    /* PTE: tab_idx-th entry of the dir_idx-th table in the fractal window */
    *pte=(PTE_T *)(PAGE_TAB_START+PT_INCREMENT*(dir_idx*PAGE_TAB_SIZE+tab_idx));
  }

/*
 * Make the VGA text-mode frame buffer (physical 0xB8000) reachable at
 * virtual 0xF0000000 with kernel read/write access.
 */
void remap_video_memory(void)
  {
    /* Claim the physical frame first so the allocator never hands it out */
    ivmmAllocator_setPageBusy(0xB8000);

    /* memory_map requires the frame to be busy already (see its contract) */
    memory_map(0xF0000000,0xB8000,PG_PRESENT | PG_SUPER | PG_WRITE);
  }


// Free first 4 MB of RAM used initially
void virtual_post_init(void)
  {
    PDE_T *pde;
    PTE_T *base_pte;
    int i,j;
    dword_t tmp,vaddr;

    for (i=0;i<1024;i++)
      memory_unmap(0x1000*i,&tmp);



    // Pre allocate page directory entries common to all the kernel and user processes

    vaddr=CORE_START;
    for (j=0;j<192;j++)
      {
	//vaddr=CORE_START+MEM_PAGE_SIZE*i;

	// Retrieve PDE and PTE pointers
	virtual_to_pde_pte_ptr(vaddr,&pde,&base_pte);
	
	// Get a page to store the PTEs
	tmp=ivmmAllocator_AllocatePage();
	// Set access to page present + super + write
	ivmmPDE_setAccess(pde,PG_PRESENT | PG_SUPER | PG_WRITE);   
	// Set OS reserved bits to 0
	ivmmPDE_setOsBits(pde,0);
	// Store the Page Table's address into the PDE
	ivmmPDE_setAddress(pde,tmp);
	
	// retrieve base PTE pointer (by masking lower 12 bits)
	//virtual_to_pde_pte_ptr(virt_address&0xFFFFF000,&pde,&base_pte);
	virtual_to_pde_pte_ptr(vaddr&0xFFC00000,&pde,&base_pte);
	
	// Set all the PTEs to NOT PRESENT 
	for (i=0;i<PAGE_TAB_SIZE;i++)
	  ivmmPTE_setAccess(base_pte+i,PG_NOT_PRESENT);	

	// increment virtual address
	// increment of 4 MB (page size * num of pages in PT ==> 4 KB * 1024 = 4 MB)
	vaddr += MEM_PAGE_SIZE * PAGE_TAB_SIZE;
      }
  }

// maps a virtual address to a physical address
// N.B. the page frame relative to the physical address
// must have previously been set busy in the allocator
// else it might be allocated for something else
int memory_map(dword_t virt_address,dword_t phys_address,byte_t access)
  {
    PDE_T *pde;
    PTE_T *pte;
    PTE_T *base_pte;
    byte_t pde_access,pte_access;
    dword_t tmp_page;
    int i;

    // Retrieve PDE and PTE pointers
    virtual_to_pde_pte_ptr(virt_address,&pde,&pte);

    pde_access=ivmmPDE_getAccess(pde);

    // if pde not present, then allocate it
    if (!(pde_access & PG_PRESENT))
      {
	// Get a page to store the PTEs
	tmp_page=ivmmAllocator_AllocatePage();
	// Set access to page present + user + write
	// this is common to all PDEs (except for kernel's original)
	ivmmPDE_setAccess(pde,PG_PRESENT | PG_USER | PG_WRITE);   
	// Set OS reserved bits to 0
	ivmmPDE_setOsBits(pde,0);
	// Store the Page Table's address into the PDE
	ivmmPDE_setAddress(pde,tmp_page);

	// retrieve base PTE pointer (by masking lower 12 bits)
	//virtual_to_pde_pte_ptr(virt_address&0xFFFFF000,&pde,&base_pte);
	virtual_to_pde_pte_ptr(virt_address&0xFFC00000,&pde,&base_pte);

	// Set all the PTEs to NOT PRESENT 
	for (i=0;i<PAGE_TAB_SIZE;i++)
	  ivmmPTE_setAccess(base_pte+i,PG_NOT_PRESENT);	
      }

    // now pde is present (or it might have been already)

    // are we trying to map an already mapped page?
    pte_access=ivmmPTE_getAccess(pte);
    if (pte_access & PG_PRESENT)
      return 1; // yes... then error!

    // At this point the pte is not present, we allocate it and map it.

    // Set access to page present + requested flags
    ivmmPTE_setAccess(pte,PG_PRESENT | access);   
    // Set OS reserved bits to 0
    ivmmPTE_setOsBits(pte,0);
    // Store the frame address into the PTE
    ivmmPTE_setAddress(pte,phys_address);

    invalidate_tlb_entry(virt_address);
    
    return 0;
  }

// unmaps a virtual address and frees the physical page table as well
// it does not deallocate the data page
// returns the physical address it pointed to (data page)
// Unmaps a virtual address; if this leaves the covering page table with
// no present entries, the page table itself is freed as well.
// It does not deallocate the data page.
// Returns 0 on success (physical address of the data page stored in
// *phys_address), 1 if the address (or its PDE) was not mapped.
int memory_unmap(dword_t virt_address,dword_t *phys_address)
  {
    PDE_T *pde;
    PTE_T *pte;
    PTE_T *base_pte;
    byte_t pde_access,pte_access;
    dword_t tmp_page;
    int i,found;

    // Retrieve PDE and PTE pointers
    virtual_to_pde_pte_ptr(virt_address,&pde,&pte);

    pde_access=ivmmPDE_getAccess(pde);

    // Are we trying to unmap a page with an unmapped PDE?
    if (!(pde_access & PG_PRESENT))
      return 1; // yes... then error!

    // if we're here, then PDE is present
    // get PTE access
    pte_access=ivmmPTE_getAccess(pte);

    // Are we trying to unmap an unmapped page?
    if (!(pte_access & PG_PRESENT))
      return 1; // yes... then error!

    // At this point we know pte is present, so unmap it
    ivmmPTE_setAccess(pte,PG_NOT_PRESENT);   
    // retrieve original physical address
    // (setAccess above leaves the frame-address bits of the PTE intact)
    tmp_page=ivmmPTE_getAddress(pte);
    // return the data frame to the caller; it is NOT deallocated here
    *phys_address=tmp_page;

    // the PDE might have all PTEs set to not present, if so, free the PDE too
    
    // retrieve base PTE pointer for this 4 MB region (4 MB-aligned mask)
    virtual_to_pde_pte_ptr(virt_address&0xFFC00000,&pde,&base_pte);
    // "found" flags whether any PTE in the table is still in use
    found=0;
    // Scan the table for any entry that is not fully cleared
    for (i=0;i<PAGE_TAB_SIZE;i++)
      if (ivmmPTE_getAccess(base_pte+i)!=PG_NOT_PRESENT)
	{
	  found=1;
	  break;
	}
    
    // if there are no allocated pages in this PDE, then we can free it
    if (!found)
      {
	// get original physical address
	tmp_page=ivmmPDE_getAddress(pde);
	// free the PTEs page
	ivmmAllocator_DeallocatePage(tmp_page);
	//now set the PDE to not present
	ivmmPDE_setAccess(pde,PG_NOT_PRESENT);
      }
    
    // Drop any cached translation for this virtual page
    invalidate_tlb_entry(virt_address);

    return 0;
  }

// unmaps a virtual address and PRESERVES the physical page table
// it does not deallocate the data page
// returns the physical address it pointed to (data page)
/*
 * Unmaps a virtual address but PRESERVES the covering page table, even
 * when it ends up with no present entries (contrast with memory_unmap).
 * The data page itself is not deallocated.
 *
 * Returns 0 on success with the data page's physical address stored in
 * *phys_address; returns 1 if the address (or its PDE) was not mapped.
 */
int memory_unmap_preserve(dword_t virt_address,dword_t *phys_address)
  {
    PDE_T *dir_entry;
    PTE_T *tab_entry;
    PTE_T *first_pte;
    byte_t dir_access,tab_access;
    dword_t frame;
    int idx,in_use;

    /* Locate the PDE/PTE through the fractal mapping */
    virtual_to_pde_pte_ptr(virt_address,&dir_entry,&tab_entry);

    /* Nothing to do if the whole 4 MB region is unmapped */
    dir_access=ivmmPDE_getAccess(dir_entry);
    if (!(dir_access & PG_PRESENT))
      return 1;

    /* ...or if this particular page is unmapped */
    tab_access=ivmmPTE_getAccess(tab_entry);
    if (!(tab_access & PG_PRESENT))
      return 1;

    /* Clear the present bit, then read back the frame address
       (the address bits of the PTE survive the access update) */
    ivmmPTE_setAccess(tab_entry,PG_NOT_PRESENT);
    frame=ivmmPTE_getAddress(tab_entry);
    *phys_address=frame;

    /* Scan the table (4 MB-aligned base) for entries still in use.
       The result is intentionally unused: unlike memory_unmap, this
       variant never frees the page table. */
    virtual_to_pde_pte_ptr(virt_address&0xFFC00000,&dir_entry,&first_pte);
    in_use=0;
    for (idx=0;idx<PAGE_TAB_SIZE;idx++)
      if (ivmmPTE_getAccess(first_pte+idx)!=PG_NOT_PRESENT)
	{
	  in_use=1;
	  break;
	}

    /* Drop any cached translation for this virtual page */
    invalidate_tlb_entry(virt_address);

    return 0;
  }



/*
 * morecore: grow the kernel heap (core) by at least 'increment' bytes,
 * rounded up to whole pages, mapping fresh frames at the current break.
 *
 * Returns the virtual address of the start of the newly grown chunk
 * (sbrk-style: for increment <= 0 it allocates nothing and returns the
 * current break). Serialized indirectly via allocate_page's mutex.
 *
 * Fix: the old code right-shifted 'increment' unconditionally; for a
 * negative argument that shift is implementation-defined and the
 * round-up test could add a page, so non-positive increments are now
 * rejected up front.
 */
void *morecore(long increment)
  {
    dword_t chunk_start_addr;
    long i,pages;

    // Non-positive request: nothing to allocate, report current break
    if (increment<=0)
      return (void *)core_end;

    // compute how many pages, rounding up to a whole page
    pages=increment>>MEM_PAGE_SIZE_BITS;
    if (increment&(MEM_PAGE_SIZE-1))
      pages++;

    // start of allocation = current break
    chunk_start_addr=core_end;

    // map one fresh kernel-writable frame per page, advancing the break
    for (i=0;i<pages;i++)
      {
	allocate_page(core_end,PG_SUPER | PG_WRITE);
	core_end+=MEM_PAGE_SIZE;
      }
    
    return (void *)chunk_start_addr;

    // TO DO: propagate allocation failure (currently allocate_page
    // has no error return)
  }

/*
 * Allocate one physical frame and map it at virt_address with the given
 * access flags (memory_map ORs in PG_PRESENT itself). The whole
 * operation runs under paging_mutex with interrupts disabled.
 */
void allocate_page(dword_t virt_address,byte_t access)
  {
    dword_t frame;
    int int_state;

    /* Serialize against all other paging operations */
    int_state=lock_acquire_cli(&paging_mutex);

    /* TO DO: check allocation status, etc */

    /* Grab a frame and wire it in */
    frame=ivmmAllocator_AllocatePage();
    memory_map(virt_address,frame,access);

    /* Restore interrupts / release the lock */
    lock_release_sti(&paging_mutex,int_state);
  }

/*
 * Unmap the page at virt_address and return its physical frame to the
 * allocator (the inverse of allocate_page). Runs under paging_mutex
 * with interrupts disabled.
 */
void release_page(dword_t virt_address)
  {
    dword_t frame;
    int int_state;

    /* Serialize against all other paging operations */
    int_state=lock_acquire_cli(&paging_mutex);

    /* TO DO: check allocation status, etc */

    /* Drop the mapping, then free the backing frame */
    memory_unmap(virt_address,&frame);
    ivmmAllocator_DeallocatePage(frame);

    /* Restore interrupts / release the lock */
    lock_release_sti(&paging_mutex,int_state);
  }

// creates a new page directory and copies the kernel pages into it
// returns the physical address of the page directory (page aligned)
dword_t new_address_space()
  {
    dword_t phys_address,ph2;
    dword_t *src,*dst;
    int i;

    // get a physical page
    phys_address=ivmmAllocator_AllocatePage();

    // temporarily map the new page directory to NEW_VIRTUAL_SPACE
    memory_map(NEW_VIRTUAL_SPACE,phys_address,PG_SUPER | PG_WRITE);

    // clone kernel space to new page directory
    dst=(dword_t *)NEW_VIRTUAL_SPACE;
    src=(dword_t *)PAGE_DIR_START;

    // zero out lower 512 entries (512 is PAGE_DIR_SIZE/2)
    for (i=0;i<512;i++)      
      *(dst+i) = (dword_t)0;
    
    // copy upper half of page directory
    for (i=512;i<PAGE_DIR_SIZE;i++)      
      *(dst+i) = *(src+i);

    // patch upper entry to point to itself
    dst=(dword_t *)NEW_VIRTUAL_SPACE;
    dst+=(PAGE_DIR_SIZE-1);
    ivmmPDE_setAccess(dst,PG_PRESENT|PG_WRITE|PG_SUPER);
    ivmmPDE_setOsBits(dst,0);
    ivmmPDE_setAddress(dst,phys_address);

    // unmap the new virtual space
    memory_unmap_preserve(NEW_VIRTUAL_SPACE,&ph2);

    // ph2 should be equal to phys_address
    if (phys_address != ph2)
      {
	printf("VMM: phys address: 0x%x vs 0x%x\n",phys_address,ph2);
	panic("VMM - new_address_space: addresses mismatch.");
      }
    
    return phys_address;
  }

// Destroys an address space by releasing the frame holding its page
// directory (counterpart of new_address_space).
// NOTE(review): only the directory page itself is freed here; any page
// tables still referenced by its user-half PDEs are NOT released —
// presumably callers must unmap/free those first. Confirm.
void destroy_address_space(dword_t page_dir)
  {
    ivmmAllocator_DeallocatePage(page_dir);
  }