#include "types.h"
#include "globals.h"
#include "kernel.h"
#include "errno.h"

#include "util/debug.h"

#include "proc/proc.h"

#include "mm/mm.h"
#include "mm/mman.h"
#include "mm/page.h"
#include "mm/mmobj.h"
#include "mm/pframe.h"
#include "mm/pagetable.h"

#include "vm/pagefault.h"
#include "vm/vmmap.h"

/*
 * This gets called by _pt_fault_handler in mm/pagetable.c The
 * calling function has already done a lot of error checking for
 * us. In particular it has checked that we are not page faulting
 * while in kernel mode. Make sure you understand why an
 * unexpected page fault in kernel mode is bad in Weenix. You
 * should probably read the _pt_fault_handler function to get a
 * sense of what it is doing.
 *
 * Before you can do anything you need to find the vmarea that
 * contains the address that was faulted on. Make sure to check
 * the permissions on the area to see if the process has
 * permission to perform the access indicated by [cause]. If
 * either of these checks fails, kill the offending process,
 * EFAULT (normally we would send the SIGSEGV signal, however
 * Weenix does not support signals).
 *
 * Now it is time to find the correct page (don't forget
 * about shadow objects, especially copy-on-write magic!). Make
 * sure that if the user writes to the page it will be handled
 * correctly.
 *
 * Finally call pt_map to have the new mapping placed into the
 * appropriate page table.
 *
 * @param vaddr the address that was accessed to cause the fault
 *
 * @param cause this is the type of operation on the memory
 *              address which caused the fault, possible values
 *              can be found in pagefault.h
 */
void
handle_pagefault(uintptr_t vaddr, uint32_t cause)
{
	dbg_print("handle_pagefault is called with vaddr : %x\n", vaddr);
    dbg_print("current process is : %d\n", curproc->p_pid);
	KASSERT(NULL != curproc->p_pagedir);
	KASSERT(NULL != curproc->p_vmmap);

    int vfn = ADDR_TO_PN(vaddr);
	vmmap_t* map = curproc->p_vmmap;
	vmarea_t * vma = vmmap_lookup(map, vfn);
	
	/*check the permission*/ 
    showVmMapInfo(map, "show vmareas in handle_pagefault\n");


    if(!vma /*|| !(vma->vma_prot & cause)*/)
    {
        KASSERT(NULL);
        proc_kill(curproc, -EFAULT);
       /* curthr->kt_errno = EFAULT;*/
        return;
    }

     if( vma->vma_prot == PROT_NONE)
    {
        KASSERT(NULL);

    }

   /* if( (cause & PROT_WRITE) && (vma->vma_flags & MAP_PRIVATE))
    {
         KASSERT(NULL);
    }*/
	
	int pageNum = vma->vma_off + vfn - vma->vma_start;

	pframe_t* pf = NULL;
    vma->vma_obj->mmo_ops->lookuppage(vma->vma_obj, pageNum, 0, &pf);
    KASSERT(PAGE_ALIGNED(pf->pf_addr));

    /*pframe_pin(pf);*/
   /* int res = pt_map(curproc->p_pagedir, (uintptr_t)PN_TO_ADDR(vfn), (uintptr_t)pf->pf_addr, PD_PRESENT | PD_USER , PT_PRESENT | PT_USER);*/
    int res = pt_map(pt_get(), (uint32_t)PAGE_ALIGN_DOWN(vaddr), pt_virt_to_phys(pf->pf_addr), PD_USER|vma->vma_prot|cause , PT_USER|vma->vma_prot|cause );
    KASSERT(res >= 0 && "pt_map error\n");
}
