#include "kernel.h"
#include "errno.h"
#include "globals.h"

#include "vm/vmmap.h"
#include "vm/shadow.h"
#include "vm/anon.h"

#include "proc/proc.h"

#include "util/debug.h"
#include "util/list.h"
#include "util/string.h"
#include "util/printf.h"

#include "fs/vnode.h"
#include "fs/file.h"
#include "fs/fcntl.h"
#include "fs/vfs_syscall.h"

#include "mm/slab.h"
#include "mm/page.h"
#include "mm/mm.h"
#include "mm/mman.h"
#include "mm/mmobj.h"

static slab_allocator_t *vmmap_allocator;
static slab_allocator_t *vmarea_allocator;

void
vmmap_init(void)
{
        /* Create the slab caches backing vmmap and vmarea allocations.
         * Called once at boot; failure to create either cache is fatal. */
        vmmap_allocator = slab_allocator_create("vmmap", sizeof(vmmap_t));
        KASSERT(vmmap_allocator != NULL && "failed to create vmmap allocator!");

        vmarea_allocator = slab_allocator_create("vmarea", sizeof(vmarea_t));
        KASSERT(vmarea_allocator != NULL && "failed to create vmarea allocator!");
}

/* Allocate a vmarea from the slab cache with all fields zeroed/NULLed.
 * Returns NULL if the allocation fails. */
vmarea_t *
vmarea_alloc(void)
{
        vmarea_t *newvma = (vmarea_t *) slab_obj_alloc(vmarea_allocator);
        if (NULL == newvma) {
                /* BUG FIX: the original checked for NULL but then
                 * dereferenced newvma unconditionally below, crashing on
                 * allocation failure. */
                return NULL;
        }

        /* Initialize fields so a fresh area carries no stale state. */
        newvma->vma_vmmap = NULL;
        newvma->vma_prot = 0;
        newvma->vma_flags = 0;
        newvma->vma_obj = NULL;
        return newvma;
}

/* Return a vmarea to its slab cache. The caller must already have
 * unlinked it from any vmmap list and released its mmobj reference. */
void
vmarea_free(vmarea_t *vma)
{
        KASSERT(vma != NULL);
        slab_obj_free(vmarea_allocator, vma);
}

/* Create a new vmmap, which has no vmareas and does
 * not refer to a process. */
vmmap_t *
vmmap_create(void)
{
        vmmap_t* newvmmap = slab_obj_alloc(vmmap_allocator);
        KASSERT(NULL != newvmmap && "new allocated vmmap cannot be empty\n");

        /*init variables*/
        list_init(&newvmmap->vmm_list);
        /*newvmmap->vmm_proc = curproc;*/
        return newvmmap;
}

/* Removes all vmareas from the address space and frees the
 * vmmap struct. */
/* Removes all vmareas from the address space and frees the
 * vmmap struct. Also releases each area's reference on its
 * backing mmobj. */
void
vmmap_destroy(vmmap_t *map)
{
        KASSERT(NULL != map);

        vmarea_t *vma;
        list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
                list_remove(&vma->vma_plink);
                /* BUG FIX: drop the area's reference on its backing
                 * object; the original freed the vmarea without a put(),
                 * leaking the mmobj and every page it caches. */
                if (NULL != vma->vma_obj) {
                        vma->vma_obj->mmo_ops->put(vma->vma_obj);
                }
                vmarea_free(vma);
        } list_iterate_end();

        slab_obj_free(vmmap_allocator, map);
}

/* Add a vmarea to an address space. Assumes (i.e. asserts to some extent)
 * the vmarea is valid.  This involves finding where to put it in the list
 * of VM areas, and adding it. Don't forget to set the vma_vmmap for the
 * area. */
/* Add a vmarea to an address space, keeping the list sorted by
 * vma_start. Asserts the area is valid and does not overlap any
 * existing mapping. Sets vma_vmmap on the area. */
void
vmmap_insert(vmmap_t *map, vmarea_t *newvma)
{
        KASSERT(NULL != map && NULL != newvma);
        KASSERT(NULL == newvma->vma_vmmap);
        KASSERT(newvma->vma_start < newvma->vma_end);
        KASSERT(ADDR_TO_PN(USER_MEM_LOW) <= newvma->vma_start &&
                ADDR_TO_PN(USER_MEM_HIGH) >= newvma->vma_end);

        newvma->vma_vmmap = map;

        /* Insert before the first existing area that begins at or after
         * the new area's end; this keeps the list ordered by vma_start.
         *
         * BUG FIX: the original walked (vma, vma->next) pairs with raw
         * links and, when it reached the tail, called list_item() on the
         * list sentinel — reading garbage as a vmarea_t. Iterating areas
         * one at a time never touches the sentinel. */
        vmarea_t *vma;
        list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
                if (vma->vma_start >= newvma->vma_end) {
                        list_insert_before(&vma->vma_plink, &newvma->vma_plink);
                        return;
                }
                /* Every earlier area must end at or before the new start,
                 * otherwise the caller handed us an overlapping mapping. */
                KASSERT(vma->vma_end <= newvma->vma_start &&
                        "new vmarea overlaps an existing mapping");
        } list_iterate_end();

        /* Empty list, or the new area comes after every existing one. */
        list_insert_tail(&map->vmm_list, &newvma->vma_plink);
}

/* Find a contiguous range of free virtual pages of length npages in
 * the given address space. Returns starting vfn for the range,
 * without altering the map. Returns -1 if no such range exists.
 *
 * Your algorithm should be first fit. If dir is VMMAP_DIR_HILO, you
 * should find a gap as high in the address space as possible; if dir
 * is VMMAP_DIR_LOHI, the gap should be as low as possible. */

/* Find a contiguous range of FREE virtual pages of length npages in
 * the given address space, without altering the map. Returns the
 * starting vfn of the range, or -1 if no such range exists.
 *
 * First fit: VMMAP_DIR_LOHI finds the lowest suitable gap,
 * VMMAP_DIR_HILO the highest.
 *
 * BUG FIX: the original returned the start of an EXISTING mapping
 * whose own size was >= npages — i.e. it measured mapped areas, not
 * the gaps between them — and returned 0 instead of -1 on failure. */
int
vmmap_find_range(vmmap_t *map, uint32_t npages, int dir)
{
        KASSERT(NULL != map);
        KASSERT(0 < npages);
        KASSERT(dir == VMMAP_DIR_HILO || dir == VMMAP_DIR_LOHI);

        uint32_t lo = ADDR_TO_PN(USER_MEM_LOW);
        uint32_t hi = ADDR_TO_PN(USER_MEM_HIGH);
        vmarea_t *vma;

        if (VMMAP_DIR_LOHI == dir) {
                /* Sweep upward: 'start' is the lowest still-free vfn. */
                uint32_t start = lo;
                list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
                        if (vma->vma_start >= start &&
                            vma->vma_start - start >= npages) {
                                return (int)start;
                        }
                        if (vma->vma_end > start) {
                                start = vma->vma_end;
                        }
                } list_iterate_end();
                /* Gap between the last area and the top of user space. */
                if (hi > start && hi - start >= npages) {
                        return (int)start;
                }
        } else {
                /* Sweep downward: 'end' is one past the highest free vfn. */
                uint32_t end = hi;
                list_iterate_reverse(&map->vmm_list, vma, vmarea_t, vma_plink) {
                        if (end >= vma->vma_end &&
                            end - vma->vma_end >= npages) {
                                return (int)(end - npages);
                        }
                        if (vma->vma_start < end) {
                                end = vma->vma_start;
                        }
                } list_iterate_end();
                /* Gap between the bottom of user space and the first area. */
                if (end > lo && end - lo >= npages) {
                        return (int)(end - npages);
                }
        }

        return -1;
}

/* Find the vm_area that vfn lies in. Simply scan the address space
 * looking for a vma whose range covers vfn. If the page is unmapped,
 * return NULL. */
/* Find the vmarea that vfn lies in by scanning the address space for
 * an area whose [vma_start, vma_end) range covers vfn. Returns NULL
 * if the page is unmapped. */
vmarea_t *
vmmap_lookup(vmmap_t *map, uint32_t vfn)
{
        KASSERT(NULL != map);

        vmarea_t *area;
        list_iterate_begin(&map->vmm_list, area, vmarea_t, vma_plink) {
                int covered = (area->vma_start <= vfn) && (vfn < area->vma_end);
                if (covered) {
                        return area;
                }
        } list_iterate_end();

        /* No area covers vfn: the page is unmapped. */
        return NULL;
}

/* Allocates a new vmmap containing a new vmarea for each area in the
 * given map. The areas should have no mmobjs set yet. Returns pointer
 * to the new vmmap on success, NULL on failure. This function is
 * called when implementing fork(2). */
/* Allocates a new vmmap containing a new vmarea for each area in the
 * given map. The areas have no mmobjs set yet (vmarea_alloc leaves
 * vma_obj NULL; fork sets the objects up afterward). Returns a pointer
 * to the new vmmap on success, NULL on failure. Called when
 * implementing fork(2). */
vmmap_t *
vmmap_clone(vmmap_t *map)
{
        KASSERT(NULL != map);

        vmmap_t *newmap = vmmap_create();
        if (NULL == newmap) {
                return NULL;
        }

        vmarea_t *vma;
        list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
                vmarea_t *newvma = vmarea_alloc();
                if (NULL == newvma) {
                        /* BUG FIX: the original returned NULL here and
                         * leaked the partially-built map and its areas. */
                        vmmap_destroy(newmap);
                        return NULL;
                }

                /* Copy the mapping geometry and attributes.
                 * BUG FIX: the original never copied vma_flags and never
                 * set vma_vmmap on the clone's areas. */
                newvma->vma_start = vma->vma_start;
                newvma->vma_end = vma->vma_end;
                newvma->vma_off = vma->vma_off;
                newvma->vma_prot = vma->vma_prot;
                newvma->vma_flags = vma->vma_flags;
                newvma->vma_vmmap = newmap;

                /* Source list is sorted, so appending preserves order. */
                list_insert_tail(&newmap->vmm_list, &newvma->vma_plink);
        } list_iterate_end();

        return newmap;
}

/* Insert a mapping into the map starting at lopage for npages pages.
 * If lopage is zero, we will find a range of virtual addresses in the
 * process that is big enough, by using vmmap_find_range with the same
 * dir argument.  If lopage is non-zero and the specified region
 * contains another mapping that mapping should be unmapped.
 *
 * If file is NULL an anon mmobj will be used to create a mapping
 * of 0's.  If file is non-null that vnode's file will be mapped in
 * for the given range.  Use the vnode's mmap operation to get the
 * mmobj for the file; do not assume it is file->vn_obj. Make sure all
 * of the area's fields except for vma_obj have been set before
 * calling mmap.
 *
 * If MAP_PRIVATE is specified set up a shadow object for the mmobj.
 *
 * All of the input to this function should be valid (KASSERT!).
 * See mmap(2) for for description of legal input.
 * Note that off should be page aligned.
 *
 * Be very careful about the order operations are performed in here. Some
 * operation are impossible to undo and should be saved until there
 * is no chance of failure.
 *
 * If 'new' is non-NULL a pointer to the new vmarea_t should be stored in it.
 */
/* Insert a mapping into the map starting at lopage for npages pages.
 * If lopage is zero a free range is chosen with vmmap_find_range(dir);
 * if lopage is non-zero, any existing mapping in the region is removed
 * first. With file == NULL an anonymous zero-fill mmobj backs the
 * mapping; otherwise the vnode's mmap operation supplies the object.
 * MAP_PRIVATE mappings are wrapped in a shadow object. On success the
 * new vmarea is stored through 'new' (if non-NULL) and 0 is returned;
 * on failure a negative errno is returned and the map is unchanged.
 *
 * All fallible steps are performed BEFORE the irreversible ones
 * (vmmap_remove / vmmap_insert), so a failure leaves the address
 * space intact. */
int
vmmap_map(vmmap_t *map, vnode_t *file, uint32_t lopage, uint32_t npages,
          int prot, int flags, off_t off, int dir, vmarea_t **new)
{
        KASSERT(NULL != map);
        KASSERT(0 < npages);
        KASSERT(!(~(PROT_NONE | PROT_READ | PROT_WRITE | PROT_EXEC) & prot));
        KASSERT((MAP_SHARED & flags) || (MAP_PRIVATE & flags));
        KASSERT((0 == lopage) || (ADDR_TO_PN(USER_MEM_LOW) <= lopage));
        KASSERT((0 == lopage) || (ADDR_TO_PN(USER_MEM_HIGH) >= (lopage + npages)));
        KASSERT(PAGE_ALIGNED(off));

        uint32_t start = lopage;
        if (0 == lopage) {
                /* BUG FIX: vmmap_find_range returns a FREE range, so the
                 * original's vmmap_lookup(map, start) could only return
                 * NULL and its KASSERT(vma != NULL) always fired. We
                 * allocate a fresh vmarea for the gap instead. */
                int res = vmmap_find_range(map, npages, dir);
                if (res < 0) {
                        return -ENOMEM;
                }
                start = (uint32_t)res;
        }

        /* Set every field except vma_obj before calling mmap, as the
         * vnode's mmap op may inspect the area. */
        vmarea_t *vma = vmarea_alloc();
        if (NULL == vma) {
                return -ENOMEM;
        }
        vma->vma_start = start;
        vma->vma_end = start + npages;
        /* vma_off is used as a PAGE number by vmmap_read/vmmap_write, so
         * convert the (page-aligned) byte offset here. */
        vma->vma_off = ADDR_TO_PN(off);
        vma->vma_prot = prot;
        vma->vma_flags = flags;

        /* Obtain the backing object: anonymous zero-fill when there is
         * no file, else whatever the vnode's mmap operation hands back
         * (do not assume it is file->vn_obj). */
        mmobj_t *obj = NULL;
        if (NULL == file) {
                obj = anon_create();
                if (NULL == obj) {
                        vmarea_free(vma);
                        return -ENOMEM;
                }
        } else {
                int res = file->vn_ops->mmap(file, vma, &obj);
                if (res < 0) {
                        /* BUG FIX: the original KASSERTed instead of
                         * propagating the error. */
                        vmarea_free(vma);
                        return res;
                }
        }

        if (flags & MAP_PRIVATE) {
                /* Private mappings get a shadow object so writes are
                 * copy-on-write and never reach the bottom object.
                 * NOTE(review): assumes shadow_create() returns an object
                 * holding no shadowed link yet and that assigning
                 * mmo_shadowed is sufficient wiring here — confirm against
                 * shadow.c's expectations (e.g. bottom-object bookkeeping). */
                mmobj_t *shadow = shadow_create();
                if (NULL == shadow) {
                        obj->mmo_ops->put(obj);
                        vmarea_free(vma);
                        return -ENOMEM;
                }
                shadow->mmo_shadowed = obj;
                vma->vma_obj = shadow;
        } else {
                vma->vma_obj = obj;
        }

        /* Nothing can fail from here on: now it is safe to clobber any
         * existing mapping and link in the new area. */
        if (0 != lopage) {
                vmmap_remove(map, lopage, npages);
        }
        vmmap_insert(map, vma);

        if (NULL != new) {
                *new = vma;
        }
        return 0;
}


/* * We have no guarantee that the region of the address space being
 * unmapped will play nicely with our list of vmareas.
 *
 * You must iterate over each vmarea that is partially or wholly covered
 * by the address range [addr ... addr+len). The vm-area will fall into one
 * of four cases, as illustrated below:
 *
 * key:
 *          [             ]   Existing VM Area
 *        *******             Region to be unmapped
 *
 * Case 1:  [   ******    ]
 * The region to be unmapped lies completely inside the vmarea. We need to
 * split the old vmarea into two vmareas. be sure to increment the
 * reference count to the file associated with the vmarea.
 *
 * Case 2:  [      *******]**
 * The region overlaps the end of the vmarea. Just shorten the length of
 * the mapping.
 *
 * Case 3: *[*****        ]
 * The region overlaps the beginning of the vmarea. Move the beginning of
 * the mapping (remember to update vma_off), and shorten its length.
 *
 * Case 4: *[*************]**
 * The region completely contains the vmarea. Remove the vmarea from the
 * list.*/
 
/* Unmap [lopage, lopage+npages) from the address space, handling the
 * four overlap cases described above. Returns 0 on success, -errno on
 * failure. */
int
vmmap_remove(vmmap_t *map, uint32_t lopage, uint32_t npages)
{
        KASSERT(NULL != map);
        KASSERT(0 < npages && (ADDR_TO_PN(USER_MEM_LOW) <= lopage));

        uint32_t hipage = lopage + npages;
        vmarea_t *vma;
        list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {

                /* No overlap with the region being unmapped. */
                if (vma->vma_start >= hipage || vma->vma_end <= lopage) {
                        continue;
                }

                if (vma->vma_start < lopage && hipage < vma->vma_end) {
                        /* Case 1: [   ******    ] — split into two areas.
                         * BUG FIX: the original shortened vma->vma_end
                         * BEFORE copying it into the right half (so the
                         * right half got the clobbered value), never
                         * inserted the new area, never adjusted its
                         * offset, and then hit a KASSERT(NULL). */
                        vmarea_t *right = vmarea_alloc();
                        if (NULL == right) {
                                return -ENOMEM;
                        }
                        right->vma_start = hipage;
                        right->vma_end = vma->vma_end;
                        /* The right half begins (hipage - vma_start) pages
                         * into the object, past the left half's offset. */
                        right->vma_off = vma->vma_off + (hipage - vma->vma_start);
                        right->vma_prot = vma->vma_prot;
                        right->vma_flags = vma->vma_flags;
                        right->vma_obj = vma->vma_obj;
                        if (NULL != right->vma_obj) {
                                /* Both halves now reference the object. */
                                right->vma_obj->mmo_ops->ref(right->vma_obj);
                        }
                        vma->vma_end = lopage;
                        vmmap_insert(map, right);
                } else if (vma->vma_start < lopage) {
                        /* Case 2: [      *******]** — shorten the tail. */
                        vma->vma_end = lopage;
                } else if (hipage < vma->vma_end) {
                        /* Case 3: *[*****        ] — move the start and
                         * keep vma_off in sync (BUG FIX: the original
                         * left vma_off pointing at the old start). */
                        vma->vma_off += hipage - vma->vma_start;
                        vma->vma_start = hipage;
                } else {
                        /* Case 4: *[*************]** — remove the area
                         * and release its object reference (BUG FIX: the
                         * original leaked the mmobj reference). */
                        list_remove(&vma->vma_plink);
                        if (NULL != vma->vma_obj) {
                                vma->vma_obj->mmo_ops->put(vma->vma_obj);
                        }
                        vmarea_free(vma);
                }

        } list_iterate_end();

        return 0;
}

/*
 * Returns 1 if the given address space has no mappings for the
 * given range, 0 otherwise.
 */
/*
 * Returns 1 if the given address space has no mappings for the
 * given range, 0 otherwise.
 */
int
vmmap_is_range_empty(vmmap_t *map, uint32_t startvfn, uint32_t npages)
{
        KASSERT(NULL != map);
        KASSERT(0 < npages);

        uint32_t endvfn = startvfn + npages;
        vmarea_t *vma;
        list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
                /* Any area intersecting [startvfn, endvfn) makes the
                 * range non-empty.
                 * BUG FIX: the original additionally required
                 * MAP_SHARED/MAP_PRIVATE to be set, so an area whose
                 * flags were still zero (fresh from vmarea_alloc) was
                 * wrongly reported as empty space. */
                if (vma->vma_end > startvfn && vma->vma_start < endvfn) {
                        return 0;
                }
        } list_iterate_end();

        return 1;
}

/* Read into 'buf' from the virtual address space of 'map' starting at
 * 'vaddr' for size 'count'. To do so, you will want to find the vmareas
 * to read from, then find the pframes within those vmareas corresponding
 * to the virtual addresses you want to read, and then read from the
 * physical memory that pframe points to. You should not check permissions
 * of the areas. Assume (KASSERT) that all the areas you are accessing exist.
 * Returns 0 on success, -errno on error.
 */
/* Read into 'buf' from the virtual address space of 'map' starting at
 * 'vaddr' for 'count' bytes, one page at a time: look up the vmarea
 * covering the current page, fetch its pframe, and copy from the
 * physical memory the pframe points to. Permissions are not checked;
 * the areas accessed are asserted to exist.
 * Returns 0 on success, -errno on error. */
int
vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
{
        KASSERT(NULL != map);

        const char *src = (const char *)vaddr;
        char *dst = (char *)buf;
        size_t remaining = count;

        while (remaining > 0) {
                uint32_t vfn = ADDR_TO_PN(src);
                vmarea_t *vma = vmmap_lookup(map, vfn);
                KASSERT(NULL != vma && "reading from an unmapped address");

                /* Copy at most to the end of the current page: a pframe
                 * maps exactly one page, so one memcpy must never cross a
                 * page boundary.
                 * BUG FIX: the original computed a multi-page length
                 * (with an extra off-by-one "+1"), used PAGE_OFFSET of
                 * the ORIGINAL vaddr on every iteration, and memcpy'd the
                 * whole span from a single pf_addr. */
                size_t pgoff = PAGE_OFFSET(src);
                size_t chunk = PAGE_SIZE - pgoff;
                if (chunk > remaining) {
                        chunk = remaining;
                }

                pframe_t *pf = NULL;
                uint32_t pagenum = vma->vma_off + (vfn - vma->vma_start);
                int res = vma->vma_obj->mmo_ops->lookuppage(vma->vma_obj,
                                                            pagenum, 0, &pf);
                if (res < 0) {
                        /* BUG FIX: the original ignored lookuppage errors. */
                        return res;
                }
                memcpy(dst, (char *)pf->pf_addr + pgoff, chunk);

                src += chunk;
                dst += chunk;
                remaining -= chunk;
        }
        return 0;
}

/* Write from 'buf' into the virtual address space of 'map' starting at
 * 'vaddr' for size 'count'. To do this, you will need to find the correct
 * vmareas to write into, then find the correct pframes within those vmareas,
 * and finally write into the physical addresses that those pframes correspond
 * to. You should not check permissions of the areas you use. Assume (KASSERT)
 * that all the areas you are accessing exist. Remember to dirty pages!
 * Returns 0 on success, -errno on error.
 */
/* Write from 'buf' into the virtual address space of 'map' starting at
 * 'vaddr' for 'count' bytes, one page at a time: look up the vmarea
 * covering the current page, fetch its pframe for writing, copy into
 * the physical memory it points to, and dirty exactly that page.
 * Permissions are not checked; the areas accessed are asserted to
 * exist. Returns 0 on success, -errno on error. */
int
vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
{
        KASSERT(NULL != map);

        char *dst = (char *)vaddr;
        const char *src = (const char *)buf;
        size_t remaining = count;

        while (remaining > 0) {
                uint32_t vfn = ADDR_TO_PN(dst);
                vmarea_t *vma = vmmap_lookup(map, vfn);
                /* BUG FIX: the original dereferenced vma without checking. */
                KASSERT(NULL != vma && "writing to an unmapped address");

                /* Write at most to the end of the current page: a pframe
                 * maps exactly one page, so a single memcpy must never
                 * cross a page boundary.
                 * BUG FIX: the original computed a multi-page span, used
                 * PAGE_OFFSET of the ORIGINAL vaddr on every iteration,
                 * copied the whole span through one pf_addr, and then
                 * re-derived which pages to dirty with off-by-one
                 * arithmetic. Writing page-by-page makes dirtying the
                 * touched page trivial. */
                size_t pgoff = PAGE_OFFSET(dst);
                size_t chunk = PAGE_SIZE - pgoff;
                if (chunk > remaining) {
                        chunk = remaining;
                }

                pframe_t *pf = NULL;
                uint32_t pagenum = vma->vma_off + (vfn - vma->vma_start);
                int res = vma->vma_obj->mmo_ops->lookuppage(vma->vma_obj,
                                                            pagenum, 1, &pf);
                if (res < 0) {
                        return res;
                }
                memcpy((char *)pf->pf_addr + pgoff, src, chunk);
                /* Mark the page we just modified as dirty. */
                vma->vma_obj->mmo_ops->dirtypage(vma->vma_obj, pf);

                dst += chunk;
                src += chunk;
                remaining -= chunk;
        }

        return 0;
}

/* a debugging routine: dumps the mappings of the given address space. */
/* A debugging routine: dumps the mappings of the given address space
 * into 'buf' (capacity 'osize'). Returns the number of bytes written,
 * at most osize - 1; output is always NUL-terminated. */
size_t
vmmap_mapping_info(const void *vmmap, char *buf, size_t osize)
{
        KASSERT(0 < osize);
        KASSERT(NULL != buf);
        KASSERT(NULL != vmmap);

        vmmap_t *map = (vmmap_t *)vmmap;
        vmarea_t *vma;
        ssize_t size = (ssize_t)osize;
        /* Remember the base of the caller's buffer; 'buf' is advanced
         * below as each line is appended. */
        char *start = buf;

        int len = snprintf(buf, size, "%21s %5s %7s %8s %10s %12s\n",
                           "VADDR RANGE", "PROT", "FLAGS", "MMOBJ", "OFFSET",
                           "VFN RANGE");

        list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
                size -= len;
                buf += len;
                if (0 >= size) {
                        goto end;
                }

                len = snprintf(buf, size,
                               "%#.8x-%#.8x  %c%c%c  %7s 0x%p %#.5x %#.5x-%#.5x\n",
                               vma->vma_start << PAGE_SHIFT,
                               vma->vma_end << PAGE_SHIFT,
                               (vma->vma_prot & PROT_READ ? 'r' : '-'),
                               (vma->vma_prot & PROT_WRITE ? 'w' : '-'),
                               (vma->vma_prot & PROT_EXEC ? 'x' : '-'),
                               (vma->vma_flags & MAP_SHARED ? " SHARED" : "PRIVATE"),
                               vma->vma_obj, vma->vma_off, vma->vma_start, vma->vma_end);
        } list_iterate_end();

end:
        if (size <= 0) {
                size = osize;
                /* BUG FIX: terminate at the end of the CALLER'S buffer.
                 * The original wrote buf[osize - 1] through the advanced
                 * pointer, an out-of-bounds write on truncation. */
                start[osize - 1] = '\0';
        }
        return osize - size;
}

/* Debug helper: print a label followed by a dump of the given
 * address space's mappings. */
void showVmMapInfo(const void *vmmap, const char *str)
{
        /* BUG FIX: pass the caller's label through "%s" so any '%' in it
         * cannot be interpreted as a format directive (format-string bug). */
        dbg_print("%s", str);

        char buf[2048];
        vmmap_mapping_info(vmmap, buf, sizeof buf);
        dbgq(DBG_VM, ("%s", buf));
}


