#include "kernel.h"
#include "errno.h"
#include "globals.h"

#include "vm/vmmap.h"
#include "vm/shadow.h"
#include "vm/anon.h"

#include "proc/proc.h"

#include "util/debug.h"
#include "util/list.h"
#include "util/string.h"
#include "util/printf.h"

#include "fs/vnode.h"
#include "fs/file.h"
#include "fs/fcntl.h"
#include "fs/vfs_syscall.h"

#include "mm/slab.h"
#include "mm/page.h"
#include "mm/mm.h"
#include "mm/mman.h"
#include "mm/mmobj.h"
#include "mm/pagetable.h"

static slab_allocator_t *vmmap_allocator;
static slab_allocator_t *vmarea_allocator;

/* One-time setup: create the slab caches from which vmmap and vmarea
 * structures are allocated.  Panics (KASSERT) if either cache cannot
 * be created. */
void
vmmap_init(void)
{
        vmarea_allocator = slab_allocator_create("vmarea", sizeof(vmarea_t));
        KASSERT(NULL != vmarea_allocator && "failed to create vmarea allocator!");
        vmmap_allocator = slab_allocator_create("vmmap", sizeof(vmmap_t));
        KASSERT(NULL != vmmap_allocator && "failed to create vmmap allocator!");
}

/* Allocate a vmarea from its slab cache.  The returned area belongs to
 * no map (vma_vmmap is NULL); other fields are uninitialized.  Returns
 * NULL if the slab allocation fails. */
vmarea_t *
vmarea_alloc(void)
{
        vmarea_t *vma = (vmarea_t *) slab_obj_alloc(vmarea_allocator);
        if (NULL == vma) {
                return NULL;
        }
        vma->vma_vmmap = NULL;
        return vma;
}

/* Return a vmarea to its slab cache.  The caller is responsible for
 * having unlinked it from any vmmap first; 'vma' must not be NULL. */
void
vmarea_free(vmarea_t *vma)
{
        KASSERT(NULL != vma);
        slab_obj_free(vmarea_allocator, vma);
}

/* Create a new vmmap, which has no vmareas and does
 * not refer to a process. */
vmmap_t *
vmmap_create(void)
{
	vmmap_t *vmmap;
	vmmap = (vmmap_t*)slab_obj_alloc(vmmap_allocator);
	if(vmmap == NULL){
		return NULL;
	}
	memset(vmmap, 0, sizeof(vmmap_t));
	list_init(&vmmap->vmm_list);
/*        NOT_YET_IMPLEMENTED("VM: vmmap_create");*/
        return vmmap;
}

/* Removes all vmareas from the address space and frees the
 * vmmap struct. */
void
vmmap_destroy(vmmap_t *map)
{
	KASSERT(NULL != map);
	dbg(DBG_VM,"(GRADING 1.a) map is not NULL!\n");

	vmarea_t *vmarea;
	list_iterate_begin(&map->vmm_list, vmarea, vmarea_t, vma_plink) {
		list_remove(&vmarea->vma_plink);
		/* BUG FIX: the free below was commented out, so every
		 * destroyed map leaked one slab object per vmarea.
		 * NOTE(review): if the area still holds a reference on its
		 * vma_obj at this point, that reference should be put before
		 * the area is freed -- confirm against the caller's (e.g.
		 * proc exit) teardown path. */
		vmarea_free(vmarea);
	} list_iterate_end();
	slab_obj_free(vmmap_allocator, map);
}

/* Add a vmarea to an address space. Assumes (i.e. asserts to some extent)
 * the vmarea is valid.  The list of areas is kept sorted by starting
 * page number; newvma->vma_vmmap is set to 'map'.  The area's offset
 * (vma_off) is NOT touched -- it belongs to the caller. */
void
vmmap_insert(vmmap_t *map, vmarea_t *newvma)
{
    KASSERT(NULL != map && NULL != newvma);
    dbg(DBG_VMMAP,"(GRADING 1.b) In vmmap_insert(): map and newvma is not NULL\n");

    KASSERT(NULL == newvma->vma_vmmap);
    dbg(DBG_VMMAP,"(GRADING 1.b) In vmmap_insert(): newvma->vma_vmmap is not NULL\n");

    KASSERT(newvma->vma_start < newvma->vma_end);
    dbg(DBG_VMMAP,"(GRADING 1.b) newvma->vma_start is greater than newvma->vma_end\n");

    KASSERT(ADDR_TO_PN(USER_MEM_LOW) <= newvma->vma_start && ADDR_TO_PN(USER_MEM_HIGH) >= newvma->vma_end);
    dbg(DBG_VMMAP,"(GRADING 1.b) newvma is in the right position\n");

	vmarea_t *oldvma;
	uint32_t prev_end = 0; /* end page of the area preceding the scan position */
	list_iterate_begin(&map->vmm_list, oldvma, vmarea_t, vma_plink) {
		/* Ranges are half-open ([start, end)), so an existing area
		 * whose start EQUALS newvma->vma_end lies strictly after
		 * newvma and we may insert before it.  BUG FIX: the old code
		 * used '>' here, refused to insert before an adjacent area,
		 * and fell through to a tail insert that could leave the
		 * list unsorted.  It also rewrote newvma->vma_off on this
		 * path, clobbering the offset chosen by the caller. */
		if (oldvma->vma_start >= newvma->vma_end &&
		    newvma->vma_start >= prev_end) {
			list_insert_before(&oldvma->vma_plink, &newvma->vma_plink);
			newvma->vma_vmmap = map;
			return;
		}
		prev_end = oldvma->vma_end;
	} list_iterate_end();
	/* Empty map, or newvma belongs after every existing area. */
	list_insert_tail(&map->vmm_list, &newvma->vma_plink);
	newvma->vma_vmmap = map;
}

/* Find a contiguous range of free virtual pages of length npages in
 * the given address space. Returns starting vfn for the range,
 * without altering the map. Returns -1 if no such range exists.
 *
 * Your algorithm should be first fit. If dir is VMMAP_DIR_HILO, you
 * should find a gap as high in the address space as possible; if dir
 * is VMMAP_DIR_LOHI, the gap should be as low as possible. */
/* First-fit search for 'npages' free pages (see the contract comment
 * above).  Returns the starting vfn of the range, or -1 if none fits. */
int
vmmap_find_range(vmmap_t *map, uint32_t npages, int dir)
{
	KASSERT(NULL != map);
	dbg(DBG_VMMAP,"(GRADING 1.c) map is no NULL\n");
	KASSERT(0 < npages);
	dbg(DBG_VMMAP,"(GRADING 1.c) npages is greater than 0\n");

	dbg(DBG_VMMAP,"enter vmmap_find_range to find %u pages\n",npages);
	vmarea_t *vmarea;
	if (dir == VMMAP_DIR_LOHI) {
		/* Walk areas from low to high; gap_lo tracks the end of the
		 * previous area, i.e. the bottom of the candidate gap. */
		uint32_t gap_lo = ADDR_TO_PN(USER_MEM_LOW);
		list_iterate_begin(&map->vmm_list, vmarea, vmarea_t, vma_plink) {
			if (vmarea->vma_start - gap_lo >= npages) {
				return (int) gap_lo;
			}
			gap_lo = vmarea->vma_end;
		} list_iterate_end();
		/* Gap between the last area (or USER_MEM_LOW) and the top. */
		if (ADDR_TO_PN(USER_MEM_HIGH) - gap_lo >= npages) {
			return (int) gap_lo;
		}
	} else {
		/* VMMAP_DIR_HILO: walk from high to low; gap_hi tracks the
		 * start of the previously-visited (higher) area, i.e. the
		 * top of the candidate gap.  BUG FIX: the old code returned
		 * the BOTTOM of the first fitting gap; per the contract the
		 * range should sit as high in the address space as possible,
		 * i.e. at gap_hi - npages. */
		uint32_t gap_hi = ADDR_TO_PN(USER_MEM_HIGH);
		list_iterate_reverse(&map->vmm_list, vmarea, vmarea_t, vma_plink) {
			if (gap_hi - vmarea->vma_end >= npages) {
				return (int)(gap_hi - npages);
			}
			gap_hi = vmarea->vma_start;
		} list_iterate_end();
		/* Gap between USER_MEM_LOW and the lowest area (or the top). */
		if (gap_hi - ADDR_TO_PN(USER_MEM_LOW) >= npages) {
			return (int)(gap_hi - npages);
		}
	}
	return -1;
}

/* Find the vm_area that vfn lies in. Simply scan the address space
 * looking for a vma whose range covers vfn. If the page is unmapped,
 * return NULL. */
/* Scan the sorted area list for the vmarea whose half-open page range
 * [vma_start, vma_end) covers 'vfn'.  Returns NULL if vfn is unmapped. */
vmarea_t *
vmmap_lookup(vmmap_t *map, uint32_t vfn)
{
	KASSERT(NULL != map);
    dbg(DBG_VMMAP,"(GRADING 1.d) map is not NULL\n");

	dbg(DBG_VMMAP,"entered vmmap look up for vfn %u\n",vfn);
	vmarea_t *area;
	list_iterate_begin(&map->vmm_list, area, vmarea_t, vma_plink) {
		int covers = (area->vma_start <= vfn) && (vfn < area->vma_end);
		if (covers) {
			dbg(DBG_VMMAP,"leaving vmmap look up for vfn %u\n",vfn);
			return area;
		}
	} list_iterate_end();
	dbg(DBG_VMMAP,"leaving vmmap look up for vfn %u\n",vfn);
	return NULL;
}

/* Allocates a new vmmap containing a new vmarea for each area in the
 * given map. The areas should have no mmobjs set yet. Returns pointer
 * to the new vmmap on success, NULL on failure. This function is
 * called when implementing fork(2). */
/* Allocates a new vmmap containing a copy of each vmarea in 'map'.
 * The copies have no mmobjs set (the caller -- fork -- attaches them).
 * Returns the new vmmap, or NULL on allocation failure. */
vmmap_t *
vmmap_clone(vmmap_t *map)
{
	vmmap_t *newmap = vmmap_create();
	if (NULL == newmap) {
		return NULL;
	}
	/* BUG FIX: the old code set newarea = NULL and then returned NULL
	 * whenever newarea == NULL, so this function ALWAYS failed. */
	vmarea_t *oldarea;
	list_iterate_begin(&map->vmm_list, oldarea, vmarea_t, vma_plink) {
		vmarea_t *newarea = vmarea_alloc();
		if (NULL == newarea) {
			/* Undo what we have built so far. */
			vmmap_destroy(newmap);
			return NULL;
		}
		newarea->vma_start = oldarea->vma_start;
		newarea->vma_end = oldarea->vma_end;
		newarea->vma_off = oldarea->vma_off;
		newarea->vma_prot = oldarea->vma_prot;
		newarea->vma_flags = oldarea->vma_flags;
		newarea->vma_vmmap = newmap;
		newarea->vma_obj = NULL;          /* mmobj attached by caller */
		list_init(&newarea->vma_olink);   /* not yet on any mmobj list */
		list_insert_tail(&newmap->vmm_list, &newarea->vma_plink);
	} list_iterate_end();
	return newmap;
}

/* Insert a mapping into the map starting at lopage for npages pages.
 * If lopage is zero, we will find a range of virtual addresses in the
 * process that is big enough, by using vmmap_find_range with the same
 * dir argument.  If lopage is non-zero and the specified region
 * contains another mapping that mapping should be unmapped.
 *
 * If file is NULL an anon mmobj will be used to create a mapping
 * of 0's.  If file is non-null that vnode's file will be mapped in
 * for the given range.  Use the vnode's mmap operation to get the
 * mmobj for the file; do not assume it is file->vn_obj. Make sure all
 * of the area's fields except for vma_obj have been set before
 * calling mmap.
 *
 * If MAP_PRIVATE is specified set up a shadow object for the mmobj.
 *
 * All of the input to this function should be valid (KASSERT!).
 * See mmap(2) for for description of legal input.
 * Note that off should be page aligned.
 *
 * Be very careful about the order operations are performed in here. Some
 * operation are impossible to undo and should be saved until there
 * is no chance of failure.
 *
 * If 'new' is non-NULL a pointer to the new vmarea_t should be stored in it.
 */
/* Insert a mapping of npages pages at lopage (or at a free range found
 * via vmmap_find_range when lopage == 0) into 'map'; see the block
 * comment above for the full contract.  Returns 0 on success, -1 on
 * failure. */
int
vmmap_map(vmmap_t *map, vnode_t *file, uint32_t lopage, uint32_t npages,
          int prot, int flags, off_t off, int dir, vmarea_t **new)
{
	
	dbg(DBG_VMMAP,"enter vmmap_map, map page %u to %u,off is %u\n",lopage,lopage+npages,off);
	KASSERT(map!=NULL);
	dbg(DBG_VMMAP,"(GRADING 1.f) map is not NULL\n");

	KASSERT(0 < npages);
	dbg(DBG_VMMAP,"(GRADING 1.f) npages is greater than 0\n");

	KASSERT(!(~(PROT_NONE | PROT_READ | PROT_WRITE | PROT_EXEC) & prot));
	dbg(DBG_VMMAP,"(GRADING 1.f) prot is set\n");


	KASSERT((MAP_SHARED & flags) || (MAP_PRIVATE & flags));
	dbg(DBG_VMMAP,"(GRADING 1.f) flsgs is set in shared or private are all \n");

	KASSERT((0 == lopage) || (ADDR_TO_PN(USER_MEM_LOW) <= lopage));
	dbg(DBG_VMMAP,"(GRADING 1.f) lopage is higher than USER_MEM_LOW \n");

	KASSERT((0 == lopage) || (ADDR_TO_PN(USER_MEM_HIGH) >= (lopage + npages)));
	dbg(DBG_VMMAP,"(GRADING 1.f) lopage + npages is lower than USER_MEM_HIGH\n");

	KASSERT(PAGE_ALIGNED(off));
	int returncode;
	uint32_t i=0;    /* NOTE(review): unused */
	pframe_t *pf;    /* NOTE(review): unused (only referenced in commented-out code) */
	vmarea_t *vmarea;
	vmarea = vmarea_alloc();
	if(vmarea == NULL){
		return -1;
	}
	/* Fields that must be valid before vn_ops->mmap is called. */
	vmarea->vma_flags = flags;
	vmarea->vma_off = off/PAGE_SIZE;  /* byte offset -> page offset */
	vmarea->vma_prot = prot;
	
	
	if(lopage == 0){
		/* No fixed address requested: pick one.
		 * NOTE(review): vmmap_find_range is called twice with the
		 * same arguments; the first result could simply be saved. */
		if(vmmap_find_range(map, npages, dir) == -1){
			/* NOTE(review): 'vmarea' leaks on this error path. */
			return -1;/*if no range larger than npages, what value should return?*/
		}
		vmarea->vma_start = vmmap_find_range(map, npages, dir);
		/* NOTE(review): this overwrites the page offset computed from
		 * 'off' above with the start vfn -- presumably intentional
		 * for anonymous placement, but confirm against the page
		 * fault path. */
		vmarea->vma_off = vmarea->vma_start;
		vmarea->vma_end = vmarea->vma_start + npages;
		dbg(DBG_ELF,"Create vmarea %d to pagenum %d\n",vmarea->vma_start,vmarea->vma_end);
		vmmap_insert(map, vmarea);
		if(file != NULL){
			/* File-backed: obtain the mmobj from the vnode. */
			returncode = file->vn_ops->mmap(file,vmarea, &(vmarea->vma_obj));
			if(returncode < 0){
				/* NOTE(review): vmarea remains inserted in the
				 * map (and leaks) on this error path. */
				return -1;
			}
			
				/*dbg(DBG_VMMAP,"pframe_get %d\n",vmarea->vma_start);
				pframe_get(vmarea->vma_obj,vmarea->vma_off,&pf);*/
			
			
			vmarea->vma_obj->mmo_ops->ref(vmarea->vma_obj);
			vref(file);/*not sure*/
		}
		else{/*what about off proct flags*/
			/* Anonymous mapping: fresh zero-filled object. */
			vmarea->vma_obj = anon_create();
			
				/*dbg(DBG_VMMAP,"pframe_get %d\n",vmarea->vma_start);
				pframe_get(vmarea->vma_obj,vmarea->vma_off,&pf);
				pframe_pin(pf);*/
			/* NOTE(review): magic constant 8 -- presumably MAP_ANON;
			 * confirm against mman.h and replace with the symbol.
			 * anon_create's failure (NULL) is also unchecked. */
			vmarea->vma_flags=8;			
			/*vmarea->vma_obj->mmo_ops->ref(vmarea->vma_obj);*/
			/*need to increase ref?*/
			vmarea->vma_prot = PROT_NONE;
			
		}
	}
	else{
		/* Fixed address: evict anything already mapped there. */
		if(vmmap_is_range_empty(map, lopage, npages)==0){/*range is mapped */
			if(vmmap_remove(map, lopage, npages)==-1){
				return -1;
			}
		}
		vmarea->vma_start = lopage;
		vmarea->vma_end = lopage + npages;
		vmmap_insert(map, vmarea);
		if(file != NULL){
			pframe_t *pf;  /* NOTE(review): shadows the outer 'pf'; both unused */
			returncode = file->vn_ops->mmap(file,vmarea, &(vmarea->vma_obj));
			if(returncode < 0){
				/* NOTE(review): vmarea remains inserted in the
				 * map (and leaks) on this error path. */
				return -1;
			}
		
				/*dbg(DBG_VMMAP,"pframe_get %d\n",vmarea->vma_start);
				pframe_get(vmarea->vma_obj,vmarea->vma_off,&pf);*/
				
			
			vmarea->vma_obj->mmo_ops->ref(vmarea->vma_obj);
			vref(file);/*not sure*/
		}
		else{/*what about off proct flags*/
			vmarea->vma_obj = anon_create();
			
				/*dbg(DBG_VMMAP,"pframe_get %d\n",vmarea->vma_start);
				pframe_get(vmarea->vma_obj,vmarea->vma_off,&pf);
				pframe_pin(pf);*/
			/* NOTE(review): magic 8 again (presumably MAP_ANON); note
			 * this branch DOES take an extra ref while the lopage==0
			 * branch does not -- the two should agree. */
			vmarea->vma_flags=8;
			vmarea->vma_obj->mmo_ops->ref(vmarea->vma_obj);/*not sure*/
			/*need to increase ref?*/
			vmarea->vma_prot = PROT_NONE;
			
		}

	}

	if((flags&MAP_PRIVATE) != 0){
		/* Private mapping: interpose a shadow object so writes are
		 * copy-on-write.  NOTE(review): shadow_create's failure is
		 * unchecked, and mmo_bottom_obj is set to the current object,
		 * which is correct only because it is freshly created here. */
		mmobj_t *shadowobj;
		shadowobj = shadow_create();
		shadowobj->mmo_un.mmo_bottom_obj = vmarea->vma_obj;

		shadowobj->mmo_shadowed = vmarea->vma_obj;
		vmarea->vma_obj = shadowobj;
	}
	else
		/* Shared mapping: link the area onto the object's vma list. */
		list_insert_tail(&(vmarea->vma_obj->mmo_un.mmo_vmas),&(vmarea->vma_olink));
	dbg(DBG_ELF,"Create vmarea %d to pagenum %d\n",vmarea->vma_start,vmarea->vma_end);
	if(new!=NULL)
		*new = vmarea;
	dbg(DBG_VMMAP,"leave vmmap_map, map page %u to %u\n",lopage,lopage+npages);
        return 0;
}

/*
 * We have no guarantee that the region of the address space being
 * unmapped will play nicely with our list of vmareas.
 *
 * You must iterate over each vmarea that is partially or wholly covered
 * by the address range [addr ... addr+len). The vm-area will fall into one
 * of four cases, as illustrated below:
 *
 * key:
 *          [             ]   Existing VM Area
 *        *******             Region to be unmapped
 *
 * Case 1:  [   ******    ]
 * The region to be unmapped lies completely inside the vmarea. We need to
 * split the old vmarea into two vmareas. be sure to increment the
 * reference count to the file associated with the vmarea.
 *
 * Case 2:  [      *******]**
 * The region overlaps the end of the vmarea. Just shorten the length of
 * the mapping.
 *
 * Case 3: *[*****        ]
 * The region overlaps the beginning of the vmarea. Move the beginning of
 * the mapping (remember to update vma_off), and shorten its length.
 *
 * Case 4: *[*************]**
 * The region completely contains the vmarea. Remove the vmarea from the
 * list.
 */
int
vmmap_remove(vmmap_t *map, uint32_t lopage, uint32_t npages)
{
	dbg(DBG_VMMAP,"enter vmmap_remove, remove page %u to %u\n",lopage,lopage+npages);
	uint32_t hipage = lopage + npages; /* one past the last page to unmap */
	vmarea_t *vmarea;
	list_iterate_begin(&map->vmm_list, vmarea, vmarea_t, vma_plink) {
		/* BUG FIX: the old code had no overlap guard, so an area
		 * lying entirely ABOVE the range fell into its "case 3"
		 * branch and had vma_start pulled down to hipage,
		 * corrupting the map. */
		if (vmarea->vma_end <= lopage || vmarea->vma_start >= hipage) {
			continue; /* no overlap with [lopage, hipage) */
		}
		if (lopage <= vmarea->vma_start && hipage >= vmarea->vma_end) {
			/* Case 4: region completely contains the vmarea. */
			list_remove(&vmarea->vma_plink);
			/* NOTE(review): the area's vma_obj reference is not
			 * put here, and the page tables are not flushed --
			 * confirm whether the caller handles both. */
			vmarea_free(vmarea);
		} else if (lopage <= vmarea->vma_start) {
			/* Case 3: region overlaps the beginning.  BUG FIX:
			 * the old code updated vma_start BEFORE computing the
			 * offset delta, so vma_off never actually changed. */
			vmarea->vma_off += hipage - vmarea->vma_start;
			vmarea->vma_start = hipage;
		} else if (hipage >= vmarea->vma_end) {
			/* Case 2: region overlaps the end -- just shorten. */
			vmarea->vma_end = lopage;
		} else {
			/* Case 1: region strictly inside -- split in two.
			 * The front half keeps the original start/offset;
			 * both halves share the mmobj, so take a reference. */
			vmarea->vma_obj->mmo_ops->ref(vmarea->vma_obj);
			vmarea_t *front = vmarea_alloc();
			if (NULL == front) {
				return -1; /* old code dereferenced NULL here */
			}
			front->vma_start = vmarea->vma_start;
			front->vma_end = lopage;
			front->vma_off = vmarea->vma_off;
			front->vma_prot = vmarea->vma_prot;
			front->vma_flags = vmarea->vma_flags;
			front->vma_obj = vmarea->vma_obj;
			front->vma_vmmap = vmarea->vma_vmmap;
			/* NOTE(review): struct-copying a list link, as the
			 * old code did -- confirm olink handling for shared
			 * mappings. */
			front->vma_olink = vmarea->vma_olink;
			list_insert_before(&vmarea->vma_plink, &front->vma_plink);
			/* Back half: advance start and offset together.
			 * BUG FIX: the old code left vma_off unchanged. */
			vmarea->vma_off += hipage - vmarea->vma_start;
			vmarea->vma_start = hipage;
		}
	} list_iterate_end();
	dbg(DBG_VMMAP,"leave vmmap_remove, map page %u to %u\n",lopage,lopage+npages);
	return 0;
}

/*
 * Returns 1 if the given address space has no mappings for the
 * given range, 0 otherwise.
 */
/* Returns 1 if [startvfn, startvfn + npages) has no mappings in 'map',
 * 0 otherwise. */
int
vmmap_is_range_empty(vmmap_t *map, uint32_t startvfn, uint32_t npages)
{
	KASSERT(npages>0&&startvfn>=ADDR_TO_PN(USER_MEM_LOW)&&startvfn<ADDR_TO_PN(USER_MEM_HIGH));
	dbg(DBG_VMMAP,"(GRADING 1.e) npages and startvfn are in right values\n");

	uint32_t endvfn = startvfn + npages; /* exclusive upper bound */
	vmarea_t *vmarea;
	list_iterate_begin(&map->vmm_list, vmarea, vmarea_t, vma_plink) {
		/* Two half-open intervals overlap iff each starts before the
		 * other ends.  BUG FIX: the old test only checked whether an
		 * ENDPOINT of the query fell inside an area, so it missed
		 * areas lying entirely inside [startvfn, endvfn) and wrongly
		 * reported the range as empty. */
		if (startvfn < vmarea->vma_end && vmarea->vma_start < endvfn) {
			return 0;
		}
	} list_iterate_end();
	return 1;
}

/* Read into 'buf' from the virtual address space of 'map' starting at
 * 'vaddr' for size 'count'. To do so, you will want to find the vmareas
 * to read from, then find the pframes within those vmareas corresponding
 * to the virtual addresses you want to read, and then read from the
 * physical memory that pframe points to. You should not check permissions
 * of the areas. Assume (KASSERT) that all the areas you are accessing exist.
 * Returns 0 on success, -errno on error.
 */
/* Read 'count' bytes from virtual address 'vaddr' of 'map' into 'buf',
 * one page at a time, by pulling pframes from the backing mmobjs.
 * Permissions are not checked.  Returns 0 on success, -EFAULT if any
 * page in the range is unmapped or cannot be brought in. */
int
vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
{
	dbg(DBG_VMMAP,"enter vmmap_read, read %u bytes from pn %u\n",
	    count, ADDR_TO_PN((uint32_t)vaddr));
	pframe_t *pframe;
	while (count > 0) {
		uint32_t vfn = ADDR_TO_PN((uint32_t)vaddr);
		/* BUG FIX: re-resolve the vmarea for every page.  The old
		 * code looked it up once before the loop, so any read that
		 * crossed into the next vmarea kept using the first area's
		 * object and offsets. */
		vmarea_t *vmarea = vmmap_lookup(map, vfn);
		if (NULL == vmarea) {
			return -EFAULT;
		}
		if (pframe_get(vmarea->vma_obj,
		               vfn - vmarea->vma_start + vmarea->vma_off,
		               &pframe) < 0) {
			return -EFAULT;
		}
		/* Copy up to the end of the current page. */
		size_t page_left = PAGE_SIZE - PAGE_OFFSET(vaddr);
		size_t chunk = (count < page_left) ? count : page_left;
		memcpy(buf, (void *)((size_t)pframe->pf_addr + PAGE_OFFSET(vaddr)), chunk);
		buf = (void *)((size_t)buf + chunk);
		vaddr = (const void *)((size_t)vaddr + chunk);
		count -= chunk;
	}
	dbg(DBG_VMMAP,"leave vmmap_read\n");
	return 0;
}

/* Write from 'buf' into the virtual address space of 'map' starting at
 * 'vaddr' for size 'count'. To do this, you will need to find the correct
 * vmareas to write into, then find the correct pframes within those vmareas,
 * and finally write into the physical addresses that those pframes correspond
 * to. You should not check permissions of the areas you use. Assume (KASSERT)
 * that all the areas you are accessing exist. Remember to dirty pages!
 * Returns 0 on success, -errno on error.
 */
/* Write 'count' bytes from 'buf' into virtual address 'vaddr' of 'map',
 * one page at a time, dirtying each pframe written.  Permissions are
 * not checked.  Returns 0 on success, -EFAULT if any page in the range
 * is unmapped or cannot be brought in. */
int
vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
{
	dbg(DBG_VMMAP,"enter vmmap_write, write to pn %u  %u bytes\n",
	    ADDR_TO_PN((uint32_t)vaddr), count);
	pframe_t *pframe;
	while (count > 0) {
		uint32_t vfn = ADDR_TO_PN((uint32_t)vaddr);
		/* BUG FIX: re-resolve the vmarea for every page (the old
		 * code looked it up once, breaking writes that crossed a
		 * vmarea boundary). */
		vmarea_t *vmarea = vmmap_lookup(map, vfn);
		if (NULL == vmarea) {
			return -EFAULT;
		}
		/* forwrite = 1: we are about to modify the frame. */
		if (pframe_lookup(vmarea->vma_obj,
		                  vfn - vmarea->vma_start + vmarea->vma_off,
		                  1, &pframe) < 0) {
			return -EFAULT;
		}
		size_t page_left = PAGE_SIZE - PAGE_OFFSET(vaddr);
		size_t chunk = (count < page_left) ? count : page_left;
		memcpy((void *)((size_t)pframe->pf_addr + PAGE_OFFSET(vaddr)), buf, chunk);
		pframe_dirty(pframe); /* mark after the bytes are in place */
		buf = (const void *)((size_t)buf + chunk);
		vaddr = (void *)((size_t)vaddr + chunk);
		count -= chunk;
	}
	dbg(DBG_VMMAP,"leave vmmap_write\n");
	return 0;
}
/* A debugging routine: dumps the mappings of the given address space
 * into 'buf' (at most 'osize' bytes, always NUL-terminated).  Returns
 * the number of bytes written. */
size_t
vmmap_mapping_info(const void *vmmap, char *buf, size_t osize)
{

        vmmap_t *map = (vmmap_t *)vmmap;
        vmarea_t *vma;
        /* 'size' counts the space remaining in buf; signed so it can go
         * negative when snprintf reports truncation. */
        ssize_t size = (ssize_t)osize;

        /* Table header row. */
        int len = snprintf(buf, size, "%21s %5s %7s %8s %10s %12s\n",
                           "VADDR RANGE", "PROT", "FLAGS", "MMOBJ", "OFFSET",
                           "VFN RANGE");

        list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
                /* Consume the space used by the previous snprintf before
                 * writing the next row. */
                size -= len;
                buf += len;
                if (0 >= size) {
                        goto end;
                }

                /* One row per vmarea: address range, rwx bits, share mode,
                 * backing object, page offset, and vfn range. */
                len = snprintf(buf, size,
                               "%#.8x-%#.8x  %c%c%c  %7s 0x%p %#.5x %#.5x-%#.5x\n",
                               vma->vma_start << PAGE_SHIFT,
                               vma->vma_end << PAGE_SHIFT,
                               (vma->vma_prot & PROT_READ ? 'r' : '-'),
                               (vma->vma_prot & PROT_WRITE ? 'w' : '-'),
                               (vma->vma_prot & PROT_EXEC ? 'x' : '-'),
                               (vma->vma_flags & MAP_SHARED ? " SHARED" : "PRIVATE"),
                               vma->vma_obj, vma->vma_off, vma->vma_start, vma->vma_end);
        } list_iterate_end();

end:
        /* On truncation, report the full buffer as used and make sure it
         * is NUL-terminated. */
        if (size <= 0) {
                size = osize;
                buf[osize - 1] = '\0';
        }
        /*
        KASSERT(0 <= size);
        if (0 == size) {
                size++;
                buf--;
                buf[0] = '\0';
        }
        */
        return osize - size;
}
