#include "kernel.h"
#include "errno.h"
#include "globals.h"

#include "vm/vmmap.h"
#include "vm/shadow.h"
#include "vm/anon.h"

#include "proc/proc.h"

#include "util/debug.h"
#include "util/list.h"
#include "util/string.h"
#include "util/printf.h"

#include "fs/vnode.h"
#include "fs/file.h"
#include "fs/fcntl.h"
#include "fs/vfs_syscall.h"

#include "mm/slab.h"
#include "mm/page.h"
#include "mm/mm.h"
#include "mm/mman.h"
#include "mm/mmobj.h"

static slab_allocator_t *vmmap_allocator;
static slab_allocator_t *vmarea_allocator;

/* One-time initialization: create the slab caches from which all
 * vmmap and vmarea structures are allocated. */
void
vmmap_init(void)
{
        vmarea_allocator = slab_allocator_create("vmarea", sizeof(vmarea_t));
        KASSERT(vmarea_allocator != NULL && "failed to create vmarea allocator!");
        vmmap_allocator = slab_allocator_create("vmmap", sizeof(vmmap_t));
        KASSERT(vmmap_allocator != NULL && "failed to create vmmap allocator!");
}

/* Allocate a vmarea from the slab cache with every field cleared:
 * no owning map, no backing object, an empty page range, and
 * unlinked list hooks. Returns NULL if the cache is exhausted.
 * (The original initialized vma_vmmap twice; the duplicate is gone.) */
vmarea_t *
vmarea_alloc(void)
{
        vmarea_t *newvma = (vmarea_t *) slab_obj_alloc(vmarea_allocator);
        if (newvma) {
                newvma->vma_vmmap = NULL;
                newvma->vma_obj = NULL;
                newvma->vma_start = 0;
                newvma->vma_end = 0;
                newvma->vma_off = 0;
                newvma->vma_prot = 0;
                newvma->vma_flags = 0;
                list_link_init(&newvma->vma_plink);
                list_link_init(&newvma->vma_olink);
        }
        return newvma;
}

/* Return a vmarea to the slab cache. The caller is responsible for
 * having unlinked it from any lists and released its object ref. */
void
vmarea_free(vmarea_t *vma)
{
        KASSERT(vma != NULL);
        slab_obj_free(vmarea_allocator, vma);
}

/* Create a new vmmap, which has no vmareas and does
 * not refer to a process. */
vmmap_t *
vmmap_create(void)
{
	vmmap_t *map = (vmmap_t *) slab_obj_alloc(vmmap_allocator);

	if (map == NULL) {
		return NULL;
	}

	list_init(&map->vmm_list);
	map->vmm_proc = NULL;
	return map;
}

/* Removes all vmareas from the address space and frees the
 * vmmap struct. Each area drops its reference on its backing
 * mmobj before being returned to the slab cache.
 * (The original kept a loop counter that was never read.) */
void
vmmap_destroy(vmmap_t *map)
{
	KASSERT(map != NULL);

	while (!list_empty(&map->vmm_list)) {
		vmarea_t *vma = list_head(&map->vmm_list, vmarea_t, vma_plink);
		/* Unlink from both the map's list and the object's list
		 * before releasing the object reference. */
		list_remove(&vma->vma_plink);
		list_remove(&vma->vma_olink);
		vma->vma_obj->mmo_ops->put(vma->vma_obj);
		vmarea_free(vma);
	}

	slab_obj_free(vmmap_allocator, map);
}

/* Add a vmarea to an address space. Assumes (i.e. asserts to some extent)
 * the vmarea is valid.  This involves finding where to put it in the list
 * of VM areas, and adding it. Don't forget to set the vma_vmmap for the
 * area.
 *
 * NOTE: the original asserted vma_vmmap != NULL on entry, which is
 * contradictory — this function is what sets vma_vmmap, and callers
 * inserting a freshly allocated area (whose vma_vmmap is NULL) would
 * panic.  That assert has been removed. */
void
vmmap_insert(vmmap_t *map, vmarea_t *newvma)
{
	KASSERT(map != NULL && newvma != NULL);
	KASSERT(newvma->vma_start < newvma->vma_end);
	KASSERT(ADDR_TO_PN(USER_MEM_LOW) <= newvma->vma_start &&
	        ADDR_TO_PN(USER_MEM_HIGH) >= newvma->vma_end);

	vmarea_t *cur = NULL;

	/* The list is kept sorted by starting vfn: insert before the first
	 * existing area that begins at or after the new area's end. */
	list_iterate_begin(&map->vmm_list, cur, vmarea_t, vma_plink) {
		if (newvma->vma_end <= cur->vma_start) {
			list_insert_before(&cur->vma_plink, &newvma->vma_plink);
			newvma->vma_vmmap = map;
			return;
		}
		/* If the new area neither ends before cur nor starts after
		 * it, the ranges overlap — invalid input. */
		if (newvma->vma_start < cur->vma_end) {
			panic("not a valid vmarea\n");
		}
	} list_iterate_end();

	/* Empty list, or newvma lies past every existing area. */
	list_insert_tail(&map->vmm_list, &newvma->vma_plink);
	newvma->vma_vmmap = map;
}

/* Find a contiguous range of free virtual pages of length npages in
 * the given address space. Returns starting vfn for the range,
 * without altering the map. Returns -1 if no such range exists.
 *
 * Your algorithm should be first fit. If dir is VMMAP_DIR_HILO, you
 * should find a gap as high in the address space as possible; if dir
 * is VMMAP_DIR_LOHI, the gap should be as low as possible.
 *
 * FIX: the original HILO path returned the LOW end of the first
 * fitting gap (vma_end), which is the lowest placement within that
 * gap, not the highest.  We now place the range flush against the
 * top of the gap (gap_end - npages). */
int
vmmap_find_range(vmmap_t *map, uint32_t npages, int dir)
{
	KASSERT(map != NULL);
	KASSERT(npages > 0);

	uint32_t lo = ADDR_TO_PN(USER_MEM_LOW);
	uint32_t hi = ADDR_TO_PN(USER_MEM_HIGH);
	uint32_t gap_start = 0;
	uint32_t gap_end = 0;
	vmarea_t *cur = NULL;

	/* Guard against a request larger than the whole user range
	 * (also prevents unsigned underflow in hi - npages below). */
	if (npages > hi - lo) {
		return -1;
	}

	if (dir == VMMAP_DIR_LOHI) {
		/* Scan gaps bottom-up; take the lowest address of the
		 * first gap that fits.  gap_start tracks the first vfn
		 * not covered by any area seen so far (list is sorted). */
		gap_start = lo;
		list_iterate_begin(&map->vmm_list, cur, vmarea_t, vma_plink) {
			if (cur->vma_start >= gap_start + npages) {
				return (int)gap_start;
			}
			if (cur->vma_end > gap_start) {
				gap_start = cur->vma_end;
			}
		} list_iterate_end();
		if (gap_start + npages <= hi) {
			return (int)gap_start;
		}
		return -1;
	}

	if (dir == VMMAP_DIR_HILO) {
		/* Scan gaps top-down; place the range as high as possible
		 * inside the first gap (from the top) that fits. */
		gap_end = hi;
		list_iterate_reverse(&map->vmm_list, cur, vmarea_t, vma_plink) {
			if (cur->vma_end + npages <= gap_end) {
				return (int)(gap_end - npages);
			}
			gap_end = cur->vma_start;
		} list_iterate_end();
		if (lo + npages <= gap_end) {
			return (int)(gap_end - npages);
		}
		return -1;
	}

	return -EINVAL;
}

/* Find the vm_area that vfn lies in. Simply scan the address space
 * looking for a vma whose range covers vfn. If the page is unmapped,
 * return NULL. */
vmarea_t *
vmmap_lookup(vmmap_t *map, uint32_t vfn)
{
	KASSERT(map != NULL);
	dbg(DBG_TEST, "KASSERT PASS\n");

	vmarea_t *vma = NULL;

	/* The list is sorted by start vfn, so once an area begins past
	 * vfn no later area can contain it. */
	list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
		if (vfn < vma->vma_start) {
			return NULL;
		}
		if (vfn < vma->vma_end) {
			return vma;
		}
	} list_iterate_end();

	return NULL;
}

/* Allocates a new vmmap containing a new vmarea for each area in the
 * given map. The areas should have no mmobjs set yet. Returns pointer
 * to the new vmmap on success, NULL on failure. This function is
 * called when implementing fork(2).
 *
 * FIX: on allocation failure the original called vmmap_destroy() on
 * the partial clone, but the cloned areas have NULL vma_obj (by this
 * function's contract), so destroy would dereference NULL.  We unwind
 * by hand instead.  We also set vma_vmmap before vmmap_insert so the
 * insert precondition holds. */
vmmap_t *
vmmap_clone(vmmap_t *map)
{
	KASSERT(map != NULL);

	vmmap_t *new_vmmap = vmmap_create();
	vmarea_t *src = NULL;

	if (new_vmmap == NULL) {
		return NULL;
	}

	list_iterate_begin(&map->vmm_list, src, vmarea_t, vma_plink) {
		vmarea_t *copy = vmarea_alloc();
		if (copy == NULL) {
			/* Free the areas inserted so far without touching
			 * their (NULL) vma_obj, then free the map itself. */
			while (!list_empty(&new_vmmap->vmm_list)) {
				vmarea_t *partial =
				        list_head(&new_vmmap->vmm_list,
				                  vmarea_t, vma_plink);
				list_remove(&partial->vma_plink);
				vmarea_free(partial);
			}
			slab_obj_free(vmmap_allocator, new_vmmap);
			return NULL;
		}
		copy->vma_start = src->vma_start;
		copy->vma_end = src->vma_end;
		copy->vma_off = src->vma_off;
		copy->vma_prot = src->vma_prot;
		copy->vma_flags = src->vma_flags;
		/* vma_obj is intentionally left NULL; the fork code sets
		 * up (shadow) objects for each cloned area afterwards. */
		copy->vma_vmmap = new_vmmap;
		vmmap_insert(new_vmmap, copy);
	} list_iterate_end();

	return new_vmmap;
}

/* Insert a mapping into the map starting at lopage for npages pages.
 * If lopage is zero, we will find a range of virtual addresses in the
 * process that is big enough, by using vmmap_find_range with the same
 * dir argument.  If lopage is non-zero and the specified region
 * contains another mapping that mapping should be unmapped.
 *
 * If file is NULL an anon mmobj will be used to create a mapping
 * of 0's.  If file is non-null that vnode's file will be mapped in
 * for the given range.  Use the vnode's mmap operation to get the
 * mmobj for the file; do not assume it is file->vn_obj. Make sure all
 * of the area's fields except for vma_obj have been set before
 * calling mmap.
 *
 * If MAP_PRIVATE is specified set up a shadow object for the mmobj.
 *
 * All of the input to this function should be valid (KASSERT!).
 * See mmap(2) for for description of legal input.
 * Note that off should be page aligned.
 *
 * Be very careful about the order operations are performed in here. Some
 * operation are impossible to undo and should be saved until there
 * is no chance of failure.
 *
 * If 'new' is non-NULL a pointer to the new vmarea_t should be stored in it.
 */
/* See the contract above.  This version fixes several defects in the
 * original:
 *  - file-backed MAP_SHARED mappings never had vma_obj set at all;
 *  - vmmap_is_range_empty() was called with lopage even when lopage
 *    was 0, tripping that function's USER_MEM_LOW assertion;
 *  - shadow_create()'s result was not checked for NULL;
 *  - the vnode mmap op's return code was checked only after the
 *    returned object had already been used;
 *  - pages were eagerly faulted in and pinned (anon case: pinned and
 *    never unpinned — a leak; file case: panic on failure) — mappings
 *    are now demand-faulted like the rest of the VM system;
 *  - the prot KASSERT listed PROT_READ twice;
 *  - a dead page-counting loop ran before return. */
int
vmmap_map(vmmap_t *map, vnode_t *file, uint32_t lopage, uint32_t npages,
          int prot, int flags, off_t off, int dir, vmarea_t **new)
{
	KASSERT(map != NULL);
	KASSERT(npages > 0);
	KASSERT(!(prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)));
	KASSERT((flags & MAP_PRIVATE) || (flags & MAP_SHARED));
	KASSERT((lopage == 0) || (ADDR_TO_PN(USER_MEM_LOW) <= lopage));
	KASSERT((lopage == 0) || (ADDR_TO_PN(USER_MEM_HIGH) >= (lopage + npages)));
	KASSERT(PAGE_ALIGNED(off));

	int err = 0;
	uint32_t startvfn = lopage;
	vmarea_t *vma = NULL;
	mmobj_t *obj = NULL;
	mmobj_t *shadow = NULL;

	/* Pick the placement first: either the caller fixed it (lopage)
	 * or we search for a free range in the requested direction. */
	if (lopage == 0) {
		int vfn = vmmap_find_range(map, npages, dir);
		if (vfn < 0) {
			return -ENOMEM;
		}
		startvfn = (uint32_t)vfn;
	}

	vma = vmarea_alloc();
	if (vma == NULL) {
		return -ENOMEM;
	}
	/* All fields except vma_obj must be set before calling the
	 * vnode's mmap operation (see contract above). */
	vma->vma_start = startvfn;
	vma->vma_end = startvfn + npages;
	vma->vma_off = (uint32_t)(off >> PAGE_SHIFT);
	vma->vma_prot = prot;
	vma->vma_flags = flags;
	vma->vma_vmmap = map;

	/* Obtain the backing object: an anonymous zero-fill object, or
	 * whatever the vnode's mmap op hands back (do NOT assume it is
	 * file->vn_obj). */
	if (file == NULL) {
		obj = anon_create();
		if (obj == NULL) {
			vmarea_free(vma);
			return -ENOMEM;
		}
	} else {
		KASSERT(file->vn_ops->mmap);
		err = file->vn_ops->mmap(file, vma, &obj);
		if (err < 0) {
			vmarea_free(vma);
			return err;
		}
	}

	/* Private mappings get a shadow object on top so that writes
	 * never reach the underlying object (copy-on-write). */
	if (flags & MAP_PRIVATE) {
		shadow = shadow_create();
		if (shadow == NULL) {
			obj->mmo_ops->put(obj);
			vmarea_free(vma);
			return -ENOMEM;
		}
		shadow->mmo_shadowed = obj;
		shadow->mmo_un.mmo_bottom_obj = mmobj_bottom_obj(obj);
		vma->vma_obj = shadow;
	} else {
		vma->vma_obj = obj;
	}

	/* Evict any existing mapping in a caller-fixed range.  This is
	 * the last operation that can fail; only check when lopage is
	 * nonzero (vmmap_find_range already returned an empty range,
	 * and is_range_empty asserts startvfn >= USER_MEM_LOW). */
	if (lopage != 0 && !vmmap_is_range_empty(map, startvfn, npages)) {
		err = vmmap_remove(map, startvfn, npages);
		if (err < 0) {
			/* Putting vma_obj releases the shadow chain,
			 * including the underlying object. */
			vma->vma_obj->mmo_ops->put(vma->vma_obj);
			vmarea_free(vma);
			return err;
		}
	}

	/* Link the area onto the bottom object's vma list (unless the
	 * vnode mmap op already did so) and into the address space. */
	if (!list_link_is_linked(&vma->vma_olink)) {
		list_insert_tail(&mmobj_bottom_obj(vma->vma_obj)->mmo_un.mmo_vmas,
		                 &vma->vma_olink);
	}
	vmmap_insert(map, vma);

	if (new != NULL) {
		*new = vma;
	}
	return 0;
}

/*
 * We have no guarantee that the region of the address space being
 * unmapped will play nicely with our list of vmareas.
 *
 * You must iterate over each vmarea that is partially or wholly covered
 * by the address range [addr ... addr+len). The vm-area will fall into one
 * of four cases, as illustrated below:
 *
 * key:
 *          [             ]   Existing VM Area
 *        *******             Region to be unmapped
 *
 * Case 1:  [   ******    ]
 * The region to be unmapped lies completely inside the vmarea. We need to
 * split the old vmarea into two vmareas. be sure to increment the
 * reference count to the file associated with the vmarea.
 *
 * Case 2:  [      *******]**
 * The region overlaps the end of the vmarea. Just shorten the length of
 * the mapping.
 *
 * Case 3: *[*****        ]
 * The region overlaps the beginning of the vmarea. Move the beginning of
 * the mapping (remember to update vma_off), and shorten its length.
 *
 * Case 4: *[*************]**
 * The region completely contains the vmarea. Remove the vmarea from the
 * list.
 */
/* See the four cases documented above.  This version fixes several
 * ordering bugs in the original:
 *  - case 1 adjusted vma_off AFTER moving vma_start to hipage, so the
 *    offset delta was always zero;
 *  - case 2 unmapped [lopage, vma_end) AFTER setting vma_end = lopage,
 *    i.e. an empty range;
 *  - case 3 unmapped starting from the NEW vma_start (== hipage), also
 *    an empty range;
 *  - case 4 unlinked the area and put its object but never freed the
 *    vmarea itself (slab leak). */
int
vmmap_remove(vmmap_t *map, uint32_t lopage, uint32_t npages)
{
	KASSERT(map);

	uint32_t hipage = lopage + npages;
	vmarea_t *vma = NULL;

	list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
		if (vma->vma_end <= lopage || vma->vma_start >= hipage) {
			continue; /* no overlap with [lopage, hipage) */
		}
		if (vma->vma_start < lopage && vma->vma_end > hipage) {
			/* Case 1: range lies strictly inside — split in two.
			 * The new area keeps the low part; vma becomes the
			 * high part. Both share the same mmobj, so take an
			 * extra reference. */
			vmarea_t *lowpart = vmarea_alloc();
			if (lowpart == NULL) {
				return -ENOMEM;
			}
			lowpart->vma_start = vma->vma_start;
			lowpart->vma_end = lopage;
			lowpart->vma_off = vma->vma_off;
			lowpart->vma_prot = vma->vma_prot;
			lowpart->vma_flags = vma->vma_flags;
			lowpart->vma_vmmap = vma->vma_vmmap;
			lowpart->vma_obj = vma->vma_obj;
			/* Adjust the offset with the OLD start before
			 * moving the start up to hipage. */
			vma->vma_off += hipage - vma->vma_start;
			vma->vma_start = hipage;
			vmmap_insert(map, lowpart);
			list_insert_before(&vma->vma_olink, &lowpart->vma_olink);
			vma->vma_obj->mmo_ops->ref(vma->vma_obj);
			pt_unmap_range(curproc->p_pagedir,
			               (uintptr_t)PN_TO_ADDR(lopage),
			               (uintptr_t)PN_TO_ADDR(hipage));
			return 0;
		} else if (vma->vma_start < lopage) {
			/* Case 2: range overlaps the tail — shorten it.
			 * Remember the old end so we unmap a real range. */
			uint32_t old_end = vma->vma_end;
			vma->vma_end = lopage;
			pt_unmap_range(curproc->p_pagedir,
			               (uintptr_t)PN_TO_ADDR(lopage),
			               (uintptr_t)PN_TO_ADDR(old_end));
		} else if (vma->vma_end > hipage) {
			/* Case 3: range overlaps the head — move the start
			 * up and bump the offset by the pages dropped. */
			uint32_t old_start = vma->vma_start;
			vma->vma_off += hipage - old_start;
			vma->vma_start = hipage;
			pt_unmap_range(curproc->p_pagedir,
			               (uintptr_t)PN_TO_ADDR(old_start),
			               (uintptr_t)PN_TO_ADDR(hipage));
		} else {
			/* Case 4: range swallows the whole area — remove it.
			 * Save the bounds before freeing the vmarea. */
			uint32_t old_start = vma->vma_start;
			uint32_t old_end = vma->vma_end;
			list_remove(&vma->vma_plink);
			list_remove(&vma->vma_olink);
			vma->vma_obj->mmo_ops->put(vma->vma_obj);
			vmarea_free(vma);
			pt_unmap_range(curproc->p_pagedir,
			               (uintptr_t)PN_TO_ADDR(old_start),
			               (uintptr_t)PN_TO_ADDR(old_end));
		}
	} list_iterate_end();

	/* Callers are responsible for flushing the TLB afterwards. */
	return 0;
}

/*
 * Returns 1 if the given address space has no mappings for the
 * given range, 0 otherwise.
 */
int
vmmap_is_range_empty(vmmap_t *map, uint32_t startvfn, uint32_t npages)
{
	KASSERT(map);
	KASSERT(npages > 0);

	uint32_t endvfn = startvfn + npages;
	vmarea_t *vma = NULL;
	KASSERT(ADDR_TO_PN(USER_MEM_LOW) <= startvfn &&
	        ADDR_TO_PN(USER_MEM_HIGH) >= endvfn && startvfn < endvfn);

	/* The list is sorted by start vfn: once an area begins at or
	 * past endvfn, neither it nor anything after it can overlap
	 * the queried range. */
	list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
		if (endvfn <= vma->vma_start) {
			return 1;
		}
		if (startvfn < vma->vma_end) {
			return 0; /* [startvfn, endvfn) intersects this area */
		}
	} list_iterate_end();

	return 1;
}

/* Read into 'buf' from the virtual address space of 'map' starting at
 * 'vaddr' for size 'count'. To do so, you will want to find the vmareas
 * to read from, then find the pframes within those vmareas corresponding
 * to the virtual addresses you want to read, and then read from the
 * physical memory that pframe points to. You should not check permissions
 * of the areas. Assume (KASSERT) that all the areas you are accessing exist.
 * Returns 0 on success, -errno on error.
 */
int
vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
{
	KASSERT(map);
	KASSERT(vaddr);
	KASSERT(buf);

	int err = 0;
	uint32_t vfn = ADDR_TO_PN(vaddr);
	uint32_t pageoff = PAGE_OFFSET(vaddr);
	size_t chunk = 0;
	char *dst = (char *)buf;
	vmarea_t *vma = NULL;
	pframe_t *pf = NULL;

	if (count == 0) {
		return -EINVAL;
	}

	/* Walk the range one page at a time, copying out of each
	 * resident pframe. */
	while (count > 0) {
		vma = vmmap_lookup(map, vfn);
		KASSERT(vma);

		err = pframe_lookup(vma->vma_obj,
		                    vfn - vma->vma_start + vma->vma_off,
		                    0, &pf);
		if (err < 0) {
			return err;
		}

		/* Copy at most up to the end of this page. */
		chunk = PAGE_SIZE - pageoff;
		if (chunk > count) {
			chunk = count;
		}
		memcpy(dst, (char *)pf->pf_addr + pageoff, chunk);

		dst += chunk;
		count -= chunk;
		vfn += 1;
		pageoff = 0; /* only the first page can start mid-page */
	}
	return 0;
}

/* Write from 'buf' into the virtual address space of 'map' starting at
 * 'vaddr' for size 'count'. To do this, you will need to find the correct
 * vmareas to write into, then find the correct pframes within those vmareas,
 * and finally write into the physical addresses that those pframes correspond
 * to. You should not check permissions of the areas you use. Assume (KASSERT)
 * that all the areas you are accessing exist. Remember to dirty pages!
 * Returns 0 on success, -errno on error.
 *
 * FIX: the original returned -EACCES when PROT_READ was missing, which
 * both violates the "do not check permissions" contract and tested the
 * wrong bit for a write.  The check is removed.  We also now KASSERT
 * the looked-up area, per the contract. */
int
vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
{
	KASSERT(map);
	KASSERT(vaddr);
	KASSERT(buf);

	int err = 0;
	uint32_t vfn = ADDR_TO_PN(vaddr);
	uint32_t pageoff = PAGE_OFFSET(vaddr);
	size_t chunk = 0;
	const char *src = (const char *)buf;
	vmarea_t *vma = NULL;
	pframe_t *pf = NULL;

	if (count == 0) {
		return -EINVAL;
	}

	while (count > 0) {
		vma = vmmap_lookup(map, vfn);
		KASSERT(vma);

		/* pframe_get (not pframe_lookup): the page may need to be
		 * faulted in before we can write to it. */
		err = pframe_get(vma->vma_obj,
		                 vfn - vma->vma_start + vma->vma_off, &pf);
		if (err < 0) {
			return err;
		}

		/* Write at most up to the end of this page. */
		chunk = PAGE_SIZE - pageoff;
		if (chunk > count) {
			chunk = count;
		}
		memcpy((char *)pf->pf_addr + pageoff, src, chunk);

		/* Mark the page dirty so it is eventually written back. */
		if (!pframe_is_dirty(pf)) {
			pframe_dirty(pf);
		}

		src += chunk;
		count -= chunk;
		vfn += 1;
		pageoff = 0; /* only the first page can start mid-page */
	}
	return 0;
}

/* a debugging routine: dumps the mappings of the given address space. */
size_t
vmmap_mapping_info(const void *vmmap, char *buf, size_t osize)
{
        KASSERT(0 < osize);
        KASSERT(NULL != buf);
        KASSERT(NULL != vmmap);

        vmmap_t *map = (vmmap_t *)vmmap;
        vmarea_t *vma;
        /* Signed so the running "space left" can go negative on overflow. */
        ssize_t size = (ssize_t)osize;

        /* Column header; len carries each snprintf's would-be length
         * into the next iteration, where it is subtracted from size. */
        int len = snprintf(buf, size, "%21s %5s %7s %8s %10s %12s\n",
                           "VADDR RANGE", "PROT", "FLAGS", "MMOBJ", "OFFSET",
                           "VFN RANGE");

        list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
                /* Consume the previous line's length before writing the
                 * next one; bail out once the buffer is exhausted. */
                size -= len;
                buf += len;
                if (0 >= size) {
                        goto end;
                }

                /* One line per area: address range, rwx bits, share mode,
                 * backing object pointer, page offset, and vfn range. */
                len = snprintf(buf, size,
                               "%#.8x-%#.8x  %c%c%c  %7s 0x%p %#.5x %#.5x-%#.5x\n",
                               vma->vma_start << PAGE_SHIFT,
                               vma->vma_end << PAGE_SHIFT,
                               (vma->vma_prot & PROT_READ ? 'r' : '-'),
                               (vma->vma_prot & PROT_WRITE ? 'w' : '-'),
                               (vma->vma_prot & PROT_EXEC ? 'x' : '-'),
                               (vma->vma_flags & MAP_SHARED ? " SHARED" : "PRIVATE"),
                               vma->vma_obj, vma->vma_off, vma->vma_start, vma->vma_end);
        } list_iterate_end();

end:
        /* On overflow, report the full buffer as used and ensure the
         * (truncated) string is NUL-terminated. */
        if (size <= 0) {
                size = osize;
                buf[osize - 1] = '\0';
        }
        /*
        KASSERT(0 <= size);
        if (0 == size) {
                size++;
                buf--;
                buf[0] = '\0';
        }
        */
        /* Number of bytes of buf actually consumed. */
        return osize - size;
}
