#include "kernel.h"
#include "errno.h"
#include "globals.h"

#include "vm/vmmap.h"
#include "vm/shadow.h"
#include "vm/anon.h"

#include "proc/proc.h"

#include "util/debug.h"
#include "util/list.h"
#include "util/string.h"
#include "util/printf.h"

#include "fs/vnode.h"
#include "fs/file.h"
#include "fs/fcntl.h"
#include "fs/vfs_syscall.h"

#include "mm/slab.h"
#include "mm/page.h"
#include "mm/mm.h"
#include "mm/mman.h"
#include "mm/mmobj.h"

static slab_allocator_t *vmmap_allocator;
static slab_allocator_t *vmarea_allocator;

/* One-time setup: create the slab caches from which all vmmap_t and
 * vmarea_t objects are allocated. Must run before any vmmap_create()
 * or vmarea_alloc() call. */
void
vmmap_init(void)
{
        vmmap_allocator = slab_allocator_create("vmmap", sizeof(vmmap_t));
        vmarea_allocator = slab_allocator_create("vmarea", sizeof(vmarea_t));
        KASSERT(NULL != vmmap_allocator && "failed to create vmmap allocator!");
        KASSERT(NULL != vmarea_allocator && "failed to create vmarea allocator!");
}

/* Allocate a vmarea from the slab cache. On success the vma_vmmap back
 * pointer is cleared; all other fields are left uninitialized for the
 * caller to fill in. Returns NULL if the slab allocation fails. */
vmarea_t *
vmarea_alloc(void)
{
        vmarea_t *area = (vmarea_t *) slab_obj_alloc(vmarea_allocator);
        if (NULL == area) {
                return NULL;
        }
        area->vma_vmmap = NULL;
        return area;
}

/* Return a vmarea to the slab cache. vma must be non-NULL.
 * Note: this only releases the memory -- it does not unlink
 * vma_plink/vma_olink or drop any vma_obj reference; callers must
 * take care of those before calling this. */
void
vmarea_free(vmarea_t *vma)
{
        KASSERT(NULL != vma);
        slab_obj_free(vmarea_allocator, vma);
}

/* Create a new vmmap, which has no vmareas and does
 * not refer to a process. 
*/
/* Author : Aditya Parikh
 * created new vmmap
 * initialized its vmm list
 * did nothing to its proc
 */
vmmap_t *
vmmap_create(void)
{
dbg(DBG_ELF, "VMMAP CREATE\n");
	vmmap_t *newvmm = (vmmap_t *) slab_obj_alloc(vmmap_allocator);
        if (newvmm)
	{
        	list_init(&(newvmm->vmm_list));
        }
        return newvmm;
        /*NOT_YET_IMPLEMENTED("VM: vmmap_create");
        return NULL;*/
}

/* Removes all vmareas from the address space and frees the
 * vmmap struct.
 *
 * Fix: the previous version only unlinked each vmarea and leaked it;
 * every area is now returned to the slab cache via vmarea_free(). */
void
vmmap_destroy(vmmap_t *map)
{
        KASSERT(NULL != map);
        dbg(DBG_ELF,"vmap_destroy: Map is not Null\n");

        list_link_t *link = map->vmm_list.l_next;
        while (link != &(map->vmm_list)) {
                vmarea_t *vma = list_item(link, vmarea_t, vma_plink);
                /* advance before the node is freed */
                link = link->l_next;
                list_remove(&(vma->vma_plink));
                /* NOTE(review): if the vmarea holds a reference on
                 * vma_obj it should probably be put here too -- confirm
                 * mmobj ownership before relying on this in fork/exit. */
                vmarea_free(vma);
        }
        slab_obj_free(vmmap_allocator, map);
}

/* Add a vmarea to an address space. Assumes (i.e. asserts to some extent)
 * the vmarea is valid. This involves finding where to put it in the list
 * of VM areas (kept sorted by ascending vma_start), and adding it.
 * Also sets the vma_vmmap back pointer for the area. */
void
vmmap_insert(vmmap_t *map, vmarea_t *newvma)
{
        KASSERT(NULL != map && NULL != newvma);
        dbg(DBG_ELF, "vmmap_insert: map and area are not null\n");

        newvma->vma_vmmap = map;

        KASSERT(newvma->vma_start <= newvma->vma_end);
        dbg(DBG_ELF, "vmmap_insert: start is less than or equal to end\n");
        KASSERT(ADDR_TO_PN(USER_MEM_LOW) <= newvma->vma_start && ADDR_TO_PN(USER_MEM_HIGH) >= newvma->vma_end);
        dbg(DBG_ELF,"Page frame number of start >= USER_MEM_LOW and page frame number of end <= USER_MEM_HIGH\n");

        /* Walk until we find the first area starting past the new one;
         * inserting before it (or before the list head if none exists)
         * keeps the list sorted. */
        list_link_t *pos = map->vmm_list.l_next;
        while (pos != &(map->vmm_list)) {
                vmarea_t *cur = list_item(pos, vmarea_t, vma_plink);
                if (cur->vma_start > newvma->vma_start) {
                        break;
                }
                pos = pos->l_next;
        }
        list_insert_before(pos, &(newvma->vma_plink));
}

/* Find a contiguous range of free virtual pages of length npages in
 * the given address space. Returns starting vfn for the range,
 * without altering the map. Returns -1 if no such range exists.
 *
 * First fit: VMMAP_DIR_HILO finds a gap as high as possible,
 * VMMAP_DIR_LOHI as low as possible.
 *
 * Fixes over the previous version:
 *  - an empty map no longer calls list_item() on the list head;
 *  - HILO now returns (gap end - npages + 1) so the full npages fit,
 *    instead of a vfn only one page below the gap end. */
int
vmmap_find_range(vmmap_t *map, uint32_t npages, int dir)
{
        KASSERT(NULL != map);
        dbg(DBG_ELF,"vmmap_find_range: map is not null\n");
        KASSERT(0 < npages);
        dbg(DBG_ELF,"vmmap_find_range: npages > 0\n");

        /* Usable vfns are [lo, hi); hi is the first page beyond the range,
         * matching the free-page counts the original computed. */
        uint32_t lo = ADDR_TO_PN(USER_MEM_LOW);
        uint32_t hi = ADDR_TO_PN(USER_MEM_HIGH);
        list_link_t *plink, *nlink;
        vmarea_t *pvma, *nvma;

        if (list_empty(&(map->vmm_list))) {
                if (hi - lo >= npages) {
                        return (dir == VMMAP_DIR_HILO) ? (int)(hi - npages)
                                                       : (int)lo;
                }
                return -1;
        }

        if (dir == VMMAP_DIR_LOHI) {
                /* gap below the first area */
                plink = map->vmm_list.l_next;
                pvma = list_item(plink, vmarea_t, vma_plink);
                if (pvma->vma_start - lo >= npages) {
                        return (int)lo;
                }
                /* gaps between consecutive areas (list sorted by start) */
                for (nlink = plink->l_next; nlink != &(map->vmm_list);
                     plink = nlink, nlink = nlink->l_next) {
                        nvma = list_item(nlink, vmarea_t, vma_plink);
                        pvma = list_item(plink, vmarea_t, vma_plink);
                        if (nvma->vma_start - pvma->vma_end - 1 >= npages) {
                                return (int)(pvma->vma_end + 1);
                        }
                }
                /* gap above the last area */
                pvma = list_item(plink, vmarea_t, vma_plink);
                if (hi - (pvma->vma_end + 1) >= npages) {
                        return (int)(pvma->vma_end + 1);
                }
        } else if (dir == VMMAP_DIR_HILO) {
                /* gap above the last area */
                plink = map->vmm_list.l_prev;
                pvma = list_item(plink, vmarea_t, vma_plink);
                if (hi - (pvma->vma_end + 1) >= npages) {
                        return (int)(hi - npages);
                }
                /* gaps between consecutive areas, walking downward */
                for (nlink = plink->l_prev; nlink != &(map->vmm_list);
                     plink = nlink, nlink = nlink->l_prev) {
                        nvma = list_item(nlink, vmarea_t, vma_plink);
                        pvma = list_item(plink, vmarea_t, vma_plink);
                        if (pvma->vma_start - nvma->vma_end - 1 >= npages) {
                                return (int)(pvma->vma_start - npages);
                        }
                }
                /* gap below the first area */
                pvma = list_item(plink, vmarea_t, vma_plink);
                if (pvma->vma_start - lo >= npages) {
                        return (int)(pvma->vma_start - npages);
                }
        }
        return -1;
}

/* Find the vm_area that vfn lies in. Simply scan the address space
 * looking for a vma whose range covers vfn. If the page is unmapped,
 * return NULL. */
vmarea_t *
vmmap_lookup(vmmap_t *map, uint32_t vfn)
{
        KASSERT(NULL != map);
        dbg(DBG_ELF,"vmmap_lookup: map is not null\n");

        list_link_t *cur = map->vmm_list.l_next;
        while (cur != &(map->vmm_list)) {
                vmarea_t *area = list_item(cur, vmarea_t, vma_plink);
                if (area->vma_start <= vfn && vfn <= area->vma_end) {
                        return area;
                }
                cur = cur->l_next;
        }
        return NULL;
}

/* Allocates a new vmmap containing a new vmarea for each area in the
 * given map. The areas should have no mmobjs set yet. Returns pointer
 * to the new vmmap on success, NULL on failure. This function is
 * called when implementing fork(2).
 *
 * Fixes over the previous version:
 *  - removed the stray "link=link->l.next;" (wrong member name -- would
 *    not compile -- and it double-advanced the iterator, skipping every
 *    other area);
 *  - the partially built map is destroyed on allocation failure instead
 *    of being leaked. */
vmmap_t *
vmmap_clone(vmmap_t *map)
{
        KASSERT(NULL != map);

        vmmap_t *newvmm = vmmap_create();
        if (NULL == newvmm) {
                return NULL;
        }

        list_link_t *link;
        for (link = map->vmm_list.l_next; link != &(map->vmm_list);
             link = link->l_next) {
                vmarea_t *oldvma = list_item(link, vmarea_t, vma_plink);
                vmarea_t *newvma = vmarea_alloc();
                if (NULL == newvma) {
                        /* releases any areas cloned so far */
                        vmmap_destroy(newvmm);
                        return NULL;
                }
                newvma->vma_start = oldvma->vma_start;
                newvma->vma_end = oldvma->vma_end;
                newvma->vma_off = oldvma->vma_off;
                newvma->vma_prot = oldvma->vma_prot;
                newvma->vma_flags = oldvma->vma_flags;
                newvma->vma_vmmap = newvmm;
                /* vma_obj deliberately left unset: caller wires up mmobjs */
                list_link_init(&(newvma->vma_olink));
                /* source list is sorted, so appending keeps order */
                list_insert_before(&(newvmm->vmm_list), &(newvma->vma_plink));
        }
        return newvmm;
}

/* Insert a mapping into the map starting at lopage for npages pages.
 * If lopage is zero, we will find a range of virtual addresses in the
 * process that is big enough, by using vmmap_find_range with the same
 * dir argument.  If lopage is non-zero and the specified region
 * contains another mapping that mapping should be unmapped.
 *
 * If file is NULL an anon mmobj will be used to create a mapping
 * of 0's.  If file is non-null that vnode's file will be mapped in
 * for the given range, via the vnode's mmap operation.
 *
 * If MAP_PRIVATE is specified set up a shadow object for the mmobj.
 *
 * If 'new' is non-NULL a pointer to the new vmarea_t is stored in it.
 * Returns 0 on success, a negative error code on failure.
 *
 * Fixes over the previous version:
 *  - returns int error codes instead of NULL from an int function;
 *  - vmmap_find_range's -1 is caught via a signed temp (it was assigned
 *    to the unsigned lopage and tested with "!lopage", which never fired);
 *  - no longer clobbers the file offset with "off = lopage";
 *  - "*new = newvma" instead of overwriting the caller's pointer variable;
 *  - newvma is freed on every failure path; anon/shadow create are
 *    NULL-checked;
 *  - the irreversible vmmap_insert is deferred until nothing else can
 *    fail. */
int
vmmap_map(vmmap_t *map, vnode_t *file, uint32_t lopage, uint32_t npages,
          int prot, int flags, off_t off, int dir, vmarea_t **new)
{
        KASSERT(NULL != map);
        KASSERT(0 < npages);
        KASSERT(!(~(PROT_NONE | PROT_READ | PROT_WRITE | PROT_EXEC) & prot));
        KASSERT((MAP_SHARED & flags) || (MAP_PRIVATE & flags));
        KASSERT((0 == lopage) || (ADDR_TO_PN(USER_MEM_LOW) <= lopage));
        KASSERT((0 == lopage) || (ADDR_TO_PN(USER_MEM_HIGH) >= (lopage + npages)));
        KASSERT(PAGE_ALIGNED(off));

        int status;

        if (0 == lopage) {
                /* pick a free range; find_range returns -1 on failure */
                int start = vmmap_find_range(map, npages, dir);
                if (start < 0) {
                        return -1;
                }
                lopage = (uint32_t)start;
        } else if (!vmmap_is_range_empty(map, lopage, npages)) {
                /* fixed address: evict whatever currently occupies it */
                status = vmmap_remove(map, lopage, npages);
                if (status < 0) {
                        return status;
                }
        }

        vmarea_t *newvma = vmarea_alloc();
        if (NULL == newvma) {
                return -1;
        }

        /* All fields except vma_obj must be set before calling mmap. */
        newvma->vma_start = lopage;
        newvma->vma_end = lopage + npages - 1;
        newvma->vma_off = off;
        newvma->vma_prot = prot;
        newvma->vma_flags = flags;
        newvma->vma_vmmap = map;
        list_link_init(&(newvma->vma_olink));

        if (file) {
                status = file->vn_ops->mmap(file, newvma, &(newvma->vma_obj));
                if (status < 0) {
                        vmarea_free(newvma);
                        return status;
                }
                newvma->vma_obj->mmo_ops->ref(newvma->vma_obj);
        } else {
                newvma->vma_obj = anon_create();
                if (NULL == newvma->vma_obj) {
                        vmarea_free(newvma);
                        return -1;
                }
                /* NOTE(review): per the course spec the anonymous case
                 * zeroes offset/prot/flags on the area -- confirm this is
                 * really intended, since PROT_NONE makes the mapping
                 * inaccessible. */
                newvma->vma_off = 0;
                newvma->vma_prot = PROT_NONE;
                newvma->vma_flags = 0;
        }
        list_insert_tail(&(newvma->vma_obj->mmo_un.mmo_vmas),
                         &(newvma->vma_olink));

        if (flags & MAP_PRIVATE) {
                mmobj_t *shadowo = shadow_create();
                if (NULL == shadowo) {
                        list_remove(&(newvma->vma_olink));
                        /* NOTE(review): assumes create did not take an
                         * extra ref on vma_obj on our behalf; confirm
                         * whether a put() is needed here. */
                        vmarea_free(newvma);
                        return -1;
                }
                shadowo->mmo_un.mmo_bottom_obj = newvma->vma_obj;
                shadowo->mmo_shadowed = newvma->vma_obj;
                newvma->vma_obj = shadowo;
        }

        /* Nothing can fail past this point; publish the area. */
        vmmap_insert(map, newvma);
        if (new) {
                *new = newvma;
        }
        return 0;
}

/*
 * Unmap the page range [lopage, lopage + npages). Each vmarea that is
 * partially or wholly covered falls into one of four cases:
 *
 * key:
 *          [             ]   Existing VM Area
 *        *******             Region to be unmapped
 *
 * Case 1:  [   ******    ]
 * The region lies completely inside the vmarea: split it in two,
 * incrementing the reference count on the shared mmobj.
 *
 * Case 2:  [      *******]**
 * The region overlaps the end of the vmarea: shorten it.
 *
 * Case 3: *[*****        ]
 * The region overlaps the beginning: move the start (updating vma_off)
 * and shorten.
 *
 * Case 4: *[*************]**
 * The region completely contains the vmarea: remove it.
 *
 * Fixes over the previous version:
 *  - the iterator is advanced exactly once per node (it used to advance
 *    inside each case AND again at the bottom of the loop, skipping
 *    nodes);
 *  - the right-hand piece of a split now gets the correct vma_off
 *    (offset shifted by the pages cut from its front);
 *  - embedded vma_olink links are re-initialized instead of being
 *    struct-copied from the old area;
 *  - the first split piece is freed if allocating the second fails.
 */
int
vmmap_remove(vmmap_t *map, uint32_t lopage, uint32_t npages)
{
        KASSERT(NULL != map);

        uint32_t start = lopage;
        uint32_t end = lopage + npages - 1;

        list_link_t *link = map->vmm_list.l_next;
        while (link != &(map->vmm_list)) {
                vmarea_t *vma = list_item(link, vmarea_t, vma_plink);
                /* advance now: vma may be unlinked and freed below */
                link = link->l_next;

                if (vma->vma_end < start) {
                        continue;       /* entirely below the range */
                }
                if (vma->vma_start > end) {
                        break;          /* list is sorted: no later overlap */
                }

                if (start > vma->vma_start && end < vma->vma_end) {
                        /* case 1: range strictly inside -- split in two */
                        vmarea_t *left = vmarea_alloc();
                        if (NULL == left) {
                                return -1;
                        }
                        vmarea_t *right = vmarea_alloc();
                        if (NULL == right) {
                                vmarea_free(left);
                                return -1;
                        }

                        left->vma_start = vma->vma_start;
                        left->vma_end = start - 1;
                        left->vma_off = vma->vma_off;
                        left->vma_prot = vma->vma_prot;
                        left->vma_flags = vma->vma_flags;
                        left->vma_vmmap = map;
                        left->vma_obj = vma->vma_obj;
                        list_link_init(&(left->vma_olink));
                        list_insert_before(&(vma->vma_plink),
                                           &(left->vma_plink));

                        right->vma_start = end + 1;
                        right->vma_end = vma->vma_end;
                        /* offset advances by the pages removed in front */
                        right->vma_off = vma->vma_off +
                                         (end + 1 - vma->vma_start);
                        right->vma_prot = vma->vma_prot;
                        right->vma_flags = vma->vma_flags;
                        right->vma_vmmap = map;
                        right->vma_obj = vma->vma_obj;
                        list_link_init(&(right->vma_olink));
                        list_insert_before(&(vma->vma_plink),
                                           &(right->vma_plink));

                        /* two holders replace one: net one extra ref */
                        vma->vma_obj->mmo_ops->ref(vma->vma_obj);
                        list_remove(&(vma->vma_plink));
                        vmarea_free(vma);
                } else if (start > vma->vma_start) {
                        /* case 2: trim the tail */
                        vma->vma_end = start - 1;
                } else if (end < vma->vma_end) {
                        /* case 3: trim the head, keeping off consistent */
                        vma->vma_off += end + 1 - vma->vma_start;
                        vma->vma_start = end + 1;
                } else {
                        /* case 4: fully covered -- drop the area */
                        /* NOTE(review): the area's mmobj reference is
                         * probably leaked here; confirm whether a put()
                         * on vma->vma_obj is required. */
                        list_remove(&(vma->vma_plink));
                        vmarea_free(vma);
                }
        }
        return 0;
}

/*
 * Returns 1 if the given address space has no mappings for the
 * given range, 0 otherwise.
 *
 * Fix: the previous overlap test only checked whether start or end fell
 * inside a vmarea, so an area strictly contained within
 * [startvfn, startvfn + npages) was missed and the range was wrongly
 * reported empty. The standard interval-overlap test covers all cases.
 */
int
vmmap_is_range_empty(vmmap_t *map, uint32_t startvfn, uint32_t npages)
{
        KASSERT(NULL != map);

        uint32_t start = startvfn;
        uint32_t end = start + npages - 1;

        KASSERT((startvfn <= end) && (ADDR_TO_PN(USER_MEM_LOW) <= startvfn) && (ADDR_TO_PN(USER_MEM_HIGH) >= end));
        dbg(DBG_ELF,"vmmap_is_range_empty: end <= start and USER_MEM_LOW <= start and USER_MEM_HIGH >= end\n");

        list_link_t *link;
        for (link = map->vmm_list.l_next; link != &(map->vmm_list);
             link = link->l_next) {
                vmarea_t *vma = list_item(link, vmarea_t, vma_plink);
                /* intervals [start,end] and [vma_start,vma_end] overlap
                 * iff each one starts no later than the other ends */
                if (start <= vma->vma_end && end >= vma->vma_start) {
                        return 0;
                }
        }
        return 1;
}

/* Read into 'buf' from the virtual address space of 'map' starting at
 * 'vaddr' for size 'count': find the vmarea covering each page, look up
 * the backing pframe, and copy from the physical page. Permissions are
 * not checked; all accessed areas must exist (KASSERT).
 * Returns 0 on success, -errno on error.
 *
 * Fixes over the previous version:
 *  - the per-page copy used PAGE_OFFSET(pfrm->pf_addr), which is always
 *    0 for a page-aligned frame, so whole pages were copied regardless
 *    of vaddr's offset or the remaining count, overrunning 'buf'; the
 *    offset is now taken from the *virtual* address and the copy length
 *    is bounded by the bytes remaining. */
int
vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
{
        KASSERT(NULL != map);

        uint32_t cur = (uint32_t)vaddr;
        uint32_t stop = cur + count;            /* exclusive end */
        char *dst = (char *)buf;

        while (cur < stop) {
                uint32_t vfn = ADDR_TO_PN(cur);
                vmarea_t *vma = vmmap_lookup(map, vfn);
                KASSERT(NULL != vma && "reading from unmapped address");

                pframe_t *pfrm;
                int status = vma->vma_obj->mmo_ops->lookuppage(
                        vma->vma_obj, vfn - vma->vma_start + vma->vma_off,
                        0, &pfrm);
                if (status < 0) {
                        return status;
                }

                /* copy at most to the end of this page or of the request */
                uint32_t off = PAGE_OFFSET(cur);
                uint32_t len = PAGE_SIZE - off;
                if (len > stop - cur) {
                        len = stop - cur;
                }
                memcpy(dst, (char *)pfrm->pf_addr + off, len);

                dst += len;
                cur += len;
        }
        return 0;
}

/* Write from 'buf' into the virtual address space of 'map' starting at
 * 'vaddr' for size 'count': find the vmarea covering each page, look up
 * the backing pframe (forwrite = 1), copy into the physical page, and
 * dirty it. Permissions are not checked; all accessed areas must exist
 * (KASSERT). Returns 0 on success, -errno on error.
 *
 * Fixes over the previous version: same offset/length bugs as
 * vmmap_read -- PAGE_OFFSET was taken from the page-aligned frame
 * address (always 0) and the copy was never bounded by the remaining
 * count, so whole pages were overwritten past 'count'. */
int
vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
{
        KASSERT(NULL != map);

        uint32_t cur = (uint32_t)vaddr;
        uint32_t stop = cur + count;            /* exclusive end */
        const char *src = (const char *)buf;

        while (cur < stop) {
                uint32_t vfn = ADDR_TO_PN(cur);
                vmarea_t *vma = vmmap_lookup(map, vfn);
                KASSERT(NULL != vma && "writing to unmapped address");

                pframe_t *pfrm;
                int status = vma->vma_obj->mmo_ops->lookuppage(
                        vma->vma_obj, vfn - vma->vma_start + vma->vma_off,
                        1, &pfrm);
                if (status < 0) {
                        return status;
                }

                /* copy at most to the end of this page or of the request */
                uint32_t off = PAGE_OFFSET(cur);
                uint32_t len = PAGE_SIZE - off;
                if (len > stop - cur) {
                        len = stop - cur;
                }
                memcpy((char *)pfrm->pf_addr + off, src, len);
                pframe_set_dirty(pfrm);

                src += len;
                cur += len;
        }
        return 0;
}

/* a debugging routine: dumps the mappings of the given address space
 * into buf (at most osize bytes) and returns the number of bytes
 * written. One header line, then one line per vmarea showing its
 * address range, rwx protection, SHARED/PRIVATE flag, backing mmobj
 * pointer, file offset, and vfn range. */
size_t
vmmap_mapping_info(const void *vmmap, char *buf, size_t osize)
{
        KASSERT(0 < osize);
        KASSERT(NULL != buf);
        KASSERT(NULL != vmmap);

        vmmap_t *map = (vmmap_t *)vmmap;
        vmarea_t *vma;
        /* signed so running out of room can drive size negative */
        ssize_t size = (ssize_t)osize;

        int len = snprintf(buf, size, "%21s %5s %7s %8s %10s %12s\n",
                           "VADDR RANGE", "PROT", "FLAGS", "MMOBJ", "OFFSET",
                           "VFN RANGE");

        list_iterate_begin(&map->vmm_list, vma, vmarea_t, vma_plink) {
                /* consume the space used by the previous snprintf before
                 * writing the next line */
                size -= len;
                buf += len;
                if (0 >= size) {
                        goto end;
                }

                len = snprintf(buf, size,
                               "%#.8x-%#.8x  %c%c%c  %7s 0x%p %#.5x %#.5x-%#.5x\n",
                               vma->vma_start << PAGE_SHIFT,
                               vma->vma_end << PAGE_SHIFT,
                               (vma->vma_prot & PROT_READ ? 'r' : '-'),
                               (vma->vma_prot & PROT_WRITE ? 'w' : '-'),
                               (vma->vma_prot & PROT_EXEC ? 'x' : '-'),
                               (vma->vma_flags & MAP_SHARED ? " SHARED" : "PRIVATE"),
                               vma->vma_obj, vma->vma_off, vma->vma_start, vma->vma_end);
        } list_iterate_end();

end:
        /* if we overflowed, report the buffer as full and terminate it */
        if (size <= 0) {
                size = osize;
                buf[osize - 1] = '\0';
        }
        /*
        KASSERT(0 <= size);
        if (0 == size) {
                size++;
                buf--;
                buf[0] = '\0';
        }
        */
        return osize - size;
}
