#include "globals.h"
#include "errno.h"

#include "util/string.h"
#include "util/debug.h"

#include "mm/mmobj.h"
#include "mm/pframe.h"
#include "mm/mm.h"
#include "mm/page.h"
#include "mm/slab.h"
#include "mm/tlb.h"

#include "vm/vmmap.h"
#include "vm/shadow.h"
#include "vm/shadowd.h"

#define SHADOW_SINGLETON_THRESHOLD 5

int shadow_count = 0; /* for debugging/verification purposes */
#ifdef __SHADOWD__
/*
 * number of shadow objects with a single parent, that is another shadow
 * object in the shadow objects tree(singletons)
 */
static int shadow_singleton_count = 0;
#endif

static slab_allocator_t *shadow_allocator;

static void shadow_ref(mmobj_t *o);
static void shadow_put(mmobj_t *o);
static int  shadow_lookuppage(mmobj_t *o, uint32_t pagenum, int forwrite, pframe_t **pf);
static int  shadow_fillpage(mmobj_t *o, pframe_t *pf);
static int  shadow_dirtypage(mmobj_t *o, pframe_t *pf);
static int  shadow_cleanpage(mmobj_t *o, pframe_t *pf);

static mmobj_ops_t shadow_mmobj_ops = {
        .ref = shadow_ref,
        .put = shadow_put,
        .lookuppage = shadow_lookuppage,
        .fillpage  = shadow_fillpage,
        .dirtypage = shadow_dirtypage,
        .cleanpage = shadow_cleanpage
};

/*
 * This function is called at boot time to initialize the
 * shadow page sub system. Currently it only initializes the
 * shadow_allocator object.
 */
void
shadow_init()
{	
	shadow_allocator = slab_allocator_create("mmobj", sizeof(mmobj_t));
	KASSERT(shadow_allocator);
	dbg(DBG_ELF,"shadow_init: shadow_ allocator is not null\n");
        /*NOT_YET_IMPLEMENTED("VM: shadow_init");*/
}

/*
 * You'll want to use the shadow_allocator to allocate the mmobj to
 * return, then then initialize it. Take a look in mm/mmobj.h for
 * macros which can be of use here. Make sure your initial
 * reference count is correct.
 */
mmobj_t *
shadow_create()
{
	mmobj_t *shadowo = slab_obj_alloc(shadow_allocator);
if(NULL==shadowo)
panic("SHADOWO NULL!!\n");
dbg(DBG_PRINT,"SHADOWO not NULL!!\n");
mmobj_init(shadowo, &shadow_mmobj_ops);
if(NULL==shadowo->mmo_ops)
panic("mmops NULL\n");
dbg(DBG_PRINT,"mmops not NULL!!\n");

if(NULL==shadowo->mmo_ops->ref)
panic("ref NULL\n");
dbg(DBG_PRINT,"ref not NULL!!\n");

	return shadowo;
        /*NOT_YET_IMPLEMENTED("VM: shadow_create");
        return NULL;*/
}

/* Implementation of mmobj entry points: */

/*
 * Increment the reference count on the object.
 */
static void
shadow_ref(mmobj_t *o)
{
	KASSERT(o && (0 <= o->mmo_refcount) && (&shadow_mmobj_ops == o->mmo_ops));
	dbg(DBG_ELF,"shadow_ref: object pointer is not null & object's mmo_refcount >0 & object's mmo_ops is equal to shadow_mmobj_ops\n");
	o->mmo_refcount++;

        /*NOT_YET_IMPLEMENTED("VM: shadow_ref");*/
}

/*
 * Decrement the reference count on the object. If, however, the
 * reference count on the object reaches the number of resident
 * pages of the object, we can conclude that the object is no
 * longer in use and, since it is a shadow object, it will never
 * be used again. You should unpin and uncache all of the object's
 * pages and then free the object itself.
 */
static void
shadow_put(mmobj_t *o)
{
KASSERT(o && (0 < o->mmo_refcount) && (&shadow_mmobj_ops == o->mmo_ops));
	if(o->mmo_refcount < o->mmo_nrespages)
		return;
	o->mmo_refcount--;
	if(o->mmo_refcount <= o->mmo_nrespages)
	{
		list_link_t *link;
		pframe_t *pfrm;
		for(link = o->mmo_respages.l_next ; link!=&(o->mmo_respages) ; )
		{
			list_link_t *remove_link = link;
			link = link->l_next;
			pfrm = list_item(remove_link, pframe_t, pf_olink);
			while(pfrm->pf_pincount!=0)
				pframe_unpin(pfrm);
		}
		for(link = o->mmo_respages.l_next ; link!=&(o->mmo_respages) ; )
		{
			list_link_t *remove_link = link;
			link = link->l_next;
			pfrm = list_item(remove_link, pframe_t, pf_link);
			if(pfrm->pf_pincount==0 && !pframe_is_free(pfrm) && !pframe_is_busy(pfrm))
				pframe_free(pfrm);
			slab_obj_free(shadow_allocator, o);			
		}
	}
        /*NOT_YET_IMPLEMENTED("VM: shadow_put");*/
}

/* This function looks up the given page in this shadow object. The
 * forwrite argument is true if the page is being looked up for
 * writing, false if it is being looked up for reading. This function
 * must handle all do-not-copy-on-not-write magic (i.e. when forwrite
 * is false find the first shadow object in the chain which has the
 * given page resident). copy-on-write magic (necessary when forwrite
 * is true) is handled in shadow_fillpage, not here. */
static int
shadow_lookuppage(mmobj_t *o, uint32_t pagenum, int forwrite, pframe_t **pf)
{
dbg(DBG_ELF, "lookuppage before lookup1, searching for pagenum : %d\n",pagenum);
	if(forwrite==0)
	{
dbg(DBG_ELF, "lookuppage 1, searching for pagenum : %d\n",pagenum);

		list_link_t *link;
		pframe_t *pfrm;
		for(link = o->mmo_respages.l_next ; link!=&(o->mmo_respages) ; link = link->l_next)
		{
			pfrm = list_item(link, pframe_t, pf_olink);
			dbg(DBG_ELF, "lookuppage 2, found pagenum : %d\n",pfrm->pf_pagenum);
			if(pfrm->pf_pagenum == pagenum)
			{
				pf = &pfrm;
	dbg(DBG_ELF, "before return - pf Page num and pf addr ---%d and %d \n",(*pf)->pf_pagenum,*(int *)((*pf)->pf_addr));
				return 0;
			}
		}
		if(o->mmo_shadowed==NULL)		
			return -1;

		return shadow_lookuppage(o->mmo_shadowed, pagenum, 0, pf);
	}
	else
	{
dbg(DBG_ELF, "lookuppage 1\n");
		int status = pframe_get(o, pagenum, pf);
dbg(DBG_ELF, "frame get  - Pagenum and pf pagenum ---%d and %d \n",pagenum,(*pf)->pf_pagenum);
		if(status<0 && pf==NULL)
			return -1;
		else if(status<0)
			dbg(DBG_ELF, "lookuppage 2\n");
	dbg(DBG_ELF, "lookuppage 3\n");
	int x=shadow_fillpage(o, *pf);
dbg(DBG_ELF, "before return - pf Page num and pf addr ---%d and %d \n",(*pf)->pf_pagenum,*(int *)((*pf)->pf_addr));
		return x; /* change a something */
	}

        /*NOT_YET_IMPLEMENTED("VM: shadow_lookuppage");
        return 0;*/
}

/* As per the specification in mmobj.h, fill the page frame starting
 * at address pf->pf_addr with the contents of the page identified by
 * pf->pf_obj and pf->pf_pagenum. This function handles all
 * copy-on-write magic (i.e. if there is a shadow object which has
 * data for the pf->pf_pagenum-th page then we should take that data,
 * if no such shadow object exists we need to follow the chain of
 * shadow objects all the way to the bottom object and take the data
 * for the pf->pf_pagenum-th page from the last object in the chain). */
static int
shadow_fillpage(mmobj_t *o, pframe_t *pf)
{
/*KASSERT(pframe_is_busy(pf));*/
KASSERT(!pframe_is_pinned(pf));
	list_link_t *link;
	pframe_t *pfrm;
dbg(DBG_ELF, "fillpage 1\n");
dbg(DBG_ELF, "Pagenum before List Respages length %d \n",pf->pf_pagenum);
	dbg(DBG_ELF, "List Respages Length %d\n",list_empty(&(o->mmo_respages)));
	for(link = o->mmo_respages.l_next ; link!=&(o->mmo_respages) ; link = link->l_next)
	{
		pfrm = list_item(link, pframe_t, pf_olink);
dbg(DBG_ELF, "fillpage 2\n");
		if(pfrm->pf_pagenum == pf->pf_pagenum)
		{
dbg(DBG_ELF, "fillpage 1\n");
			memcpy(pf->pf_addr+PAGE_OFFSET(pf->pf_addr),pfrm->pf_addr + PAGE_OFFSET(pfrm->pf_addr),PAGE_SIZE - PAGE_OFFSET(pfrm->pf_addr));
	dbg(DBG_ELF, "pf Page num and pf addr ---%d and %d \n",pf->pf_pagenum,*(int *)(pf->pf_addr));
			return 0;
		}
	}
	if(o->mmo_shadowed==NULL)		
		return -1;
	return shadow_fillpage(o->mmo_shadowed, pf);	
        /*NOT_YET_IMPLEMENTED("VM: shadow_fillpage");
        return 0;*/
}

/* These next two functions are not difficult. */

static int
shadow_dirtypage(mmobj_t *o, pframe_t *pf)
{
	int status = pframe_get(o, pf->pf_pagenum, &pf);
	if(status<0)
		return status;
	pframe_set_dirty(pf); 
	return 0;
        /*NOT_YET_IMPLEMENTED("VM: shadow_dirtypage");
        return -1;*/
}

static int
shadow_cleanpage(mmobj_t *o, pframe_t *pf)
{
	pframe_t *pfrm;
	int status = pframe_get(o, pf->pf_pagenum, &pfrm);
	if(status<0)
		return status;
	memcpy(pfrm->pf_addr+PAGE_OFFSET(pfrm->pf_addr),pf->pf_addr+PAGE_OFFSET(pf->pf_addr),PAGE_SIZE-PAGE_OFFSET(pf->pf_addr));
        /*NOT_YET_IMPLEMENTED("VM: shadow_cleanpage");
        return -1;*/
}
