/* vmLibP.h - private header for architecture independent mmu interface */

/* 
 * Copyright (c) 1995-2005 Wind River Systems, Inc. 
 *
 * The right to copy, distribute, modify or otherwise make use 
 * of this software may be licensed only pursuant to the terms 
 * of an applicable Wind River license agreement. 
 */ 

/*
modification history
--------------------
01x,11aug05,zl   moved common VM macros from the source files
01w,10jun05,jmp  updated VM_LIB_INFO to support vmContigBlockEach(), added
                 VM_CONTIG_BLOCK_EACH() macro.
01v,03jun05,yvp  Added #ifndef	_ASMLANGUAGE. 
                 Updated copyright. #include now with angle-brackets.
01u,07feb05,zl   moved VM_CONTEXT from vmLib.h
01t,12oct04,pcs  Remove struct entries pVmCtxLockRtn & pVmPageProtectRtn since
                 they are really not being used.
01s,30sep04,pcs  Moved VM_CTX_MASK_XXX macros from vmLib.h to here.
01r,20sep04,tam  added vmInfoGet() and VM_INFO
01q,07jul04,aeg  removed VM_CONTEXT_CLASS related definitions.
01p,03may04,pcs  Add support for optimize/lock/unlock feature's.
01o,29apr04,pcs  Add a macro to get max physical address bit's supported by
                 the CPU.
01n,01apr04,zl   added mmuSoftCfgEnabled to VM_LIB_INFO.
01m,19feb04,yp  adding data struct for tracking extra mappings
01l,10nov03,yp   adding addrSpaceSegInfo structure
01k,30oct03,sru  remove MMU_TRANS_TBL typedef
01j,15oct03,sru  fix prototypes for PHYS_ADDR
01i,19sep03,pcs  Implement code review changes.
01h,11jul03,pcs  Port to Base 6.
01h,22may03,dcc  added prototypes of vmContextTerminate () and 
		 vmContextDestroy ().
01g,13feb93,kdl  fixed test in VM_TEXT_PAGE_PROTECT macro.
01f,10feb93,rdc  added support for text page protection routine. 
01e,08oct92,rdc  fixed VM_TRANSLATE macro.
01d,22sep92,rrr  added support for c++
01c,30jul92,rdc  added pointer to vmTranslate routine to VM_LIB_INFO.
01b,27jul92,rdc  modified VM_LIB_INFO.
01a,10jul92,rdc  written.
*/

#ifndef __INCvmLibPh
#define __INCvmLibPh

#ifdef __cplusplus
extern "C" {
#endif


#include <vxWorks.h>
#include <memLib.h>
#include <lstLib.h>
#include <private/objLibP.h>
#include <private/classLibP.h>
#include <private/semLibP.h>
#include <vmLib.h>
#include <mmuLib.h>


#ifndef	_ASMLANGUAGE

/* compute if page is aligned without using "%" (SPR 6833) */

/*
 * Non-zero when <addr> is not aligned on a vmPageSize boundary.  The mask
 * trick is only valid because page sizes are powers of two.
 */

#define NOT_PAGE_ALIGNED(addr)  (((UINT)(addr)) & ((UINT)vmPageSize - 1))

/* Convert from state to index into architecture dependent state array */

/* extract the protection attribute field and shift it down to an index */

#define STATE_TO_PROT_INDEX(state) ((MMU_ATTR_PROT_GET(state)) >> \
				    MMU_ATTR_PROT_INDEX_SHIFT)

/* extract the cache attribute field and shift it down to an index */

#define STATE_TO_CACHE_INDEX(state) ((MMU_ATTR_CACHE_GET(state)) >> \
				    MMU_ATTR_CACHE_INDEX_SHIFT)

/* extract the valid attribute field and shift it down to an index */

#define STATE_TO_VALID_INDEX(state) ((MMU_ATTR_VALID_GET(state)) >> \
				    MMU_ATTR_VALID_INDEX_SHIFT)

/* Convert from architecture dependent state array index to state */

/*
 * NOTE: PROT_INDEX_TO_STATE additionally ORs in MMU_ATTR_PROT_SUP_READ;
 * presumably supervisor-read is implied by every protection index --
 * confirm against the architecture mmuLib before relying on this.
 */

#define PROT_INDEX_TO_STATE(index) (((UINT)(index) << \
				     MMU_ATTR_PROT_INDEX_SHIFT) | \
				    MMU_ATTR_PROT_SUP_READ)

#define CACHE_INDEX_TO_STATE(index) ((UINT)index << MMU_ATTR_CACHE_INDEX_SHIFT)

#define VALID_INDEX_TO_STATE(index) ((UINT)index << MMU_ATTR_VALID_INDEX_SHIFT)

/* Convert from mask to index into architecture dependent mask array */

#define STATEMASK_TO_INDEX(statemask) ((MMU_ATTR_MSK_ID_GET(statemask)) >> \
				       MMU_ATTR_MSK_INDEX_SHIFT)


/* macros to reference mmuLib routines indirectly through mmuLibFunc table */

/*
 * Each macro dereferences the corresponding function pointer in the global
 * mmuLibFuncs table (see MMU_LIB_FUNCS below), so that the architecture
 * specific mmuLib implementation is bound at run time.  Note: the previous
 * version of this header defined MMU_PAGE_SIZE_GET, MMU_ENABLE,
 * MMU_STATE_SET and MMU_ATTR_TRANSLATE twice; the (identical) duplicate
 * definitions have been removed.
 */

#define MMU_PAGE_SIZE_GET       (*(mmuLibFuncs.mmuPageSizeGet))
#define MMU_ENABLE              (*(mmuLibFuncs.mmuEnable))
#define MMU_TRANS_TBL_CREATE    (*(mmuLibFuncs.mmuTransTblCreate))
#define MMU_STATE_SET           (*(mmuLibFuncs.mmuStateSet))
#define MMU_STATE_GET           (*(mmuLibFuncs.mmuStateGet))
#define MMU_CURRENT_SET         (*(mmuLibFuncs.mmuCurrentSet))
#define MMU_GLOBAL_PAGE_MAP     (*(mmuLibFuncs.mmuGlobalPageMap))
#define MMU_PAGE_MAP            (*(mmuLibFuncs.mmuPageMap))
#define MMU_PAGE_UNMAP          (*(mmuLibFuncs.mmuPageUnMap))
#define MMU_TRANSLATE           (*(mmuLibFuncs.mmuTranslate))
#define MMU_PHYS_TRANSLATE      (*(mmuLibFuncs.mmuPhysTranslate))
#define MMU_ATTR_TRANSLATE      (*(mmuLibFuncs.mmuAttrTranslate))
#define MMU_PAGE_OPTIMIZE       (*(mmuLibFuncs.mmuPageOptimize))
#define MMU_BUFFER_WRITE        (*(mmuLibFuncs.mmuBufferWrite))
#define MMU_PAGE_LOCK           (*(mmuLibFuncs.mmuPageLock))
#define MMU_PAGE_UNLOCK         (*(mmuLibFuncs.mmuPageUnlock))
#define MMU_MAX_PHYS_BITS_GET   (*(mmuLibFuncs.mmuMaxPhysBitsGet))

#define MMU_TRANS_TBL_DELETE    (*(mmuLibFuncs.mmuTransTblDelete))
#define MMU_TRANS_TBL_UNION     (*(mmuLibFuncs.mmuTransTblUnion))
#define MMU_TRANS_TBL_MASK      (*(mmuLibFuncs.mmuTransTblMask))
#define MMU_TRANS_TBL_GET       (*(mmuLibFuncs.mmuTransTblGet))
#define MMU_ATTACH_INIT         (*(mmuLibFuncs.mmuTransTblUnionInit))
#define MMU_PAGE_BLOCK_SIZE_GET (*(mmuLibFuncs.mmuBlockSizeGet))

/* 
 * An instance of VM_LIB_INFO is defined in usrConfig.c and is initialized
 * if vmLib has been installed. If the bundled version of vmLib.c is installed,
 * only a subset of the routines will be available - if the pointers are null,
 * then the routines are not available.
 *
 * The VM_xxx wrapper macros below test each pointer for NULL before
 * dispatching through it, so optional kernel components never link
 * directly against vmLib / vmBaseLib.
 */

typedef struct
    {
    BOOL vmLibInstalled;       /* full mmu support available                  */
    BOOL vmBaseLibInstalled;   /* base mmu support available                  */
    BOOL vmMpuLibInstalled;    /* MPU support available                       */
    BOOL protectTextSegs;      /* TRUE == protect text segments               */
    BOOL mmuSoftCfgEnabled;    /* TRUE: software MMU simulation configuration */

    FUNCPTR pVmStateSetRtn;    /* pointer to vmStateSet if vmLib included     */
    FUNCPTR pVmStateGetRtn;    /* pointer to vmStateGet if vmLib included     */
    FUNCPTR pVmEnableRtn;      /* pointer to vmEnable if vmLib included       */
    FUNCPTR pVmPageSizeGetRtn; /* ptr to vmPageSizeGet if vmLib included      */
    FUNCPTR pVmTranslateRtn;   /* ptr to vmTranslate if vmLib included        */
    FUNCPTR pVmTextProtectRtn; /* ptr to vmTextLock routine if vmLib included */

    /* routines below are filled in only by the full vmLib / vmBaseLib ------*/

    FUNCPTR pVmPageMapRtn;      /* pointer to vmPageMap if vmLib included     */
    FUNCPTR pVmPhysTranslateRtn;/* ptr to vmPhysTranslate if vmLib included   */
    FUNCPTR pVmCtxCreateRtn;    /* pointer to vmCtxCreate if vmLib included   */
    FUNCPTR pVmCtxDeleteRtn;    /* pointer to vmCtxDelete if vmLib included   */
    FUNCPTR pVmBufferWriteRtn;  /* ptr to vmBufferWrite routine if vmLib incl */

    FUNCPTR pVmPageOptimizeRtn; /* ptr to vmPageOptimize rtn if vmBaseLib incl*/
    FUNCPTR pVmPageLockRtn;     /* ptr to vmPageLock rtn if vmBaseLib incl    */
    FUNCPTR pVmPageUnlockRtn;   /* ptr to vmPageUnlock rtn if vmBaseLib incl  */
    FUNCPTR pVmMaxPhysBitsGetRtn; /* ptr to vmMaxPhysBitsGet rtn if vmBaseLib
				     incl                                     */

    FUNCPTR pVmPageUnMapRtn;    /* pointer to vmPgUnMap if vmLib included     */
    FUNCPTR pVmBlockSizeGetRtn; /* ptr to vmPageBlockSizeGet if vmLib incl    */
    FUNCPTR pVmTransTblGetRtn;  /* ptr to vmTransTblGet if vmLib included     */
    FUNCPTR pVmCtxUnionRtn;     /* pointer to vmCtxUnion if vmLib included    */
    FUNCPTR pVmCtxUnionInitRtn; /* ptr to vmCtxUnionInit if vmLib included    */
    FUNCPTR pVmCtxMaskRtn;      /* pointer to vmCtxMask if vmLib included     */
    FUNCPTR pVmCtxSwitchRtn;    /* pointer to vmCtxSwitch if vmLib included   */

    FUNCPTR pVmContigBlockEachRtn; /* pointer to vmContigBlockEach if	      */
				/* vmBaseLib included			      */
    } VM_LIB_INFO;


/* Macros used internally by the OS components. */

/*
 * Option values for vmCtxMask() / VM_CONTEXT_MASK().  OUT is the absence
 * of the IN flag; DELETE and L2_FREE are independent flag bits.
 */

#define VM_CTX_MASK_OUT        0x0      
#define VM_CTX_MASK_IN         0x1
#define VM_CTX_MASK_DELETE     0x2
#define VM_CTX_MASK_L2_FREE    0x4

/*
 * The following macros should always be used by optional kernel components
 * that need to call vmLib routines, instead of calling directly the vmLib
 * routines.
 * This in order to route properly the call to either the full implementation
 * of virtual memory management support (INCLUDE_MMU_FULL, that is vmLib.c) or
 * the basic implementation of virtual memory management support
 * (INCLUDE_MMU_BASIC, that is vmBaseLib.c).
 * Additionally, a default behavior is provided if no virtual memory management
 * support is included.
 * Failure to do so for optional kernel components will break the scalability
 * of the system in regards to virtual memory management support.
 *
 * Unless noted otherwise each macro yields ERROR when the corresponding
 * vmLibInfo routine pointer is NULL (i.e. the feature is not configured in).
 */

#define VM_INSTALLED (vmLibInfo.vmLibInstalled)

#define VM_BASE_INSTALLED (vmLibInfo.vmBaseLibInstalled)

#define VM_MMUSOFT_ENABLED (vmLibInfo.mmuSoftCfgEnabled)

#define VM_STATE_SET(context, pVirtual, len, stateMask, state) \
    ((vmLibInfo.pVmStateSetRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmStateSetRtn) (context, pVirtual, len, stateMask, state)))

#define VM_STATE_GET(context, pageAddr, pState) \
    ((vmLibInfo.pVmStateGetRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmStateGetRtn) (context, pageAddr, pState)))

#define VM_ENABLE(enable) \
    ((vmLibInfo.pVmEnableRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmEnableRtn) (enable)))

#define VM_PAGE_SIZE_GET() \
    ((vmLibInfo.pVmPageSizeGetRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmPageSizeGetRtn) ()))

#define VM_TRANSLATE(context, virtualAddr, physicalAddr) \
    ((vmLibInfo.pVmTranslateRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmTranslateRtn) (context, virtualAddr, physicalAddr)))

#define VM_TEXT_PROTECT(addr, protect) \
    ((vmLibInfo.pVmTextProtectRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmTextProtectRtn) (addr, protect)))

/* historical alias kept for existing callers (see mod history 01g) */

#define VM_TEXT_PAGE_PROTECT(addr, protect) \
    VM_TEXT_PROTECT(addr, protect)

/* map <len> bytes at <virtAddr> to <physAddr> in <context> */

#define VM_PAGE_MAP(context, virtAddr, physAddr, len) \
    ((vmLibInfo.pVmPageMapRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmPageMapRtn) (context, virtAddr, physAddr, len)))

/*
 * NOTE(review): the expansion deliberately swaps the last two arguments:
 * the underlying routine takes (context, physAddr, pVirtAddr), matching
 * MMUPHYSTRANSLATEFUNCPTR below, while this macro keeps the same
 * (context, virtual, physical) parameter order as VM_TRANSLATE.
 * Confirm against vmPhysTranslate() callers before "fixing" the order.
 */

#define VM_PHYS_TRANSLATE(context, virtualAddr, physicalAddr) \
    ((vmLibInfo.pVmPhysTranslateRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmPhysTranslateRtn) (context, physicalAddr, virtualAddr)))

/*
 * VM_CONTEXT_BUFFER_WRITE() should be used only in very specific cases where
 * the current context cannot be defined using VM_CURRENT_GET() and therefore
 * the context to use must be passed to vmBufferWrite(). Nevertheless the
 * context ID passed to VM_CONTEXT_BUFFER_WRITE() must correspond to the
 * context in place.
 *
 * Unlike most VM_xxx macros, this one does not return ERROR when no MMU
 * support is configured: it falls back to a plain memcpy() and yields OK.
 */

#define VM_CONTEXT_BUFFER_WRITE(context, fromAddr, toAddr, nbBytes) \
    ((vmLibInfo.pVmBufferWriteRtn == NULL) ? \
     (memcpy(toAddr, fromAddr, nbBytes), OK) : \
     ((*vmLibInfo.pVmBufferWriteRtn) (context, fromAddr, toAddr, nbBytes)))

#define VM_CONTEXT_CREATE() \
    ((vmLibInfo.pVmCtxCreateRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmCtxCreateRtn) ()))

#define VM_CONTEXT_DELETE(context) \
    ((vmLibInfo.pVmCtxDeleteRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmCtxDeleteRtn) (context)))

#define VM_PAGE_UNMAP(context, virtAddr, len) \
    ((vmLibInfo.pVmPageUnMapRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmPageUnMapRtn) (context, virtAddr, len)))

#define VM_PAGE_BLOCK_SIZE_GET() \
    ((vmLibInfo.pVmBlockSizeGetRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmBlockSizeGetRtn) ()))

#define VM_TRANS_TBL_GET(context) \
    ((vmLibInfo.pVmTransTblGetRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmTransTblGetRtn) (context)))

#define VM_CONTEXT_UNION(context1, context2) \
    ((vmLibInfo.pVmCtxUnionRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmCtxUnionRtn) (context1, context2)))

#define VM_CONTEXT_UNION_INIT(context, startAddr, len) \
    ((vmLibInfo.pVmCtxUnionInitRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmCtxUnionInitRtn) (context, startAddr, len)))

#define VM_CONTEXT_MASK(context, startAddr, endAddr, options) \
    ((vmLibInfo.pVmCtxMaskRtn == NULL) ? \
    (ERROR) : \
    ((*vmLibInfo.pVmCtxMaskRtn) (context, startAddr, endAddr, options)))

/*
 * Context switch is a silent no-op (OK, not ERROR) when no MMU support is
 * configured, so the scheduler path does not fail on MMU-less systems.
 */

#define VM_CONTEXT_SWITCH(context, adrSpaceId) \
    ((vmLibInfo.pVmCtxSwitchRtn == NULL) ? \
    (OK) : \
    ((*vmLibInfo.pVmCtxSwitchRtn) (context, adrSpaceId)))


/* optimize page mappings (e.g. block sizes) over [virtAddr, virtAddr+len) */

#define VM_PAGE_OPTIMIZE(context, virtAddr, len, option) \
	    ((vmLibInfo.pVmPageOptimizeRtn == NULL) ? \
	     (ERROR) : \
	     ((*vmLibInfo.pVmPageOptimizeRtn) (context, virtAddr, len, option)))

#define VM_PAGE_LOCK(context, virtAddr, len, option) \
	    ((vmLibInfo.pVmPageLockRtn == NULL) ? \
	     (ERROR) : \
	     ((*vmLibInfo.pVmPageLockRtn) (context, virtAddr, len, option)))

#define VM_PAGE_UNLOCK(context, virtAddr) \
	    ((vmLibInfo.pVmPageUnlockRtn == NULL) ? \
	     (ERROR) : \
	     ((*vmLibInfo.pVmPageUnlockRtn) (context, virtAddr)))

/* max physical address bits supported by the CPU (see mod history 01o) */

#define VM_MAX_PHYS_BITS_GET() \
	    ((vmLibInfo.pVmMaxPhysBitsGetRtn == NULL) ? \
	     (ERROR) : \
	     ((*vmLibInfo.pVmMaxPhysBitsGetRtn) ()))

/* iterate <eachRtn>(... eachArg) over contiguous mapped blocks */

#define VM_CONTIG_BLOCK_EACH(context, startAddr, numPages, \
			     stateMask, eachRtn, eachArg) \
	    ((vmLibInfo.pVmContigBlockEachRtn == NULL) ? \
	     (ERROR) : \
	     ((*vmLibInfo.pVmContigBlockEachRtn) (context, startAddr, numPages,\
						  stateMask, eachRtn, eachArg)))
	
IMPORT VM_LIB_INFO vmLibInfo;	/* single instance, defined in usrConfig.c */

IMPORT CLASS_ID vmContextClassId;	/* class ID for VM context objects */

/* a virtual memory context: one MMU translation table plus object plumbing */

typedef struct vm_context
    {
    OBJ_CORE	objCore;	/* object management (must be first) */
    MMU_TRANS_TBL_ID mmuTransTbl;   /* arch-dependent translation table */
    SEMAPHORE sem;		/* guards this context - TODO confirm scope */
    NODE links;			/* node in list of contexts */
    } VM_CONTEXT;


/* the following data structure in used internally in mmuLib to define
 * the values for the architecture dependent states
 */

typedef struct
    {
    UINT archIndepMask;		/* arch-independent attribute mask */
    UINT archDepMask;		/* corresponding arch-dependent mask */
    UINT archIndepState;	/* arch-independent state value */
    UINT archDepState;		/* corresponding arch-dependent state value */
    } STATE_TRANS_TUPLE;

/*
 * Create prototypes for functions that accept PHYS_ADDR.  This is required
 * because PHYS_ADDR can be larger than "int".  Some of these functions 
 * also accept an "MMU_TRANS_TBL *", but we don't have a complete type
 * definition for that data type when this file is processed.  So, we create
 * an incomplete struct mmuTransTblStruct datatype, and use it for the 
 * typedef.  Because of this, creators of the MMU_TRANS_TBL typedef now 
 * must ensure that that it has type 'struct mmuTransTblStruct'.
 */

struct mmuTransTblStruct;	/* incomplete type; completed by arch mmuLib */

/* map: (transTbl, virt, phys, len, ...) - last BOOL arg is arch-defined */
typedef STATUS (*MMUPAGEMAPFUNCPTR)(struct mmuTransTblStruct *,
				    VIRT_ADDR, PHYS_ADDR, UINT, BOOL);
typedef STATUS (*MMUGLOBALPAGEMAPFUNCPTR)(VIRT_ADDR, PHYS_ADDR, UINT, BOOL);
/* virt -> phys translation; result returned through the PHYS_ADDR pointer */
typedef STATUS (*MMUTRANSLATEFUNCPTR)(struct mmuTransTblStruct *, 
				      VIRT_ADDR, PHYS_ADDR *);
/* phys -> virt translation; result returned through the VIRT_ADDR pointer */
typedef STATUS (*MMUPHYSTRANSLATEFUNCPTR)(struct mmuTransTblStruct *,
					  PHYS_ADDR, VIRT_ADDR *);

/*
 * Table of architecture-specific mmuLib entry points; the MMU_xxx macros
 * above dispatch through the global instance "mmuLibFuncs".  Pointers that
 * need PHYS_ADDR-aware prototypes use the typedefs defined just above;
 * the rest are generic FUNCPTRs.
 */

typedef struct
    {
    FUNCPTR mmuLibInit;
    MMU_TRANS_TBL_ID (*mmuTransTblCreate) ();
    FUNCPTR mmuTransTblDelete;
    FUNCPTR mmuEnable;
    FUNCPTR mmuStateSet;
    FUNCPTR mmuStateGet;
    MMUPAGEMAPFUNCPTR  mmuPageMap;
    MMUGLOBALPAGEMAPFUNCPTR mmuGlobalPageMap;
    MMUTRANSLATEFUNCPTR   mmuTranslate;
    VOIDFUNCPTR mmuCurrentSet;

    UINT    (*mmuPageSizeGet)();
    MMUPHYSTRANSLATEFUNCPTR  mmuPhysTranslate;
    FUNCPTR mmuAttrTranslate;
    FUNCPTR mmuPageOptimize;
    FUNCPTR mmuBufferWrite;
    FUNCPTR mmuPageLock;
    FUNCPTR mmuPageUnlock;
    FUNCPTR mmuMaxPhysBitsGet;

    FUNCPTR mmuPageUnProtect;
    FUNCPTR mmuPageUnMap;
    UINT    (*mmuBlockSizeGet)();
    UINT *  (*mmuTransTblGet)();
    FUNCPTR mmuTransTblUnion;
    FUNCPTR mmuTransTblUnionInit;
    FUNCPTR mmuTransTblMask;
    } MMU_LIB_FUNCS;

/* 
 * On architectures that either will not allow page based mmu support on the
 * entire virtual memory range, as in MIPS and SH, or where the virtual
 * address range must be shared with the host, as in the case of the
 * simulators, the address space allocator must know what ranges are
 * allocatable and in what privilege mode. 
 */

/* note: values are distinct flag-style constants, KU_MAP is 0x4 (not K|U) */

typedef enum virt_seg_type
    {
    VIRT_SEG_TYPE_NO_MAP	= 0x00000000,	/* mapping not possible */ 
    VIRT_SEG_TYPE_K_MAP		= 0x00000001,	/* kernel mode only */
    VIRT_SEG_TYPE_U_MAP		= 0x00000002,	/* user mode only */
    VIRT_SEG_TYPE_KU_MAP	= 0x00000004	/* kernel or user mode */
    } VIRT_SEG_TYPE;

/* one allocatable virtual address range and its privilege mode */

typedef struct virt_seg_info
    {
    VIRT_ADDR 		start;		/* first virtual address of segment */
    UINT      		length;		/* segment length in bytes */
    VIRT_SEG_TYPE	type;		/* privilege mode of the segment */
    } VIRT_SEG_INFO;

/* describes one extra (non-RAM) virtual-to-physical mapping (mod hist 01m) */

typedef struct virt_extra_map_info
    {
    VIRT_ADDR 		virtAdr;	/* virtual base of the mapping */
    UINT      		len;		/* mapping length in bytes */
    PHYS_ADDR		physAdr;	/* physical base of the mapping */
    } VIRT_EXTRA_MAP_INFO;

/* capability/configuration summary returned by vmInfoGet() */

typedef struct vm_info
    {
    BOOL	noExecSupported;	/* no-execute page attribute available */
    BOOL	optimSupported;		/* page size optimization available */
    UINT	mmuPageSize;		/* MMU page size in bytes */
    UINT	ramDefaultAttr;		/* default attributes for RAM pages */
    UINT	cacheDefault;		/* default cache attribute */
    UINT	reserved1;		/* reserved for future use */
    } VM_INFO;

/* function declarations */

/*
 * NOTE: the duplicate declaration of vmCtxInit() that previously appeared
 * here has been removed, and zero-argument routines are declared (void)
 * for consistency with vmShowInit()/vmMaxPhysBitsGet().
 */

extern STATUS		vmLibInit (void);
extern STATUS		vmCtxInit (VM_CONTEXT_ID context);
extern STATUS		vmCtxTerminate (VM_CONTEXT_ID context);
extern STATUS		vmCtxDestroy (VM_CONTEXT_ID context, BOOL dealloc);
extern STATUS		vmInfoGet (VM_INFO * pVmInfo);
extern STATUS		vmContigBlockEach (VM_CONTEXT_ID context,
					   VIRT_ADDR startAddr, UINT numPages,
					   UINT stateMask, FUNCPTR eachRtn,
					   UINT32 eachArg);
extern int		vmPageBlockSizeGet (void);
extern VM_CONTEXT_ID	vmCurrentGet (void);
extern STATUS 		vmCurrentSet (VM_CONTEXT_ID context);
extern void 		vmShowInit (void);
extern int              vmMaxPhysBitsGet (void);
extern VM_CONTEXT_ID    vmCtxCreate (void);
extern STATUS           vmCtxDelete (VM_CONTEXT_ID context);
extern STATUS           vmCtxUnion (VM_CONTEXT_ID vmCtx1, VM_CONTEXT_ID vmCtx2);
extern STATUS           vmCtxUnionInit (VM_CONTEXT_ID vmCtx,
                                        VIRT_ADDR startAddr, UINT length);
extern STATUS           vmCtxMask (VM_CONTEXT_ID vmCtx, VIRT_ADDR virtStartAdr,
                                   VIRT_ADDR virtEndAdr, UINT options);
extern VIRT_ADDR	vmTransTblGet (VM_CONTEXT_ID context);
extern STATUS           vmContextSwitch (VM_CONTEXT_ID  context,
                                         int adrsSpaceId);
extern STATUS           vmPgUnMap (VM_CONTEXT_ID vmCtx, VIRT_ADDR virtAdr,
                                   UINT numBytes);

#endif	/* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* __INCvmLibPh */
