/* mmuArmArch6PalLib.h - ARM MMU library header file */

/* Copyright 2005 Wind River Systems, Inc. */

/*
modification history
--------------------
01s,30mar05,jb   Extracted from mmuArmArch6PalLib.h, 01u
*/


#ifndef	__INCmmuArmArch6PalLibh
#define	__INCmmuArmArch6PalLibh

#ifdef __cplusplus
extern "C" {
#endif

#ifndef	_ASMLANGUAGE

#include "memLib.h"

#endif /* _ASMLANGUAGE */

/*
 * The page size we will use. Ignore the sub-page, Large Page and Tiny
 * Page features.
 */

#define MMU_PAGE_SIZE 4096

/*
 * For integrator BSPs
 */

#define PAGE_SIZE            MMU_PAGE_SIZE

/*
 * The amount described by a Level 1 Descriptor, which equals the smallest
 * amount of VM allocatable in VxWorks.
 */

#define MMU_PAGE_BLOCK_SIZE 0x100000	/* 1 MByte */

#define MMU_ADRS_TO_L1D_INDEX_SHIFT	20

#define MMU_PTE_INDEX_MASK  0x000FF000	/* extract PTE index from Virt Addr */
#define MMU_PTE_INDEX_SHIFT 12		/* shift to make that into PTE index */

#define MMU_L1D_TO_BASE_SHIFT 10	/* to turn L1D to PT base addr */

#define MMU_ADDR_PI_MASK    0x00000FFF	/* extract page index from Virt Addr */

#define MMU_PTE_BA_MASK	    0xFFFFF000	/* extract phys base address from PTE */
#define MMU_PTE_SECA_MASK   0xFFF00000	/* extract section base address from PTE */
#define MMU_PTE_TYPE_MASK   0x00000003	/* bits [0:1] */
#define MMU_L1PD_PTE_MASK   0xFFFFFC00	/* extract phys PTE address from L1PD */
#define MMU_L1PD_TYPE_MASK  0x00000003	/* type bits [0:1] */
#define MMU_L1PD_AP_MASK    0x00000C00	/* AP bits [10:11] */
#define MMU_L1PD_CB_MASK    0x0000000C	/* CB bits [3:2] */
#define MMU_L1PD_P_MASK     0x00000200	/* P bit [9] */

#define MMU_L1PD_AP_SHIFT   10
#define MMU_L1PD_CB_SHIFT   2
#define MMU_L1PD_P_SHIFT    9

#define MMU_L1PD_DOMAIN_MASK		0x000001E0      /* domain bits 5-8 */
#define MMU_L1PD_DOMAIN_SHIFT	5	/* shift to make domain field in L1PD */

/*
 * The Domain Access Control Register specifies the way in which access
 * rights are treated for each of sixteen domains. For each domain the
 * current state can be:
 *
 *   00 no access: all accesses generate aborts
 *   01 client:    access rights specified in page tables are checked.
 *   10 reserved
 *   11 manager:   accesses not checked against page tables: all access allowed.
 */

#define MMU_DACR_VAL_NORMAL             1       /* client of 0 */
#define MMU_DACR_VAL_UNPROTECT          0x03    /* manager of 0 */

/* Level 1 Descriptor types */

#define MMU_DESC_TYPE_PAGE 1

#define MMU_DEF_L1_PAGE 0x00000011          /* domain zero, Page descriptor */

/*
 * Section descriptors, such as might be used to set up an intermediate
 * set of page tables on processors such as SA-1100/SA-1500 where this
 * needs to be done from BSP initialisation, before vm(Base)Lib sets up
 * a proper set of page tables.
 */

#define MMU_RAM_SECT_DESC   	0xC1E  	/* R/W cacheable bufferable domain 0 */
#define MMU_MINICACHE_SECT_DESC	0xC1A   /* R/W C+ B- domain 0 */
#define MMU_OTHER_SECT_DESC	0xC12   /* R/W C- B- domain 0 */


/* Level 2 Descriptor or Page Table Entry (PTE) types */

#define MMU_PTE_TYPE_FAULT   0              /* any access will cause a fault */
#define MMU_PTE_TYPE_LG_PAGE 1              /* Large page descriptor */
#define MMU_PTE_TYPE_SM_PAGE 2              /* Small page descriptor */
#define MMU_PTE_TYPE_EX_PAGE 3              /* Extended page descriptor */

/*
 * The Translation Table Base register (TTBR) points to a table of Level 1
 * Descriptors. these are either Invalid Section descriptors, Section
 * Descriptors, or Page Table Descriptors. If Page Table Descriptors, they
 * each point to a table of Level 2 Page Descriptors, or Page Table Entries
 * (PTEs).
 * The 32-bit (virtual) address space allows for 4096 M. Each Level 1
 * Descriptor describes a 1 M area of memory. There are therefore 4096 Level
 * 1 Descriptors, and each table of 256 Level 2 Page Descriptors (PTEs)
 * describes 256 4 kbyte pages.
 */

#define MMU_NUM_L1_DESCS 4096
#define MMU_NUM_L2_DESCS 256
#define MMU_L1_TABLE_SIZE (MMU_NUM_L1_DESCS * sizeof(MMU_LEVEL_1_DESC))


/* No. of pages a Level 1 Descriptor table takes up */

#define MMU_L1_DESC_PAGES (MMU_L1_TABLE_SIZE / MMU_PAGE_SIZE)


/* Size of a Page Table */

#define MMU_PAGE_TABLE_SIZE (MMU_NUM_L2_DESCS * sizeof(PTE))


/*
 * Architecture-dependent MMU states. These are states settable for pages and
 * here they correspond to settings in the Page Table Entries (PTEs).
 *
 * We set Valid/Invalid by setting a Fault second-level descriptor rather
 * than by using the Access Permissions within a small page second-level
 * descriptor. This is because we will want to use the Access Permissions to
 * control supervisor mode access and we cannot then use the AP bits
 * to disallow access as the SR bits in the MMU Control register must be set
 * to 10 in order to control read/write access from Supervisor mode.
 */

#define MMU_STATE_INVALID_STATE 0xFFFFFFFF

#define MMU_STATE_MASK_VALID		0x00000003	/* 2nd level desc type */

#define MMU_STATE_MASK_WRITABLE         0x00000230      /* APX + AP bits (same value as MMU_STATE_MASK_PROTECTION) */

#define MMU_STATE_MASK_CACHEABLE	0x0000000C	/* CB bits */
#define MMU_STATE_MASK_BUFFERABLE       0x00000004

#define MMU_STATE_VALID                 MMU_PTE_TYPE_SM_PAGE /* set to page type */
#define MMU_STATE_VALID_NOT		MMU_PTE_TYPE_FAULT	/* set to type fault */

#define MMU_STATE_MASK_PROTECTION       0x00000230      /* APX + APn bits */

/* APX is bit [9], AP is bits [5:4] of the second-level descriptor */

#define MMU_STATE_SUP_RO                0x00000210      /* APX: 1 AP: 01 */
#define MMU_STATE_SUP_RW                0x00000010      /* APX: 0 AP: 01 */
#define MMU_STATE_SUP_RO_USR_RO         0x00000220      /* APX: 1 AP: 10 */
#define MMU_STATE_SUP_RW_USR_RO         0x00000020      /* APX: 0 AP: 10 */
#define MMU_STATE_SUP_RW_USR_RW         0x00000030      /* APX: 0 AP: 11 */

#define MMU_STATE_WRITABLE              MMU_STATE_SUP_RW

#define MMU_STATE_WRITABLE_NOT		MMU_STATE_SUP_RO

/* Write-through mode is only available on some CPUs */

#define MMU_STATE_CACHEABLE_WRITETHROUGH        0x8

#define MMU_STATE_CACHEABLE_COPYBACK		0xC  /* write back */

/*
 * Set the default state to be copyback. CACHEABLE_WRITETHROUGH can also be
 * selected on those cache designs that permit it.
 */

#define MMU_STATE_CACHEABLE		MMU_STATE_CACHEABLE_COPYBACK

#define MMU_STATE_CACHEABLE_NOT		0x0

#define MMU_STATE_BUFFERABLE		0x4	/* bufferable, not cacheable */
#define MMU_STATE_BUFFERABLE_NOT	0x0	/* will also set not cacheable*/

/*
 * Under VxWorks AE 1.1, when setting either of the attributes SPL_0 or SPL_1,
 * using vmPgAttrSet(), it is necessary to:
 *
 * 1. specify MMU_ATTR_SPL_MSK ORred with MMU_ATTR_CACHE_MSK in the mask
 *    argument to the routine, and:
 * 2. specify the appropriate cache state ORred in to the state argument
 *    to the routine.
 *
 * So, to set a page as bufferable not cacheable, it is necessary to
 * specify both MMU_ATTR_SPL_0 and specify MMU_ATTR_CACHE_OFF in the
 * state argument. To mark a page as cacheable in the minicache,
 * MMU_ATTR_SPL_1 and MMU_ATTR_CACHE_COPYBACK must be specified in the
 * state argument. In both cases, specify MMU_ATTR_SPL_MSK ORred with
 * MMU_ATTR_CACHE_MSK in the mask argument.
 *
 * These restrictions were not present in VxWorks AE 1.0.
 *
 * To "unset" the special attributes SPL_0 or SPL_1, specify
 * MMU_ATTR_CACHE_MSK in the mask parameter (without MMU_ATTR_SPL_MSK),
 * and specify the cacheability required in the state argument.
 *
 * We provide the following aliases on ARM, to make life easier.
 */

#define MMU_ATTR_BUFFERABLE		(MMU_ATTR_SPL_0)

/* MMU Control Register bit allocations */

#define MMUCR_M_ENABLE	 (1<<0)  /* MMU enable */
#define MMUCR_A_ENABLE	 (1<<1)  /* Address alignment fault enable */
#define MMUCR_C_ENABLE	 (1<<2)  /* (data) cache enable */
#define MMUCR_W_ENABLE	 (1<<3)  /* write buffer enable */
#define MMUCR_PROG32	 (1<<4)  /* PROG32 */
#define MMUCR_DATA32	 (1<<5)  /* DATA32 */
#define MMUCR_L_ENABLE	 (1<<6)  /* Late abort on earlier CPUs */
#define MMUCR_BIGEND	 (1<<7)  /* Big-endian (=1), little-endian (=0) */
#define MMUCR_SYSTEM	 (1<<8)  /* System bit, modifies MMU protections */
#define MMUCR_ROM	 (1<<9)  /* ROM bit, modifies MMU protections */
#define MMUCR_F		 (1<<10) /* Should Be Zero */
#define MMUCR_Z_ENABLE	 (1<<11) /* Branch prediction enable on 810 */
#define MMUCR_I_ENABLE	 (1<<12) /* Instruction cache enable */
#define MMUCR_V_ENABLE	 (1<<13) /* Exception vectors remap to 0xFFFF0000 */
#define MMUCR_ALTVECT    MMUCR_V_ENABLE /* alternate vector select */
#define MMUCR_RR_ENABLE	 (1<<14) /* Round robin cache replacement enable */
#define MMUCR_ROUND_ROBIN MMUCR_RR_ENABLE  /* round-robin placement */
#define MMUCR_DISABLE_TBIT   (1<<15) /* disable TBIT */
#define MMUCR_ENABLE_DTCM    (1<<16) /* Enable Data TCM */
#define MMUCR_ENABLE_ITCM    (1<<18) /* Enable Instruction TCM */
#define MMUCR_UNALIGNED_ENABLE (1<<22) /* Enable unaligned access */
#define MMUCR_EXTENDED_PAGE (1<<23)  /* Use extended PTE format */
#define MMUCR_VECTORED_INTERRUPT (1<<24) /* Enable VIC Interface */

#define ADDR_TO_PAGE 12		/* shift phys address to PTE page base address*/

#ifndef	_ASMLANGUAGE

#if (_BYTE_ORDER == _LITTLE_ENDIAN)

/* little-endian */

/*
 * First level page descriptors (little-endian bitfield layout).
 * Bit assignment, LSB first: type [1:0], SBZ [3:2], SBO [4],
 * domain [8:5], P [9], page table base address [31:10].
 */

typedef struct
    {
    UINT type	: 2;		/* descriptor type, 1 => page */
    UINT pad1	: 2;		/* SBZ */
    UINT pad2	: 1;		/* SBO */
    UINT domain	: 4;		/* domain number */
    UINT pbit   : 1;            /* 'P' bit (ECC enable on XScale - see BE variant) */
    UINT addr	: 22;		/* base address of page table */
    } PAGE_DESC_FIELD;


/*
 * Layout of Page Table Entries (PTEs), actually small page descriptors
 * (little-endian bitfield layout).
 * NOTE(review): per the MMU_STATE_SUP_* definitions above (e.g. 0x210 =
 * "APX: 1 AP: 01"), bit [9] - the top bit of the 'tex' field here -
 * carries APX; 'tex' proper occupies bits [8:6].
 */

typedef struct
    {
    UINT type   : 2;            /* page type, 3 => extended small page */
    UINT cb     : 2;            /* cacheable/bufferable bits */
    UINT ap     : 2;            /* access permission */
    UINT tex    : 4;            /* type extension field (+ APX in top bit) */
    UINT sbz    : 2;            /* should be zero */
    UINT addr   : 20;           /* page base address */
    } PTE_FIELD;

#else /* (_BYTE_ORDER == _LITTLE_ENDIAN) */

/* big-endian */

/*
 * First level page descriptors (big-endian bitfield layout - fields in
 * reverse order of the little-endian variant so the bit positions match).
 */

typedef struct
    {
    UINT addr	: 22;		/* base address of page table */
    UINT pbit   : 1;            /* XSCALE 'P' bit */
    UINT domain	: 4;		/* domain number */
    UINT pad2	: 1;		/* SBO */
    UINT pad1	: 2;		/* SBZ */
    UINT type	: 2;		/* descriptor type, 1 => page */
    } PAGE_DESC_FIELD;

/*
 * Layout of Page Table Entries (PTEs), actually small page descriptors
 * (big-endian bitfield layout - fields in reverse order of the
 * little-endian variant so the bit positions match).
 */

typedef struct
    {
    UINT addr   : 20;           /* page base address */
    UINT sbz    : 2;            /* should be zero */
    UINT tex    : 4;            /* type extension field (+ APX in top bit) */
    UINT ap     : 2;            /* access permission */
    UINT cb     : 2;            /* cacheable/bufferable bits */
    UINT type   : 2;            /* page type, 3 => extended small page */
    } PTE_FIELD;

#endif /* (_BYTE_ORDER == _LITTLE_ENDIAN) */

/*
 * First level descriptor access: a Level 1 Descriptor viewed either as
 * named bitfields or as a raw 32-bit word.
 */

typedef union
    {
    PAGE_DESC_FIELD fields;
    UINT32 bits;
    } MMU_LEVEL_1_DESC;

/*
 * Second level descriptor access: a Page Table Entry viewed either as
 * named bitfields or as a raw 32-bit word.
 */

typedef union
    {
    PTE_FIELD fields;
    UINT32 bits;
    } PTE;

/*
 * Translation table handle: holds the pointer to the First level
 * descriptor table (MMU_NUM_L1_DESCS entries).
 */

typedef struct mmuTransTblStruct
    {
    MMU_LEVEL_1_DESC *pLevel1Table;
    } MMU_TRANS_TBL;
/* Externally visible mmuLib, mmuMapLib and mmuALib2 routines */

IMPORT UINT32 mmuReadId (void);			/* read MMU/CPU ID register */
IMPORT PHYS_ADDR mmuVirtToPhys (VIRT_ADDR addr);	/* virtual -> physical */
IMPORT VIRT_ADDR mmuPhysToVirt (PHYS_ADDR addr);	/* physical -> virtual */

/* MMU Control Register accessors; the function pointers select the
 * hard (actual register) or soft (cached copy) implementation below.
 */

IMPORT UINT32	(* mmuCrGet) (void);
IMPORT void	(* mmuModifyCr) (UINT32 value, UINT32 mask);
IMPORT UINT32	mmuHardCrGet (void);
IMPORT UINT32	mmuSoftCrGet (void);
IMPORT void	mmuModifyHardCr (UINT32 value, UINT32 mask);
IMPORT void	mmuModifySoftCr (UINT32 value, UINT32 mask);

/*
 * Pointer to a function that can be filled in by the BSP to point to a
 * function that returns a memory partition id for an area of memory to store
 * the Level 1 and Level 2 page tables. This area must be big enough for all
 * use. No provision is made to use that memory and then continue using
 * system memory once that has been filled.
 * N.B. at the time of writing, this feature has NEVER been tested at all.
 */

IMPORT PART_ID (* _func_armPageSource) (void);

#endif  /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif  /* __INCmmuArmArch6PalLibh */
