/* mmuArmXSCALELib.h - XScale MMU library header file */

/* Copyright 1998-2005 Wind River Systems, Inc. */

/*
modification history
--------------------
01f,08feb05,rec  use VIRT_ADDR and PHYS_ADDR instead of void
01e,10feb05,jb   Reverting to ROM mode for shared data/libraries
01d,09dec04,jb   Palify Arm MMU
01c,08sep03,scm  add P-bit support...
01b,23jul01,scm  change XScale name to conform to coding standards...
01a,31aug00,scm  created.
*/

#ifndef __INCmmuArmXSCALELibh
#define __INCmmuArmXSCALELibh

#ifdef __cplusplus
extern "C" {
#endif

#include "memLib.h"

#define MMU_ID_XSCALE           0x052000

/* Let the Pal code know XScale has unique structures */

#define MMU_ARCHITECTURE_DEFINES_STRUCTURES

#include "mmuArmPalLib.h"

/* XScale-specific definitions follow */

/*
 * XScale overrides of the generic ARM PAL MMU state macros.  The XScale
 * MMU uses "extended small page" second-level descriptors (see
 * MMU_PTE_TYPE_EX_PAGE below), which add an X (extension) bit and a TEX
 * field, so several definitions from mmuArmPalLib.h are redefined here.
 */

#undef MMU_DEF_L1_PAGE
#define MMU_DEF_L1_PAGE 0x00000001          /* domain zero, Page descriptor */

#undef MMU_STATE_MASK_WRITABLE
#define MMU_STATE_MASK_WRITABLE         0x00000030      /* All APn bits */

/* X (extension) bit of an extended small page descriptor, bit 6 */

#define MMU_STATE_X_BIT                 0x00000040

#define MMU_STATE_MASK_EX_BUFFERABLE    (MMU_STATE_X_BIT | MMU_STATE_MASK_BUFFERABLE)
#define MMU_STATE_MASK_EX_CACHEABLE     (MMU_STATE_X_BIT | MMU_STATE_MASK_CACHEABLE)

#undef MMU_STATE_VALID
#define MMU_STATE_VALID                 MMU_PTE_TYPE_EX_PAGE /* set to page type */

#undef MMU_STATE_WRITABLE
#define MMU_STATE_WRITABLE              0x00000030      /* APn bits to 11 */

#undef MMU_STATE_MASK_PROTECTION
#define MMU_STATE_MASK_PROTECTION       0x00000030      /* APn bits */

#undef MMU_STATE_SUP_RO
#define MMU_STATE_SUP_RO                0x00000000      /* AP: 00 */

#undef MMU_STATE_SUP_RW
#define MMU_STATE_SUP_RW                0x00000010      /* AP: 01 */

#undef MMU_STATE_USR_RO_SUP_RW
#define MMU_STATE_USR_RO_SUP_RW         0x00000020      /* AP: 10 */

#undef MMU_STATE_USR_RW_SUP_RW
#define MMU_STATE_USR_RW_SUP_RW         0x00000030      /* AP: 11 */

    /* the only AP bits valid in extended small page tables are the AP0 bits */

#define MMU_MSK_UNUSED_AP               0xfffff03f      /* clears AP1..AP3 */
#define MMU_MSK_CHK_APS                 0x00000fc0      /* AP1..AP3 + X bit */
#define MMU_MSK_X_BIT                   0x00000040

/*
 * Cache-mode states and attributes.  The X bit combined with the C/B
 * bits selects the XScale-specific cache behaviors (mini data cache,
 * write allocate, no-coalesce) on top of the generic copyback /
 * writethrough modes.
 */

#define MMU_STATE_CACHEABLE_MINICACHE    0x48 /* allocate in minicache, X=1, CB=10  */

    /* the mini data cache attributes to the Auxiliary Control Reg */
    /* used by mmuSetMDBits...                                     */

#define MD_COPYBACK        0x00000000
#define MD_WRITEALLOCATE   0x00000010
#define MD_WRITETHRU       0x00000020

#define MMU_STATE_CACHEABLE_EX_COPYBACK  (MMU_STATE_X_BIT | MMU_STATE_CACHEABLE_COPYBACK)

#define MMU_STATE_EX_CACHEABLE          MMU_STATE_CACHEABLE_EX_COPYBACK

#define MMU_STATE_EX_CACHEABLE_NOT      0x0

    /* bufferable, not cacheable, no coalesce */

#define MMU_STATE_EX_BUFFERABLE         (MMU_STATE_X_BIT | MMU_STATE_BUFFERABLE)
#define MMU_STATE_EX_BUFFERABLE_NOT     0x0     /* will also set not cacheable, no coalesce */

#define MMU_ATTR_CACHE_MINICACHE        (MMU_ATTR_SPL_1 | MMU_ATTR_CACHE_COPYBACK)

    /*
     * two new states with extended small page tables: bufferable with no
     * coalescing, and write back with read/write allocate...
     */

#define MMU_ATTR_NO_COALESCE (MMU_ATTR_SPL_2 | MMU_ATTR_CACHE_OFF) /* Cache -- no coalesce into buffers */
#define MMU_ATTR_WRITEALLOCATE (MMU_ATTR_SPL_3 | MMU_ATTR_CACHE_COPYBACK) /* Cache -- write allocate */

    /* cache states determined by MD-bits of Auxiliary Control Register */

#define MMU_ATTR_CACHE_MINICACHE_CB  (MMU_ATTR_SPL_1 | MMU_ATTR_CACHE_COPYBACK)
#define MMU_ATTR_CACHE_MINICACHE_WA  (MMU_ATTR_SPL_1 | MMU_ATTR_WRITEALLOCATE)
#define MMU_ATTR_CACHE_MINICACHE_WT  (MMU_ATTR_SPL_1 | MMU_ATTR_CACHE_WRITETHRU)

    /* Write-through mode is only available on some CPUs */

#define MMU_STATE_CACHEABLE_WRITETHROUGH        0x8

/*
 * Values to be used when mmuEnable() is called. This will be after the MMU has
 * been initialised by sysInit()/romInit() and after cacheLib has set whatever
 * cache enable settings have been chosen.
 *
 * M 1 Enable MMU
 * A 0 Disable address alignment fault
 * C X ((D-)Cache Enable) Controlled by cacheLib
 * W X (Write Buffer) Controlled by cacheLib
 * P 1 (PROG32) should be set before this
 * D 1 (DATA32) should be set before this
 * L 1 (Late abort on earlier CPUs) ignore
 * B X (Big/Little-endian) should be set before this
 * S 1 (System)
 * R 0 (ROM)
 * F 0 Should be Zero
 * Z X (Branch prediction enable on 810) Controlled by cacheLib
 * I X (I-cache enable) Controlled by cacheLib
 *
 * For time being, do not enable the address alignment fault, as GCC
 * currently generates unaligned accesses in its code, and switching this
 * on will cause immediate faults. So, do not put it into the enable
 * mask.
 *
 * We used to clear all the Reserved/Should Be Zero bits when the
 * MMU/MPU was enabled, by including one bits for them in the definition
 * of MMU_ENABLE_MASK. We no longer do this, as CPU designers may extend
 * the definitions of the bits within the MMU Control Register. The MMU
 * Control register is initialised within romInit()/sysInit(), as the BSP
 * is the only place where the particular details of the CPU/MMU are
 * actually known. In general, it will be appropriate for the BSP to
 * initialise the MMUCR Reserved/Should Be Zero bits to zero. When the
 * MMU is enabled, we will only change the bits we are particularly
 * concerned about, by using a Read-Modify-Write strategy.
 */

 /* This defines the operating mode MMUCR_ROM, MMUCR_SYSTEM, or neither */

#define MMUCR_MODE MMUCR_ROM            /* This MUST remain as ROM for shared libraries */

/* bits of the MMU Control Register touched when enabling the MMU (RMW) */

#define MMU_ENABLE_MASK  (MMUCR_M_ENABLE | MMUCR_SYSTEM | MMUCR_ROM)

#define MMU_ENABLE_VALUE (MMUCR_M_ENABLE | MMUCR_MODE)

/* initial MMUCR value; big-endian builds additionally set the B bit */

#if (_BYTE_ORDER == _LITTLE_ENDIAN)
#define MMU_INIT_VALUE (MMUCR_PROG32 | MMUCR_DATA32 | MMUCR_L_ENABLE | \
                            MMUCR_MODE)
#else
#define MMU_INIT_VALUE (MMUCR_PROG32 | MMUCR_DATA32 | MMUCR_L_ENABLE | \
                            MMUCR_BIGEND | MMUCR_MODE)
#endif

#ifndef _ASMLANGUAGE

#if (_BYTE_ORDER == _LITTLE_ENDIAN)

/* little-endian */

/* First level page descriptors */

/*
 * First-level page-table descriptor, little-endian bit layout.
 * Bits [1:0] hold the descriptor type (1 => page table); bits [31:10]
 * hold the base address of the second-level page table.  Bit 9 is the
 * XScale-specific 'P' bit, manipulated via mmuArmXSCALEPBitSet/Clear.
 */
typedef struct
    {
    UINT type	: 2;		/* descriptor type, 1 => page */
    UINT pad1	: 2;		/* SBZ */
    UINT pad2	: 1;		/* SBO */
    UINT domain	: 4;		/* domain number */
    UINT pbit   : 1;            /* XSCALE 'P' bit */
    UINT addr	: 22;		/* base address of page table */
    } PAGE_DESC_FIELD;


/* Layout of Page Table Entries (PTEs), actually small page descriptors */

/*
 * Second-level (PTE) extended small page descriptor, little-endian bit
 * layout: type [1:0] (3 => extended small page), C/B [3:2], AP [5:4],
 * TEX [9:6], SBZ [11:10], page base address [31:12].
 */
typedef struct
    {
    UINT type   : 2;            /* page type, 3 => extended small page */
    UINT cb     : 2;            /* cacheable/bufferable bits */
    UINT ap     : 2;            /* access permission */
    UINT tex    : 4;            /* type extension field */
    UINT sbz    : 2;            /* should be zero */
    UINT addr   : 20;           /* page base address */
    } PTE_FIELD;

#else /* (_BYTE_ORDER == _LITTLE_ENDIAN) */

/* big-endian */

/* First level page descriptors */

/*
 * First-level page-table descriptor for big-endian builds: identical
 * fields to the little-endian version above, declared in reverse order
 * for big-endian bit-field allocation.
 * NOTE: the addr/type comments were previously swapped (copy/paste
 * error vs. the little-endian twin); corrected here.
 */
typedef struct
    {
    UINT addr	: 22;		/* base address of page table */
    UINT pbit   : 1;            /* XSCALE 'P' bit */
    UINT domain	: 4;		/* domain number */
    UINT pad2	: 1;		/* SBO */
    UINT pad1	: 2;		/* SBZ */
    UINT type	: 2;		/* descriptor type, 1 => page */
    } PAGE_DESC_FIELD;

/* Layout of Page Table Entries (PTEs), actually small page descriptors */

/*
 * Second-level extended small page descriptor for big-endian builds:
 * same fields as the little-endian version, declared in reverse order
 * for big-endian bit-field allocation.
 */
typedef struct
    {
    UINT addr   : 20;           /* page base address */
    UINT sbz    : 2;            /* should be zero */
    UINT tex    : 4;            /* type extension field */
    UINT ap     : 2;            /* access permission */
    UINT cb     : 2;            /* cacheable/bufferable bits */
    UINT type   : 2;            /* page type, 3 => extended small page */
    } PTE_FIELD;

#endif /* (_BYTE_ORDER == _LITTLE_ENDIAN) */

/* First level descriptor access */

/* first-level descriptor: bit-field view or raw 32-bit word */

typedef union
    {
    PAGE_DESC_FIELD fields;     /* structured (bit-field) access */
    UINT32 bits;                /* raw word access */
    } MMU_LEVEL_1_DESC;

/* Second level descriptor access */

/* second-level descriptor (PTE): bit-field view or raw 32-bit word */

typedef union
    {
    PTE_FIELD fields;           /* structured (bit-field) access */
    UINT32 bits;                /* raw word access */
    } PTE;

/* Pointer to a First level table */

/* translation table handle: points to the first-level descriptor table */

typedef struct mmuTransTblStruct
    {
    MMU_LEVEL_1_DESC *pLevel1Table;     /* base of first-level table */
    } MMU_TRANS_TBL;

#if defined(__STDC__) || defined(__cplusplus)

/* install the XScale MMU library, with phys<->virt translation routines */
IMPORT  void    mmuArmXSCALELibInstall (VIRT_ADDR(physToVirt) (PHYS_ADDR),
                                        PHYS_ADDR(virtToPhys) (VIRT_ADDR));
/* set/get the translation table base (points to a level-1 table) */
IMPORT  void    mmuArmXSCALETtbrSet (MMU_LEVEL_1_DESC *);
IMPORT  MMU_LEVEL_1_DESC *  mmuArmXSCALETtbrGet ( void);
/* set the domain access control value */
IMPORT  void    mmuArmXSCALEDacrSet (UINT32 dacrVal);
/* flush a single TLB entry / all I+D TLB entries */
IMPORT  void    mmuArmXSCALETLBIDFlushEntry (void *addr);
IMPORT  void    mmuArmXSCALETLBIDFlushAll (void);
/* enable/disable the MMU; NOTE(review): semantics of cacheState arg
 * are not visible here -- see mmuArmXSCALELib.c */
IMPORT  void    mmuArmXSCALEAEnable (UINT32 cacheState);
IMPORT  void    mmuArmXSCALEADisable (void);
/* set/clear/query the XScale 'P' bit over a virtual address range */
IMPORT  STATUS  mmuArmXSCALEPBitSet ( void * virtAddr, UINT32 size);
IMPORT  STATUS  mmuArmXSCALEPBitClear ( void * virtAddr, UINT32 size);
IMPORT  STATUS  mmuArmXSCALEPBitGet ( void * virtAddr );
/* get/set the Auxiliary Control Register (MD bits, see MD_* above) */
IMPORT  STATUS  mmuArmXSCALEAcrGet (void );
IMPORT  void    mmuArmXSCALEAcrSet (UINT32 acrVal);

#else   /* __STDC__ */

/* K&R-style declarations for pre-ANSI compilers; see above for arguments */

IMPORT  void    mmuArmXSCALELibInstall ();
IMPORT  void    mmuArmXSCALETtbrSet ();
IMPORT  MMU_LEVEL_1_DESC *  mmuArmXSCALETtbrGet ();
IMPORT  void    mmuArmXSCALEDacrSet ();
IMPORT  void    mmuArmXSCALETLBIDFlushEntry ();
IMPORT  void    mmuArmXSCALETLBIDFlushAll ();
IMPORT  void    mmuArmXSCALEAEnable ();
IMPORT  void    mmuArmXSCALEADisable ();
IMPORT  STATUS  mmuArmXSCALEPBitSet ();
IMPORT  STATUS  mmuArmXSCALEPBitClear ();
IMPORT  STATUS  mmuArmXSCALEPBitGet ();
IMPORT  STATUS  mmuArmXSCALEAcrGet ();
IMPORT  void    mmuArmXSCALEAcrSet ();

#endif  /* __STDC__ */

#endif  /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif  /* __INCmmuArmXSCALELibh */
