/*
 * Xmc.h
 *
 *  Created on: Oct 13, 2018
 *      Author: ax
 */
#include <Type.h>
#include <stdio.h>
#include <Device.h>

//See the device memory-map documentation for more information.
typedef enum {
    //MAR (Memory Attribute Register) bit values — per-16MB-region cacheability
    MAR_SHM         = 0x00,     //no cache, no prefetch, used for IO buffer
    MAR_PC          = 0x01,     //program cache enable
    MAR_WTE         = 0x02,     //write through enable
    MAR_PCX         = 0x04,     //external program cache support
    MAR_FPX         = 0x08,     //prefetchable
    //MPAX permission field (MPAX_RWX_PREM) bit values — OR these together
    MAPX_NU         = 0x00,     //map not used
                                //NOTE(review): name is likely a typo for MPAX_NU;
                                //kept as-is for source compatibility with callers
    MPAX_UX         = 0x01,     //user executable
    MPAX_UW         = 0x02,     //user writable
    MPAX_UR         = 0x04,     //user readable
    MPAX_SX         = 0x08,     //Superuser executable
    MPAX_SW         = 0x10,     //Superuser writable
    MPAX_SR         = 0x20,     //Superuser readable
    MPAX_EXT        = 0x40,     //External memory interface (EMIF) map enabled
    MPAX_EN         = 0x80      //map enable
} Xmc_Attr;

/*
 * Segment-size encoding: a power-of-two size of 2^x bytes is encoded as
 * (x - 1).  SEG_4K .. SEG_4G are the encoded values for the supported
 * segment sizes (4KB .. 4GB).
 * Fix: macro arguments are fully parenthesized so expression arguments
 * (e.g. SEG_POW_SZ(a + b)) expand correctly.
 */
#define SEG_POW_SZ(x)       ((x) - 1)
#define SEG_4K      SEG_POW_SZ(12)
#define SEG_8K      SEG_POW_SZ(13)
#define SEG_16K     SEG_POW_SZ(14)
#define SEG_32K     SEG_POW_SZ(15)
#define SEG_64K     SEG_POW_SZ(16)
#define SEG_128K    SEG_POW_SZ(17)
#define SEG_256K    SEG_POW_SZ(18)
#define SEG_512K    SEG_POW_SZ(19)
#define SEG_1M      SEG_POW_SZ(20)
#define SEG_2M      SEG_POW_SZ(21)
#define SEG_4M      SEG_POW_SZ(22)
#define SEG_8M      SEG_POW_SZ(23)
#define SEG_16M     SEG_POW_SZ(24)
#define SEG_32M     SEG_POW_SZ(25)
#define SEG_64M     SEG_POW_SZ(26)
#define SEG_128M    SEG_POW_SZ(27)
#define SEG_256M    SEG_POW_SZ(28)
#define SEG_512M    SEG_POW_SZ(29)
#define SEG_1G      SEG_POW_SZ(30)
#define SEG_2G      SEG_POW_SZ(31)
#define SEG_4G      SEG_POW_SZ(32)
/*
 * Decode an encoded segment size back to a byte count.
 * Fix: unsigned literal — with a signed int, SEG_SZ(SEG_2G) was
 * 1 << 31, which is undefined behavior (left shift into the sign bit).
 * NOTE: SEG_SZ(SEG_4G) still cannot be represented in 32 bits.
 */
#define SEG_SZ(x)   (1u << ((x) + 1))
/* Number of 4KB pages covered by an encoded segment size. */
#define SEG_4K_SZ(x)   (1u << ((x) - SEG_4K))

/* Setup phases passed to Xmc_setup(); it must be called once per phase,
 * in order. */
typedef enum {
    XMC_MAP = 0,    //step 1: map only, so the program loads to the correct physical address
    XMC_ROBUST,     //step 2: robustness — remove execute permission on data segments
                    //and write permission on code segments
    XMC_ROUTINE_MAX_CNT     //count of phases, not a valid routine value
} Xmc_Routine;

//TODO FIXME map to your own.
/* Configure the XMC for core 'cid'.  Must be called twice: first with
 * XMC_MAP, then with XMC_ROBUST (see Xmc_Routine above). */
extern void __init Xmc_setup(CorePac cid, Xmc_Routine routine);


/*
 *  ======== Cache_setMar ========
 *  Set the MAR register(s) that corresponds to the specified address range.
 *
 *  base4k/end4k - address range bounds; names suggest they are expressed
 *                 in 4KB units — TODO confirm against the implementation
 *  attr         - a MAR_* value (or OR of MAR_* bits) from Xmc_Attr
 */
extern void __init
Xmc_setMar(
    Uint32   base4k,
    Uint32   end4k,
    Xmc_Attr attr
);

/*
 *  ======== Xmc_map ========
 *  Program one XMC address-translation (MPAX) entry.
 *
 *  priorityIndex - index of the map entry to program
 *  pa4kAddr      - physical address; name suggests it is expressed in
 *                  4KB units — TODO confirm against the implementation
 *  va            - virtual (CPU-visible) base address
 *  pow2Size      - segment size, presumably one of the SEG_* encodings
 *                  above — TODO confirm
 *  permission    - presumably an OR of the MPAX_* bits from Xmc_Attr
 *
 *  Fix: parameter name typo "premission" -> "permission" (prototype
 *  parameter names are documentation only; no ABI change).
 */
extern void __init
Xmc_map(
    Uint8    priorityIndex,
    Uint32   pa4kAddr,
    Uint32   va,
    Uint8    pow2Size,
    Uint8    permission
);


/*====Cache====*/
/* Type */
/* Bitmask selecting which cache(s) an operation applies to.
 * Composite values are ORs of the single-cache bits. */
typedef enum ICache_Type {
    Cache_L1P   = 0x1,      //L1 program cache
    Cache_L1D   = 0x2,      //L1 data cache
    Cache_L1    = 0x3,      //L1P | L1D
    Cache_L2P   = 0x4,      //L2 program cache
    Cache_L2D   = 0x8,      //L2 data cache
    Cache_L2    = 0xC,      //L2P | L2D
    Cache_ALLP  = 0x5,      //L1P | L2P (all program caches)
    Cache_ALLD  = 0xA,      //L1D | L2D (all data caches)
    Cache_ALL   = 0x7fff    //every cache
} Cache_Type;


/* Stall until all outstanding memory operations have completed, using the
 * MFENCE instruction.
 * NOTE(review): MFENCE is issued twice — presumably a workaround for a
 * known device erratum; confirm against the silicon errata sheet. */
static __inline void
Cache_wait(
    void
){
    _mfence();
    _mfence();
}

/**  ======== Cache_inv ========
 *  Invalidate the range of memory within the specified starting address and
 *  byte count.  The range of addresses operated on gets quantized to whole
 *  cache lines in each cache.  All cache lines in range are invalidated in L1P
 *  cache.  All cache lines in range are invalidated in L1D cache.
 *  All cache lines in range are invalidated in L2 cache.
 *
 *  blockPtr - start of the address range
 *  byteCnt  - length of the range in bytes
 *  type     - which cache(s) to operate on (Cache_Type bitmask)
 *  wait     - if true, block until the operation completes
 */
extern
void
Cache_inv(
    void*       blockPtr,
    size_t      byteCnt,
    Cache_Type  type,
    bool        wait
);
/**  ======== Cache_wb ========
 *  Writes back the range of memory within the specified starting address
 *  and byte count.  The range of addresses operated on gets quantized to
 *  whole cache lines in each cache.  There is no effect on L1P cache.
 *  All cache lines within the range are left valid in L1D cache and the data
 *  within the range in L1D cache will be written back to L2 or external.
 *  All cache lines within the range are left valid in L2 cache and the data
 *  within the range in L2 cache will be written back to external.
 *
 *  blockPtr - start of the address range
 *  byteCnt  - length of the range in bytes
 *  type     - which cache(s) to operate on (Cache_Type bitmask)
 *  wait     - if true, block until the operation completes
 */
extern
void
Cache_wb(
    void*       blockPtr,
    size_t      byteCnt,
    Cache_Type  type,
    bool        wait
);

/**  ======== Cache_wbInv ========
 *  Writes back and invalidates the range of memory within the specified
 *  starting address and byte count.  The range of addresses operated on gets
 *  quantized to whole cache lines in each cache.  All cache lines within range
 *  are invalidated in L1P cache.  All cache lines within the range are
 *  written back to L2 or external and then invalidated in L1D cache.
 *  All cache lines within the range are written back to external and then
 *  invalidated in L2 cache.
 *
 *  blockPtr - start of the address range
 *  byteCnt  - length of the range in bytes
 *  type     - which cache(s) to operate on (Cache_Type bitmask)
 *  wait     - if true, block until the operation completes
 */
extern
void
Cache_wbInv(
    void*       blockPtr,
    size_t      byteCnt,
    Cache_Type  type,
    bool        wait
);

/*
 *  ======== Cache_wbInvAll ========
 *  Performs a global write back and invalidate.  All cache lines are
 *  invalidated in L1P cache.  All cache lines are written back to L2 or
 *  external then invalidated in L1D cache.  All cache lines are
 *  written back to external and then invalidated in L2 cache.
 *
 *  Fix: "()" in C declares a function with unspecified parameters (removed
 *  in C23); "(void)" declares a true zero-argument prototype so the
 *  compiler can diagnose calls made with arguments.
 */
extern void Cache_wbInvAll(void);

