/*
 * Copyright (C) 2017-2020 Alibaba Group Holding Limited
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <common.h>
#include <linux/compiler.h>

#ifdef __KERNEL__
/*
* CR1 bits (CP#15 CR1)
*/
#define CR_M    (1 << 0)    /* MMU enable                */
#define CR_A    (1 << 1)    /* Alignment abort enable        */
#define CR_C    (1 << 2)    /* Dcache enable            */
#define CR_W    (1 << 3)    /* Write buffer enable            */
#define CR_P    (1 << 4)    /* 32-bit exception handler        */
#define CR_D    (1 << 5)    /* 32-bit data address range        */
#define CR_L    (1 << 6)    /* Implementation defined        */
#define CR_B    (1 << 7)    /* Big endian                */
#define CR_S    (1 << 8)    /* System MMU protection        */
#define CR_R    (1 << 9)    /* ROM MMU protection            */
#define CR_F    (1 << 10)   /* Implementation defined        */
#define CR_Z    (1 << 11)   /* Implementation defined        */
#define CR_I    (1 << 12)   /* Icache enable            */
#define CR_V    (1 << 13)   /* Vectors relocated to 0xffff0000    */
#define CR_RR   (1 << 14)   /* Round Robin cache replacement    */
#define CR_L4   (1 << 15)   /* LDR pc can set T bit            */
#define CR_DT   (1 << 16)
#define CR_IT   (1 << 18)
#define CR_ST   (1 << 19)
#define CR_FI   (1 << 21)   /* Fast interrupt (lower latency mode)    */
#define CR_U    (1 << 22)   /* Unaligned access operation        */
#define CR_XP   (1 << 23)   /* Extended page tables            */
#define CR_VE   (1 << 24)   /* Vectored interrupts            */
#define CR_EE   (1 << 25)   /* Exception (Big) Endian        */
#define CR_TRE  (1 << 28)   /* TEX remap enable            */
#define CR_AFE  (1 << 29)   /* Access flag enable            */
#define CR_TE   (1 << 30)   /* Thumb exception enable        */

/* 16 KiB L1 page table: 4096 section entries * 4 bytes each (ARM short-descriptor format) */
#define PGTABLE_SIZE        (4096 * 4)

/*
* This is used to ensure the compiler did actually allocate the register we
* asked it for some inline assembly sequences.  Apparently we can't trust
* the compiler from one version to another so a bit of paranoia won't hurt.
* This string is meant to be concatenated with the inline asm string and
* will cause compilation to stop on mismatch.
* (for details, see gcc PR 15089)
*/
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

/**
 * save_boot_params() - Save boot parameters before starting reset sequence
 *
 * If you provide this function it will be called immediately U-Boot starts,
 * both for SPL and U-Boot proper.
 *
 * All registers are unchanged from U-Boot entry. No registers need be
 * preserved.
 *
 * This is not a normal C function. There is no stack. Return by branching to
 * save_boot_params_ret.
 *
 * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3);
 */

/**
 * save_boot_params_ret() - Return from save_boot_params()
 *
 * If you provide save_boot_params(), then you should jump back to this
 * function when done. Try to preserve all registers.
 *
 * If your implementation of save_boot_params() is in C then it is acceptable
 * to simply call save_boot_params_ret() at the end of your function. Since
 * there is no link register set up, you cannot just exit the function. U-Boot
 * will return to the (uninitialised) value of lr, and likely crash/hang.
 *
 * If your implementation of save_boot_params() is in assembler then you
 * should use 'b' or 'bx' to return to save_boot_params_ret.
 */
void save_boot_params_ret(void);

/*
 * isb() here is only a compiler barrier: it prevents the compiler from
 * reordering memory accesses across it but emits no instruction.
 * NOTE(review): a true ARM ISB would be the "isb" instruction (or
 * "mcr p15, 0, r0, c7, c5, 4" pre-v7); presumably this port does not
 * need the hardware barrier -- confirm against the target CPU.
 */
#define isb() __asm__ __volatile__ ("" : : : "memory")

/*
 * One-instruction no-op. The semicolon belongs at the call site; keeping
 * it out of the macro body avoids the empty-statement expansion of
 * "nop();" that breaks un-braced if/else constructs.
 */
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t")

/* Wait-for-interrupt: stubbed out to nothing on this platform. */
#define wfi()

/* Read the current program status register (CPSR). */
static inline unsigned long get_cpsr(void)
{
    unsigned long val;

    asm volatile("mrs %0, cpsr" : "=r"(val));
    return val;
}

/*
 * Report whether the CPU is running in HYP mode.
 *
 * This build has no LPAE/virtualization support, so the answer is a
 * compile-time constant 0 -- the compiler can then eliminate every
 * hyp-only code path that tests it.
 */
static inline int is_hyp(void)
{
    return 0;
}

/*
 * Read the system control register (SCTLR, CP15 c1), using the HYP
 * (opc1 == 4) encoding when running in hypervisor mode.
 */
static inline unsigned int get_cr(void)
{
    unsigned int reg;

    if (!is_hyp())
        asm volatile("mrc p15, 0, %0, c1, c0, 0	@ get CR"
                     : "=r"(reg) : : "cc");
    else
        asm volatile("mrc p15, 4, %0, c1, c0, 0	@ get CR"
                     : "=r"(reg) : : "cc");
    return reg;
}

/*
 * Write the system control register (SCTLR, CP15 c1), using the HYP
 * (opc1 == 4) encoding when running in hypervisor mode. The trailing
 * isb() (a compiler barrier in this header) keeps later memory accesses
 * from being reordered before the write.
 */
static inline void set_cr(unsigned int val)
{
    if (!is_hyp())
        asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
                     : : "r"(val) : "cc");
    else
        asm volatile("mcr p15, 4, %0, c1, c0, 0	@ set CR"
                     : : "r"(val) : "cc");
    isb();
}

/* Read the domain access control register (DACR, CP15 c3). */
static inline unsigned int get_dacr(void)
{
    unsigned int reg;

    asm("mrc p15, 0, %0, c3, c0, 0	@ get DACR"
        : "=r"(reg) : : "cc");
    return reg;
}

/*
 * set_dacr() - Write the domain access control register (DACR, CP15 c3).
 * @val: new DACR value (one 2-bit access field per MMU domain).
 *
 * The isb() that follows (only a compiler barrier in this header) keeps
 * later memory accesses from being reordered before the write.
 */
static inline void set_dacr(unsigned int val)
{
    asm volatile("mcr p15, 0, %0, c3, c0, 0	@ set DACR"
                : : "r"(val) : "cc");
    isb();
}

/* Access-permission bits (AP[1:0] = 0b11) for an L1 section descriptor */
#define TTB_SECT_AP        (3 << 10)
/* options available for data cache on each page */
/*
 * NOTE(review): the values look like ARM short-descriptor L1 section
 * attribute encodings (section type plus C/B and TEX cacheability bits)
 * -- confirm against the MMU setup code that consumes them.
 */
enum dcache_option {
    DCACHE_OFF = 0x12,
    DCACHE_WRITETHROUGH = 0x1a,
    DCACHE_WRITEBACK = 0x1e,
    DCACHE_WRITEALLOC = 0x16,
};

/* Size of an MMU section */
enum {
    MMU_SECTION_SHIFT    = 20, /* 1MB */
    /* 0x100000: one L1 section descriptor maps this much address space */
    MMU_SECTION_SIZE    = 1 << MMU_SECTION_SHIFT,
};

/**
 * Register an update to the page tables, and flush the TLB
 *
 * \param start        start address of update in page table
 * \param stop        stop address of update in page table
 */
void mmu_page_table_flush(unsigned long start, unsigned long stop);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#ifndef __ASSEMBLY__
/**
 * Change the cache settings for a region.
 *
 * \param start        start address of memory region to change
 * \param size        size of memory region to change
 * \param option    dcache option to select
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                    enum dcache_option option);

#ifdef CONFIG_SYS_NONCACHED_MEMORY
void noncached_init(void);
phys_addr_t noncached_alloc(size_t size, size_t align);
#endif /* CONFIG_SYS_NONCACHED_MEMORY */

#endif /* __ASSEMBLY__ */

#endif /* __ASM_SYSTEM_H */
