#ifndef __ASM_MMU_H_
#define __ASM_MMU_H_

#include <utils/const.h>
#include <utils/atomic.h>

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
/*
 * NOTE(review): the name suggests bit 48 distinguishes user ASIDs
 * (kpti-style split); confirm against the context-switch/TLBI users.
 */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
/* The 16-bit ASID occupies TTBRx_EL1 bits [63:48]. */
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

#ifndef __ASSEMBLY__

#include <asm/cpufeature.h>

/*
 * Per-mm MMU context. The low 16 bits of 'id' hold the hardware ASID
 * (see the ASID() macro below); it is an atomic64_t so that readers
 * and updaters elsewhere can access it atomically.
 */
typedef struct {
    atomic64_t	id;
} mm_context_t;

/*
 * This macro is only used by the TLBI code, which cannot race with an
 * ASID change and therefore doesn't need to reload the counter using
 * atomic64_read.
 *
 * The low 16 bits of context.id are the ASID proper — the same width
 * of field that TTBR_ASID_MASK selects in the TTBR.
 */
#define ASID(mm)	((mm)->context.id.counter & 0xffff)

/*
 * True when the kernel is unmapped while running at EL0 (kpti).
 * Requires both the config option and the detected CPU capability.
 */
static inline bool arm64_kernel_unmapped_at_el0(void)
{
    if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
        return false;

    return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

/*
 * Decide whether kernel mappings must be non-global (nG).
 *
 * Returns true when the kernel is unmapped at EL0 (kpti in effect);
 * global mappings are used otherwise.
 */
static inline bool arm64_kernel_use_ng_mappings(void)
{
    /* What's a kpti? Use global mappings if we don't know. */
    if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
        return false;

    /*
     * Note: this function is called before the CPU capabilities have
     * been configured, so our early mappings will be global. If we
     * later determine that kpti is required, then
     * kpti_install_ng_mappings() will make them non-global.
     */
    return arm64_kernel_unmapped_at_el0();
}

/*
 * Static initializer fragment for an mm context: points the page
 * global directory at the initial page tables (init_pg_dir).
 * NOTE(review): the 'name' parameter is unused by this definition.
 */
#define INIT_MM_CONTEXT(name)	\
    .pgd = init_pg_dir,

/* Early memory-management initialisation hooks (implemented elsewhere). */
extern void paging_init(void);
extern void bootmem_init(void);
extern void vmemmap_init(phys_addr_t phys, unsigned long virt, size_t size);

/*
 * Presumably mark the named mappings read-only after init — confirm
 * at the definitions.
 */
extern void mark_linear_text_alias_ro(void);
extern void mark_rodata_ro(void);

#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_MMU_H_ */
