/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <seminix/init.h>
#include <seminix/linkage.h>
#include <asm/assembler.h>
#include <asm/pgtable-hwdef.h>

/*
 * TCR_EL1.TG0/TG1 granule-size fields, chosen from the configured kernel
 * page size. All alternatives of this multi-bit-field OR are parenthesized
 * so the macros expand safely next to any operator at the use site.
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	(TCR_TG0_64K | TCR_TG1_64K)
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	(TCR_TG0_16K | TCR_TG1_16K)
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	(TCR_TG0_4K | TCR_TG1_4K)
#endif

/* Translation-table walks are Inner Shareable on SMP systems */
#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA (Write-Back Write-Allocate) */
#define TCR_CACHE_FLAGS	(TCR_IRGN_WBWA | TCR_ORGN_WBWA)

/* Place an 8-bit memory attribute encoding at MAIR_EL1 index 'mt' */
#define MAIR(attr, mt)	((attr) << ((mt) * 8))

/*
 *	cpu_do_idle()
 *
 *	Idle the processor (wait for interrupt). The DSB guarantees all
 *	outstanding memory accesses have completed before the core may
 *	enter a low-power state; without it WFI could retire while stores
 *	are still in flight.
 */
ENTRY(cpu_do_idle)
    dsb	sy				// WFI may enter a low-power mode
    wfi					// wait for interrupt (may power down)
    ret
ENDPROC(cpu_do_idle)

/*
 *	cpu_do_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys.
 *
 *	- pgd_phys - physical address of new TTB
 *	- tsk      - task whose mm supplies the new ASID (via mmid)
 *
 *	The ASID lives in TTBR1_EL1 (TCR_EL1.A1 == 1), so the switch is a
 *	two-step update: publish the new ASID in TTBR1 first, then point
 *	TTBR0 at the new tables, with an ISB in between so the ASID change
 *	is synchronized before the new walk base becomes visible.
 */
ENTRY(cpu_do_switch_mm)
    mrs	x2, ttbr1_el1			// current TTBR1 (keep its table base)
    mmid	x1, x1				// get mm->context.id
    phys_to_ttbr x3, x0			// x3 = TTBR0 BADDR for pgd_phys

    cbz     x1, 1f                          // skip CNP for reserved ASID
    orr     x3, x3, #TTBR_CNP_BIT		// tables shared by all PEs (Common-not-Private)
1:
    bfi	x2, x1, #48, #16		// set the ASID
    msr	ttbr1_el1, x2			// in TTBR1 (since TCR.A1 is set)
    isb					// new ASID visible before new TTBR0
    msr	ttbr0_el1, x3			// now update TTBR0
    isb
    b	post_ttbr_update_workaround	// Back to C code...
ENDPROC(cpu_do_switch_mm)

    // NOTE(review): "awx" makes .idmap.text writable as well as executable
    // (a W^X violation); upstream later switched this to "ax" — confirm
    // nothing here relies on writability before tightening.
    .pushsection ".idmap.text", "awx"

/*
 * Point TTBR1_EL1 at the (all-zero) empty_zero_page so the live kernel
 * tables can be swapped out from under the MMU, then invalidate the
 * local TLB so no stale TTBR1 walks survive.
 *   \tmp1, \tmp2 - scratch registers, clobbered
 */
.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
    adrp	\tmp1, empty_zero_page		// PA (page-aligned) of the zero page
    phys_to_ttbr \tmp2, \tmp1
    offset_ttbr1 \tmp2
    msr	ttbr1_el1, \tmp2
    isb
    tlbi	vmalle1				// flush local TLB for EL1&0 regime
    dsb	nsh					// complete the invalidation...
    isb					// ...before any further fetch/walk
.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping
 * (the identity map), because TTBR1 is torn down and rebuilt here.
 *
 *   x0 - physical address of the new TTBR1 translation tables
 *
 * Interrupts/debug (DAIF) are masked across the swap so nothing can take
 * an exception while TTBR1 points at the reserved zero page.
 */
ENTRY(idmap_cpu_replace_ttbr1)
    save_and_disable_daif flags=x2		// x2 = saved DAIF flags

    __idmap_cpu_set_reserved_ttbr1 x1, x3	// park TTBR1 on empty_zero_page

    offset_ttbr1 x0
    msr	ttbr1_el1, x0			// install the new tables
    isb

    restore_daif x2

    ret
ENDPROC(idmap_cpu_replace_ttbr1)
    .popsection

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.  Return in x0 the
 *	value of the SCTLR_EL1 register (head.S writes it to enable the MMU).
 *
 *	Runs from the identity map with the MMU off; clobbers x0, x5, x6,
 *	x9, x10.
 */
    .pushsection ".idmap.text", "awx"
ENTRY(__cpu_setup)
    tlbi	vmalle1				// Invalidate local TLB
    dsb	nsh

    mov	x0, #3 << 20			// CPACR_EL1.FPEN = 0b11
    msr	cpacr_el1, x0			// Enable FP/ASIMD
    mov	x0, #1 << 12			// Reset mdscr_el1 and disable
    msr	mdscr_el1, x0			// access to the DCC from EL0 (TDCC)
    isb					// Unmask debug exceptions now,
    enable_dbg				// since this is per-cpu
    reset_pmuserenr_el0 x0			// Disable PMU access from EL0
    /*
     * Memory region attributes for LPAE, one byte per attribute index:
     *
     *   n = AttrIndx[2:0]
     *			n	MAIR
     *   DEVICE_nGnRnE	000	00000000
     *   DEVICE_nGnRE	001	00000100
     *   DEVICE_GRE	010	00001100
     *   NORMAL_NC	011	01000100
     *   NORMAL		100	11111111
     *   NORMAL_WT	101	10111011
     */
    ldr	x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
             MAIR(0x04, MT_DEVICE_nGnRE) | \
             MAIR(0x0c, MT_DEVICE_GRE) | \
             MAIR(0x44, MT_NORMAL_NC) | \
             MAIR(0xff, MT_NORMAL) | \
             MAIR(0xbb, MT_NORMAL_WT)
    msr	mair_el1, x5
    /*
     * Prepare SCTLR: only computed here, written by the caller (head.S)
     * when it actually turns the MMU on.
     */
    mov_q	x0, SCTLR_EL1_SET
    /*
     * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
     * both user and kernel. TCR_A1 selects the TTBR1 ASID; TCR_TBI0
     * enables top-byte-ignore for user addresses.
     */
    ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
            TCR_TG_FLAGS | TCR_ASID16 | \
            TCR_TBI0 | TCR_A1

    ldr_l		x9, idmap_t0sz			// widen T0SZ if the idmap
    tcr_set_t0sz	x10, x9				// needs a larger VA range

    /*
     * Set the IPS bits in TCR_EL1 from the implemented PA range
     * (x5/x6 are scratch here; the MAIR value in x5 is already written).
     */
    tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
    /*
     * Enable hardware update of the Access Flags bit.
     * Hardware dirty bit management is enabled later,
     * via capabilities.
     */
    mrs	x9, ID_AA64MMFR1_EL1
    and	x9, x9, #0xf			// HAFDBS field: 0 = not implemented
    cbz	x9, 1f
    orr	x10, x10, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
    msr	tcr_el1, x10
    ret					// return to head.S
ENDPROC(__cpu_setup)
