
#include <arch.h>
#include <asm_macros.h>


.global entrypoint
.global _barrier

func entrypoint
	/* ---------------------------------------------
	 * Set the CPU endianness (little-endian, EE=0)
	 * before doing anything that might involve
	 * memory reads or writes.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el3
	bic	x0, x0, #SCTLR_EE_BIT
	msr	sctlr_el3, x0
	isb

	/*
	 * Initialize the general-purpose register bank to a known
	 * all-zero state, so no stale reset values leak into later code.
	 */
	mov	x0, xzr
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
	mov	x4, xzr
	mov	x5, xzr
	mov	x6, xzr
	mov	x7, xzr
	mov	x8, xzr
	mov	x9, xzr
	mov	x10, xzr
	mov	x11, xzr
	mov	x12, xzr
	mov	x13, xzr
	mov	x14, xzr
	mov	x15, xzr
	mov	x16, xzr
	mov	x17, xzr
	mov	x18, xzr
	mov	x19, xzr
	mov	x20, xzr
	mov	x21, xzr
	mov	x22, xzr
	mov	x23, xzr
	mov	x24, xzr
	mov	x25, xzr
	mov	x26, xzr
	mov	x27, xzr
	mov	x28, xzr
	mov	x29, xzr
	mov	x30, xzr

	/*
	 * Zero the banked stack pointers, link (exception return) registers
	 * and saved status registers of the lower exception levels.
	 */
	msr	spsel, #1	/* select SP_EL3 (back to reset state) */
	mov	sp, x0		/* x0 is still zero here */
	msr	sp_el0, xzr
	msr	sp_el1, xzr
	msr	sp_el2, xzr
	msr	elr_el1, xzr
	msr	elr_el2, xzr
	msr	elr_el3, xzr
	msr	spsr_el1, xzr
	msr	spsr_el2, xzr
	msr	spsr_el3, xzr

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

	/*
	 * Initialize the vector base address register for EL3.
	 * NOTE(review): adr has a +/-1MB PC-relative range, so
	 * rom_exceptions must be linked close to this code — confirm
	 * against the linker script.
	 */
	adr	x1, rom_exceptions
	msr	vbar_el3, x1
	isb

	/* --------------------------------------------------------
	 * Early set RES1 bits in SCR_EL3. Set EA bit to catch both
	 * External Aborts and SError Interrupts in EL3, and route
	 * FIQs/IRQs to EL3.
	 * TODO: set SIF bit to disable instruction fetches from
	 * Non-secure memory ?!
	 * --------------------------------------------------------
	 */
	mov	x0, #(SCR_RES1_BITS | SCR_EA_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT)
	msr	scr_el3, x0

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------------------------------
	 * The initial state of the Architectural feature trap register
	 * (CPTR_EL3) is unknown and it must be set to a known state. All
	 * feature traps are disabled. Some bits in this register are marked as
	 * Reserved and should not be modified.
	 *
	 * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
	 *  or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
	 * CPTR_EL3.TTA: This causes access to the Trace functionality to trap
	 *  to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
	 *  access to trace functionality is not supported, this bit is RES0.
	 * CPTR_EL3.TFP: This causes instructions that access the registers
	 *  associated with Floating Point and Advanced SIMD execution to trap
	 *  to EL3 when executed from any exception level, unless trapped to EL1
	 *  or EL2.
	 * ---------------------------------------------------------------------
	 */
	mrs	x0, cptr_el3
	/*
	 * BUGFIX: operate on the full x0 register. The original used w0
	 * here, which zeroes bits [63:32] of the value written back and
	 * thereby modifies the reserved upper half of CPTR_EL3 — exactly
	 * what the comment above says must not happen.
	 */
	bic	x0, x0, #TCPAC_BIT
	bic	x0, x0, #TTA_BIT
	bic	x0, x0, #TFP_BIT
	msr	cptr_el3, x0

	/* Compute this CPU's linear core number from its MPIDR */
	mrs	x0, mpidr_el1
	bl	plat_get_core_pos
	/* x0 holds the CPU number: primary (0) continues, others wait */
	cbnz	x0, .L__secondarys_wait

	/* Primary CPU only: zero the .bss section before any C code runs */
	ldr	x0, =__bss_start
	ldr	x1, =__bss_size
	bl	bzero

	b	.L__stack_setup

.L__secondarys_wait:
	/*
	 * Wait for the primary CPU to finish the global setup: sleep on
	 * wfe and re-check the barrier word until it reaches 1.
	 * NOTE(review): this relies on the primary executing sev (or
	 * another event-generating operation) after storing to _barrier,
	 * and on that store being visible to cores still running with
	 * caches/MMU off — confirm in the primary's _init path.
	 */
	wfe
	ldr	x4, _barrier
	cmp	x4, #1
	b.lt	.L__secondarys_wait

.L__stack_setup:
	/*
	 * Set up this CPU's stack, then configure and enable the EL3 MMU.
	 */
	mrs	x0, mpidr_el1

	bl	plat_set_stack
	bl	mmu_tcr_init
	adr	x1, root_page_table
	msr	ttbr0_el3, x1
	dsb	sy
	/* Set LR to the post-MMU continuation point; it is a v_addr */
	ldr	x30, =.L__after_mmu_enable_cpux

	mrs	x1, sctlr_el3
	orr	x1, x1, #SCTLR_C_BIT
	orr	x1, x1, #SCTLR_M_BIT
	msr	sctlr_el3, x1	/* enable MMU and data cache */

	dsb	sy
	isb
	ic	ialluis		/* invalidate all I-caches in the Inner Shareable domain to PoU */
	dsb	sy
	isb
	/*
	 * BUGFIX: invalidate the EL3 TLB, not the EL1 one. This code runs
	 * at EL3 and has just enabled the EL3 translation regime; the
	 * original "tlbi vmalle1" only affects EL0/EL1 stage 1 entries
	 * and would leave any stale EL3 entries in place.
	 */
	tlbi	alle3
	dsb	sy
	isb
	ret			/* branches to x30 = .L__after_mmu_enable_cpux */

.L__after_mmu_enable_cpux:
	/*
	 * MMU is now on: enter the C runtime initialization.
	 */
	b	_init

endfunc entrypoint

.data
.align 3	/* 8-byte alignment for the 64-bit barrier word */
/*
 * _barrier provides minimal boot-time synchronization: secondary cores
 * spin (wfe + reload) on this word until the primary core stores a
 * non-zero value here after finishing the global setup.
 */
_barrier: .8byte 0
