/*
 * Branch according to exception level
 */
#include "sdkconfig.h"

.section StartUp, "ax"
.section .vectors
.globl	_boot
.global _boot
.type _boot, "function"
_boot:
    // Dispatch on the current exception level. CurrentEL[3:2] holds the
    // EL, so the register reads 0xc at EL3, 0x8 at EL2, 0x4 at EL1.
    mrs	x0, CurrentEL
    cmp x0, 0xc
    b.eq    el3_entry
    cmp	x0, 0x8
    b.eq	el2_entry
    cmp	x0, 0x4
    b.eq	el1_entry
    // No match (EL0 or unexpected value): park the core. The original
    // used `bne hang`, but the preceding b.eq already took the equal
    // case, so the branch was always taken — make it unconditional.
    b	hang
el3_entry:
    // Entered at EL3: prepare a controlled drop to EL2 via eret.
    // initialize sctlr_el2 and hcr_el2 to safe (all-zero) values before entering el2.
    msr sctlr_el2, xzr
    msr hcr_el2, xzr
    // determine the el2 execution state and security world.
    mrs x0, scr_el3
    orr x0, x0, #(1<<10) // SCR_EL3.RW=1: el2 execution state is aarch64.
    orr x0, x0, #(1<<0) // SCR_EL3.NS=1: el1 and below are the non-secure world.
    msr scr_el3, x0
    mov x0, #0b01001 // M[4:0]=01001 -> EL2h; DAIF bits [9:6]=0000 (all unmasked)
    msr spsr_el3, x0 // state restored by eret; aarch64 target must match scr_el3.rw
    // determine el2 entry.
    adr x0, el2_entry // el2_entry points to the first instruction of 
    msr elr_el3, x0 // el2 code.
    eret // drop to EL2, resuming at el2_entry in the state set above
el2_entry:
    // Entered at EL2 (directly, or dropped from el3_entry): prepare a drop to EL1.
    // initialize the sctlr_el1 register to a safe (zero) value before entering el1.
    msr sctlr_el1, xzr
    mrs x0, hcr_el2
    orr x0, x0, #(1<<31) // HCR_EL2.RW=1: el1 execution state is aarch64.
    msr hcr_el2, x0
    mov x0, #0b00101 // M[4:0]=00101 -> EL1h; DAIF bits [9:6]=0000 (all unmasked)
    msr spsr_el2, x0 // state restored by eret; aarch64 target must match hcr_el2.rw.
    adr x0, el1_entry // el1_entry points to the first instruction of 
    msr elr_el2, x0 // el1 code.
    eret // drop to EL1, resuming at el1_entry in the state set above
el1_entry:
    // EL1 entry point: install the vector table, enable FP/SIMD,
    // configure SCTLR_EL1, unmask exceptions, set up the stack, and
    // load the .data copy pointers for the loop that follows.

    adr	x0, _vector_table // install the EL1 exception vector table
    msr	vbar_el1, x0

	mov	x0, #3 << 20 // CPACR_EL1.FPEN=0b11: no trapping of FP/SIMD at EL0/EL1
	msr	cpacr_el1, x0			/* Enable FP/SIMD */


master_cpu:
    // NOTE(review): this overwrites SCTLR_EL1 wholesale with only three
    // bits set (RES1 bits left 0) — confirm intended for this platform.
    mov	x1,#0x0
    orr     x1, x1, #(1 << 18)      // SCTLR_EL1.nTWE: Set WFE non trapping
    orr     x1, x1, #(1 << 17)      // SCTLR_EL1.nTWI: Set WFI non trapping
    orr     x1, x1, #(1 << 5)       // SCTLR_EL1.CP15BEN: Set CP15 barrier enabled
    msr     SCTLR_EL1, x1
    isb // ensure the new SCTLR_EL1 takes effect before continuing

	msr	DAIFclr, 0xF // unmask Debug, SError, IRQ, FIQ. NOTE(review): immediate is usually written #0xF — confirm the assembler accepts the bare form
	/* configure stack */
	adrp	x0, stack_top	// Address of 4KB page at a PC-relative offset. NOTE(review): no :lo12: add — assumes stack_top is 4KB-aligned; confirm in linker script
magic_label:				// Why do we need this label to let GDB step continually?
    mov	sp, x0				// sp = stack_top (align with 4KB page)

    /* Start copying data: x0 = source (load address), x1..x2 = dest range */
    ldr x0, =_rom_end // source: .data image placed after the ROM text
    ldr x1, =__data_start // destination start (link address of .data)
    ldr x2, =__data_end // destination end
data_loop:
    /*
     * Copy the initialised .data image from its load address (x0, at
     * _rom_end) to its runtime range (x1 = __data_start up to
     * x2 = __data_end), 16 bytes per iteration.
     *
     * Fixes vs. the original loop: it compared *before* the copy but
     * branched on those flags *after* it, so it always copied one extra
     * 16-byte pair past __data_end (including when the section is
     * empty); it also used a signed compare (blt) on addresses.
     * Test first, with an unsigned condition.
     */
    cmp     x1, x2                  /* done once dest cursor reaches __data_end */
    b.hs    data_copy_done          /* unsigned: address compare must not be signed */
    ldp     x10, x11, [x0], #16     /* load 16 bytes from source [x0] */
    stp     x10, x11, [x1], #16     /* store 16 bytes to destination [x1] */
    b       data_loop
data_copy_done:

	/* clear bss. */
	ldr	x1, =__bss_start__	/* x1 = start of .bss */
	ldr	w2, =__bss_size		/* w2 = clear count. NOTE(review): the loop stores 8 bytes per decrement, so __bss_size must be a count of 8-byte words, not bytes — confirm against the linker script */
1:	cbz	w2,	2f				/* empty bss: skip ahead. NOTE(review): label 2: is AFTER the libc/cache/MMU init below, so a zero-size bss also skips that init — confirm intended */
    str   xzr, [x1], #8 // zero one doubleword, post-increment the cursor
    sub   w2, w2, #1
    cbnz  w2, 1b // loop until the count is exhausted

#ifdef CONFIG_USE_LIBC
    // Set up _fini and fini_array to be called at exit
    ldr x0, =__libc_fini_array
    bl  atexit
    // Call preinit_array, _init and init_array
    bl  __libc_init_array
#endif

#ifdef CONFIG_USE_CACHE
// Invalidate stale translations and instruction cache before enabling.
TLBI VMALLE1            //; TLBI VMALLE1, TLB Invalidate by VMID, All at stage 1, EL1
ic IALLU                //; Invalidate I cache to PoU
// NOTE(review): no dsb/isb between the invalidations above and the
// SCTLR_EL1 write below — presumably FCacheDCacheInvalidate supplies the
// needed barriers; confirm.

#ifdef CONFIG_USE_MMU
bl MmuInit
#endif
bl FCacheDCacheInvalidate

// Enable caches and the MMU.
MRS X0, SCTLR_EL1
ORR X0, X0, #(0x1 << 2) // The C bit (data cache).
ORR X0, X0, #(0x1 << 12) // The I bit (instruction cache).
#ifdef CONFIG_USE_MMU
ORR X0, X0, #0x1 // The M bit (MMU).
#endif
MSR SCTLR_EL1, X0
DSB SY
ISB

dsb ish                 // ensure all previous stores have completed before invalidating
ic  ialluis             // I cache invalidate all inner shareable to PoU (which includes secondary cores)
dsb ish              // ensure completion on inner shareable domain   (which includes secondary cores)
isb

#endif

#ifdef CONFIG_USE_LIBC
    // Set up the standard file handles
    bl  initialise_monitor_handles
#endif
2:
// Hand off to C: early interrupt setup, then main. If main ever
// returns, execution falls through into the hang loop below.
    bl InterruptEarlyInit
	bl	main

hang:
	// Park the core: low-power wait-for-interrupt, looped forever.
	// Reached from _boot on an unsupported EL, or if main returns.
	wfi
	b	hang






# =========================================
