#include "mm.h"


/*
 * Linux arm64 "Image" header (see Documentation/arch/arm64/booting.rst).
 * Field order and sizes are ABI — do not reorder.
 */
_head:
 b       _start          /* code0: executable code (jump over the header) */
 .long   0               /* code1: second half of the 8-byte code field */
 .quad   _text_offset    /* text_offset: image load offset from start of RAM, little endian */
 .quad   _end - _head    /* image_size: effective Image size, little endian (_end defined in link.lds) */
 .quad   0               /* flags: kernel flags, little endian */
 .quad   0               /* res2: reserved */
 .quad   0               /* res3: reserved */
 .quad   0               /* res4: reserved */
 .ascii  "ARM\x64"       /* magic: "ARM\x64" = 0x644d5241 little endian */
 .long   0               /* res5: reserved (used for PE COFF offset) */

 /*
  * Boot-time "global variables", kept in registers x21~x28.
  * x19-x28 are callee-saved under AAPCS64, so these survive the
  * bl calls made during early boot.
  */
dtb_paddr .req x21      /* x0 at entry: physical address of the DTB */
boot_arg0 .req x22      /* x1 at entry (reserved, 0) */
boot_arg1 .req x23      /* x2 at entry (reserved, 0) */
boot_arg2 .req x24      /* x3 at entry (reserved, 0) */
stack_top .req x25      /* physical address of the boot CPU stack top */

/*
 * get_phy - load the runtime (PC-relative, i.e. physical while the MMU
 * is off) address of \symbol into \reg. adrp + :lo12: add gives a
 * +/-4GB reach, unlike plain adr (+/-1MB).
 */
.macro get_phy, reg, symbol
    adrp    \reg, \symbol
    add     \reg, \reg, #:lo12:\symbol
.endm

/*
 * get_pvoff - compute the physical-minus-virtual (link-time) offset.
 * \tmp receives the link-time address of .boot_cpu_stack_top (ldr =sym
 * loads the absolute address from a literal pool); \out receives its
 * runtime PC-relative address; \out = runtime - linktime.
 * Result is 0 when the image runs at its linked address.
 */
.macro get_pvoff, tmp, out
     ldr     \tmp, =.boot_cpu_stack_top
     get_phy \out, .boot_cpu_stack_top
     sub     \out, \out, \tmp
.endm

/*
 * get_this_cpu_offset - read this CPU's per-cpu offset from tpidr_el1.
 * The boot path stores the offset there (zero for CPU 0, see _start).
 */
.macro	get_this_cpu_offset, dst
	mrs	\dst, tpidr_el1
.endm

/*
 * adr_this_cpu - address of this CPU's instance of a per-cpu variable.
 * @dst: result of per_cpu(sym, smp_processor_id()) (can be SP)
 * @sym: the name of the per-cpu variable
 * @tmp: scratch register (clobbered; must differ from \dst until the
 *       final add, which is why \dst is computed first)
 */
.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
	get_this_cpu_offset \tmp
	add	\dst, \dst, \tmp
.endm


.globl _start
_start:
/*
 * Kernel entry point. Boot CPU general-purpose register settings
 * (Linux arm64 boot protocol):
 *   x0 = physical address of device tree blob (dtb) in system RAM.
 *   x1 = 0 (reserved for future use)
 *   x2 = 0 (reserved for future use)
 *   x3 = 0 (reserved for future use)
 */
    mov     dtb_paddr, x0
    mov     boot_arg0, x1
    mov     boot_arg1, x2
    mov     boot_arg2, x3

    /* Save boot CPU stack top (physical address) */
    get_phy stack_top, .boot_cpu_stack_top
    /* Boot CPU per-cpu offset is 0 (read back by get_this_cpu_offset) */
    msr     tpidr_el1, xzr

    bl      init_cpu_el
    bl      init_kernel_bss
    bl      init_cpu_stack_early

    /*
     * SMP gate: only CPU 0 continues; park everything else.
     * (This replaces a bare '#####...' divider line, which is an invalid
     * preprocessing directive once this file is fed through cpp for
     * "mm.h" — '#' is not a comment character in AArch64 GAS.)
     */
    mrs     x0, mpidr_el1
    and     x0, x0, #0xFF           /* Aff0: CPU id within the cluster */
    cbz     x0, master
    b       proc_hang

proc_hang:
    b       proc_hang               /* parked CPUs spin here forever */

master:
    /* NOTE(review): bss was already cleared by init_kernel_bss and sp
     * already set by init_cpu_stack_early; this re-init looks vestigial
     * — confirm before removing. get_phy (adrp+add, +/-4GB) replaces
     * plain adr, whose reach is only +/-1MB. */
    get_phy x0, __bss_start
    get_phy x1, __bss_end
    sub     x1, x1, x0              /* x1 = bss size in bytes */
    bl      memzero

    mov     sp, #LOW_MEMORY         /* LOW_MEMORY must encode as a logical immediate */
    bl      kernel_main
    b       proc_hang               /* kernel_main should never return */

/*
 * init_cpu_el - drop the CPU from its entry exception level down to EL1h.
 *
 * Handles entry at EL3, EL2 or EL1; each transition masks DAIF in the
 * target SPSR and erets to the next stage. x30 is preserved across eret,
 * so the final ret returns to the caller, now running at EL1.
 *
 * Clobbers: x0, x1.
 */
init_cpu_el:
   mrs     x0, CurrentEL           /* Current EL is held in bits [3:2] */
   lsr     x0, x0, #2
   and     x0, x0, #3

   cmp     x0, #3                  /* 0b11 -> entered at EL3 */
   bne     .init_cpu_hyp

   mov     x1, #(1 << 0)           /* NS: EL0 and EL1 are in Non-secure state */
   orr     x1, x1, #(1 << 4)       /* RES1 */
   orr     x1, x1, #(1 << 5)       /* RES1 */
   orr     x1, x1, #(1 << 10)      /* RW: the next lower level is AArch64 */
   msr     scr_el3, x1

   mov     x1, #9                  /* Target state 0b1001 -> EL2h */
   orr     x1, x1, #(1 << 6)       /* Mask FIQ */
   orr     x1, x1, #(1 << 7)       /* Mask IRQ */
   orr     x1, x1, #(1 << 8)       /* Mask SError */
   orr     x1, x1, #(1 << 9)       /* Mask Debug Exception */
   msr     spsr_el3, x1

   get_phy x1, .init_cpu_hyp
   msr     elr_el3, x1
   eret

.init_cpu_hyp:
    /*
     * BUGFIX: re-read CurrentEL instead of reusing x0. When we arrive
     * here via the EL3 eret above, x0 still holds 3, so the old
     * "cmp x0, #2" skipped the whole EL2 setup (leaving HCR_EL2.RW
     * clear, i.e. EL1 configured as AArch32) and then "returned" to
     * the caller while still executing at EL2.
     */
    mrs     x0, CurrentEL
    lsr     x0, x0, #2
    and     x0, x0, #3
    cmp     x0, #2                  /* 0b10 -> running at EL2 */
    bne     .init_cpu_sys

    /* Enable CNTP for EL1 */
    mrs     x0, cnthctl_el2         /* Counter-timer Hypervisor Control register */
    orr     x0, x0, #(1 << 0)       /* Don't trap NS EL0/1 accesses to the physical counter */
    orr     x0, x0, #(1 << 1)       /* Don't trap NS EL0/1 accesses to the physical timer */
    msr     cnthctl_el2, x0
    msr     cntvoff_el2, xzr        /* No virtual counter offset */

    mov     x0, #(1 << 31)          /* RW: EL1 is AArch64 */
    orr     x0, x0, #(1 << 1)       /* SWIO hardwired */
    msr     hcr_el2, x0

    mov     x0, #5                  /* Target state 0b0101 -> EL1h */
    orr     x0, x0, #(1 << 6)       /* Mask FIQ */
    orr     x0, x0, #(1 << 7)       /* Mask IRQ */
    orr     x0, x0, #(1 << 8)       /* Mask SError */
    orr     x0, x0, #(1 << 9)       /* Mask Debug Exception */
    msr     spsr_el2, x0

    get_phy x0, .init_cpu_sys
    msr     elr_el2, x0
    eret

 .init_cpu_sys:
    mrs     x0, sctlr_el1
    orr     x0, x0, #(1 << 12)      /* I: enable instruction cache */
    bic     x0, x0, #(3 << 3)       /* SA/SA0: disable SP alignment checks */
    bic     x0, x0, #(1 << 1)       /* A: disable alignment check */
    msr     sctlr_el1, x0

    /* Avoid traps from SIMD or floating-point instructions */
    mov     x0, #0x00300000         /* CPACR_EL1.FPEN = 0b11: no trapping at EL0/EL1 */
    msr     cpacr_el1, x0

    /* Apply the context change */
    dsb     ish
    isb

    ret

  
/*
 * init_kernel_bss - zero the kernel .bss ([__bss_start, __bss_end)).
 *
 * BUGFIX: the original computed the size and then fell straight through
 * into init_cpu_stack_early without storing anything and without a ret,
 * so .bss was never actually cleared here. Zero 8 bytes at a time, then
 * the <8-byte tail, and return. Clobbers: x1-x4.
 */
init_kernel_bss:
    get_phy x1, __bss_start
    get_phy x2, __bss_end
    sub     x2, x2, x1              /* x2 = total bss size in bytes */

    and     x3, x2, #7              /* x3 = trailing byte count (size % 8) */
    ldr     x4, =~0x7
    and     x2, x2, x4              /* x2 = size rounded down to 8-byte multiple */

.bss_zero_quad:
    cbz     x2, .bss_zero_tail
    str     xzr, [x1], #8
    sub     x2, x2, #8
    b       .bss_zero_quad

.bss_zero_tail:
    cbz     x3, .bss_zero_done
    strb    wzr, [x1], #1
    sub     x3, x3, #1
    b       .bss_zero_tail

.bss_zero_done:
    ret

/*
 * init_cpu_stack_early - select SP_EL1 (spsel=1) and point sp at the
 * boot stack top saved in stack_top (x25) by _start.
 * NOTE: sp must stay 16-byte aligned; .boot_cpu_stack_top is 4KiB
 * aligned by the .align 12 in the stack section below.
 */
init_cpu_stack_early:
    msr     spsel, #1
    mov     sp, stack_top

    ret   

/*
 * CPU stack builtin
 */
/*
 * CPU stack builtin.
 * Placed in ".bss.noclean.cpus_stack" — presumably excluded from the
 * bss-clearing range so the live boot stack is not zeroed (TODO confirm
 * against link.lds). Layout (low to high): secondary CPU stacks (SMP
 * only), then the boot CPU stack; each label marks a stack TOP since
 * AArch64 stacks grow downward.
 */
     .section ".bss.noclean.cpus_stack"
     .align 12                      /* 4 KiB alignment for the stack area */
 .cpus_stack:
 #if defined(USING_SMP) && CPUS_NR > 1
     .space (ARCH_SECONDARY_CPU_STACK_SIZE * (CPUS_NR - 1))
 .secondary_cpu_stack_top:
 #endif
     .space ARCH_SECONDARY_CPU_STACK_SIZE
 .boot_cpu_stack_top: