/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
	b	reset			/* reset vector: jump to real entry */

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	/* Link-time base address of this image */
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 * Stored as byte offsets from _start so the values stay valid wherever
 * the image is loaded or relocated.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/*
	 * Could be EL3/EL2/EL1
	 * CurrentEL reports the exception level in bits [3:2], so the
	 * raw value 0xc means we were entered at EL3.
	 */
	mrs	x0, CurrentEL
	cmp	x0, #0xc
	b.ne	reset_nonsecure			/* Not EL3 */

	bl	setup_el3			/* EL3 initialization */

	/*
	 * MMU Disabled, iCache Disabled, dCache Disabled
	 */
reset_nonsecure:

#ifdef CONFIG_BOOTING_EL1
	/* If still at EL2, drop to EL1 before continuing */
	branch_if_el2 x0, 1f
	b	2f
1:	bl	setup_el2			/* EL2 initialization */
2:
#endif

	/* Initialize vBAR/CPACR_EL1/MDSCR_EL1 */
	adr	x0, vectors
	branch_if_el2 x1, 1f
	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
	msr	mdscr_el1, xzr			/* Clear monitor debug config */
	b	2f
1:	msr	vbar_el2, x0
2:

	/* Cache/BPB/TLB Invalidate */
	bl	__asm_flush_dcache_all		/* dCache clean & invalidate */
	bl	__asm_invalidate_icache_all	/* iCache invalidate */
	bl	__asm_invalidate_tlb_all	/* invalidate TLBs */

	/* Processor specific initialization */
	bl	lowlevel_init

	branch_if_slave	x0, slave_cpu

	/*
	 * Master CPU
	 * The boot CPU continues into the generic U-Boot entry point.
	 */
master_cpu:
	bl	_main

	/*
	 * Slave CPUs
	 * Secondaries park here (spin-table protocol): sleep on wfe and
	 * re-read CPU_RELEASE_ADDR until the master writes a non-zero
	 * jump address there, then branch to it.
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */

/*-------------------------------------------------------------------------*/

WEAK(setup_el3)
	/*
	 * EL3 initialization: configure SCR_EL3 for a non-secure 64-bit
	 * EL2, perform first-stage GICv2 setup (distributor on master,
	 * banked registers on slaves, CPU interface on all), set the
	 * counter frequency, then eret down to EL2 at the caller's lr.
	 * Clobbers: x0, x1, x2.
	 */
	mov	x0, #0x531	/* Non-secure EL0/EL1 | HVC | 64bit EL2 */
	msr	scr_el3, x0
	msr	cptr_el3, xzr	/* Disable coprocessor traps to EL3 */

	/* GIC initialization */
	branch_if_slave	x0, 2f

	/* Master initialize distributor */
	ldr	x1, =GICD_BASE		/* GICD_CTLR */
	mov	w0, #0x3		/* Enable Group0 & Group1 */
	str	w0, [x1]
	ldr	w0, [x1, #0x4]		/* GICD_TYPER */
	and	w2, w0, #0x1f		/* ITLinesNumber */
	add	w2, w2, #0x1		/* Number of GICD_IGROUPR registers */
	add	x1, x1, #0x80		/* GICD_IGROUPR */
	mov	w0, #~0			/* All Group1 */
	/* Mark every supported interrupt line as Group1 (non-secure) */
1:	str	w0, [x1], #0x4
	sub	w2, w2, #0x1
	cbnz	w2, 1b
	b	3f

	/* Slave initialize distributor */
2:	ldr	x1, =GICD_BASE		/* GICD_CTLR */
	mov	w0, #~0			/* All Group1 */
	str	w0, [x1, #0x80]		/* banked GICD_IGROUPR0: SGIs/PPIs */

	/* Initialize cpu interface */
3:	ldr	x1, =GICC_BASE		/* GICC_CTLR */
	mov	w0, #0x3		/* Enable Group0 & Group1 */
	str	w0, [x1]

	mov	w0, #0x1 << 7		/* Non-Secure access to GICC_PMR */
	str	w0, [x1, #0x4]		/* GICC_PMR */

	/* Counter frequency initialization */
	ldr	x0, =CONFIG_SYS_CNTFRQ
	msr	cntfrq_el0, x0

	/* SCTLR_EL2 initialization */
	msr	sctlr_el2, xzr		/* MMU/caches off at EL2 */

	/* Return to the EL2_SP2 mode from EL3 */
	mov	x0, #0x3c9		/* EL2_SP2 | D | A | I | F */
	msr	elr_el3, lr
	msr	spsr_el3, x0
	eret
ENDPROC(setup_el3)

WEAK(setup_el2)
	/*
	 * EL2 initialization: grant EL1/EL0 access to the generic timers,
	 * mirror MIDR/MPIDR into the virtualization ID registers, disable
	 * EL2 traps, and eret down to AArch64 EL1 (SP_EL1) at the
	 * caller's lr.
	 * Clobbers: x0, x1.
	 */

	/* Initialize Generic Timers */
	mrs	x0, cnthctl_el2
	orr	x0, x0, #0x3		/* Enable EL1 access to timers */
	msr	cnthctl_el2, x0
	/*
	 * Clear virtual offset.  Bug fix: the original wrote x0 (which
	 * still held the CNTHCTL_EL2 value) here, leaving a bogus
	 * non-zero virtual counter offset; write the zero register.
	 */
	msr	cntvoff_el2, xzr
	mrs	x0, cntkctl_el1
	orr	x0, x0, #0x3		/* EL0 access to counters */
	msr	cntkctl_el1, x0

	/* Initialize MPID/MPIDR registers */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/* Disable coprocessor traps */
	mov	x0, #0x33ff
	msr	cptr_el2, x0		/* Disable coprocessor traps to EL2 */
	msr	hstr_el2, xzr		/* Disable CP15 traps to EL2 */

	/* Initialize HCR_EL2 */
	mov	x0, #(1 << 31)		/* RW: 64bit EL1 */
	orr	x0, x0, #(1 << 29)	/* HCD: Disable HVC */
	msr	hcr_el2, x0

	/* SCTLR_EL1 initialization: MMU/caches off */
	mov	x0, #0x0800
	movk	x0, #0x30d0, lsl #16
	msr	sctlr_el1, x0

	/* Return to the EL1_SP1 mode from EL2 */
	mov	x0, #0x3c5		/* EL1_SP1 | D | A | I | F */
	msr	elr_el2, lr
	msr	spsr_el2, x0
	eret
ENDPROC(setup_el2)

WEAK(lowlevel_init)
	/*
	 * Non-secure per-CPU GIC setup, run after dropping out of EL3.
	 * Master: enable Group1 in the distributor.  Slaves: enable SGI 0
	 * plus the CPU interface, then sleep until the master's wake-up
	 * SGI arrives.
	 * Clobbers: x0, x1.
	 */
	branch_if_slave	x0, 1f

	/* Master initialize distributor */
	ldr	x1, =GICD_BASE		/* NonSecure GICD_CTLR */
	mov	w0, #0x1
	str	w0, [x1]		/* Enable Group1 */
	b	3f

1:
	/* Slave initialize distributor & cpu interface */
	ldr	x1, =GICD_BASE		/* NonSecure GICD_CTLR */
	mov	w0, #0x1		/* Enable SGI 0 */
	str	w0, [x1, #0x100]	/* GICD_ISENABLER0 */
	ldr	x1, =GICC_BASE		/* NonSecure GICC_CTLR */
	str	w0, [x1]		/* Enable Group1 */

2:
	/*
	 * Wait for SGI 0 from the master, sent after it clears the spin
	 * table.  This sync prevents slaves from observing an incorrect
	 * spin-table value and jumping to the wrong place.  Loop until
	 * the acknowledged interrupt ID is 0, i.e. SGI 0.
	 */
	wfi
	ldr	w0, [x1, #0xc]		/* GICC_IAR */
	str	w0, [x1, #0x10]		/* GICC_EOIR */
	cbnz	w0, 2b

3:
	ret
ENDPROC(lowlevel_init)

WEAK(kick_secondary_cpus)
	/*
	 * Wake the secondary CPUs by raising SGI 0 through the GIC
	 * distributor: target = all CPUs but self, forwarded as Group 1.
	 * Clobbers: x0, x1.
	 */
	ldr	x1, =GICD_BASE		/* NonSecure GIC distributor base */
	movz	w0, #0x100, lsl #16	/* TargetListFilter: all but self */
	movk	w0, #0x8000		/* NSATT: forward SGI 0 as Group 1 */
	str	w0, [x1, #0xf00]	/* write GICD_SGIR */
	ret
ENDPROC(kick_secondary_cpus)

/*-------------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
	/*
	 * Per-CPU setup run after relocation: invalidate the I-cache,
	 * program MAIR/TCR for the current exception level, point VBAR
	 * at the relocated vector table, then tail-call
	 * kick_secondary_cpus (its ret returns to our caller).
	 * Clobbers: x0, x1, x2.
	 */
	/* If I-cache is enabled invalidate it */
#ifndef CONFIG_SYS_ICACHE_OFF
	ic	iallu			/* I+BTB cache invalidate */
	isb	sy
#endif

#ifndef CONFIG_SYS_DCACHE_OFF
	/*
	 * Setup MAIR and TCR.
	 * Only the translation attributes are programmed here; the MMU
	 * enable bit is not touched by this routine.
	 */
	ldr	x0, =MEMORY_ATTRIBUTES
	ldr	x1, =TCR_FLAGS

	branch_if_el2 x2, 1f
	orr	x1, x1, TCR_EL1_IPS_BITS	/* add intermediate PA size */
	msr	mair_el1, x0
	msr	tcr_el1, x1
	b	2f
1:	orr	x1, x1, TCR_EL2_IPS_BITS
	msr	mair_el2, x0
	msr	tcr_el2, x1
2:
#endif

	/* Relocate vBAR */
	adr	x0, vectors
	branch_if_el2 x1, 1f
	msr	vbar_el1, x0
	b	2f
1:	msr	vbar_el2, x0
2:

	b	kick_secondary_cpus	/* tail call; its ret ends us */
ENDPROC(c_runtime_cpu_setup)
