/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <generated/asm-offsets.h>
#include <seminix/init.h>
#include <seminix/linkage.h>
#include <asm/processor.h>
#include <asm/assembler.h>
#include <asm/thread.h>
#include <asm/esr.h>
#include <asm/debug.h>

    .macro	clear_gp_regs
    /*
     * Zero x0-x29 so that no stale register contents leak across the
     * exception boundary (x30/lr is saved and restored separately).
     */
    .irp	reg,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
    mov	x\reg, xzr
    .endr
    .endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

    .macro kernel_ventry, el, label, regsize = 64
    .align 7				// each vector entry is 128 bytes apart

    // Reserve the pt_regs frame now, so the handler's kernel_entry can
    // store registers relative to the new sp.
    sub	sp, sp, #S_FRAME_SIZE

    // Dispatch to the generated label, e.g. el1_irq, el0_sync.
    // NOTE(review): \regsize is accepted but unused here — presumably kept
    // for signature compatibility with callers; confirm.
    b	el\()\el\()_\label
    .endm

    .macro	kernel_entry, el, regsize = 64
    .if	\regsize == 32
    mov	w0, w0				// zero upper 32 bits of x0
    .endif
    // Spill all general-purpose registers into the pt_regs frame that
    // kernel_ventry already reserved on the stack.
    stp	x0, x1, [sp, #16 * 0]
    stp	x2, x3, [sp, #16 * 1]
    stp	x4, x5, [sp, #16 * 2]
    stp	x6, x7, [sp, #16 * 3]
    stp	x8, x9, [sp, #16 * 4]
    stp	x10, x11, [sp, #16 * 5]
    stp	x12, x13, [sp, #16 * 6]
    stp	x14, x15, [sp, #16 * 7]
    stp	x16, x17, [sp, #16 * 8]
    stp	x18, x19, [sp, #16 * 9]
    stp	x20, x21, [sp, #16 * 10]
    stp	x22, x23, [sp, #16 * 11]
    stp	x24, x25, [sp, #16 * 12]
    stp	x26, x27, [sp, #16 * 13]
    stp	x28, x29, [sp, #16 * 14]

    .if	\el == 0
    // Entry from EL0: scrub GPRs so user register contents cannot influence
    // kernel execution, then pick up the aborted user sp and current task.
    clear_gp_regs
    mrs	x21, sp_el0
    ldr_this_cpu	tsk, __entry_task, x20	// tsk = this CPU's current task

    .else
    // Entry from EL1: the interrupted sp sits just above our frame.
    add	x21, sp, #S_FRAME_SIZE
    get_thread_info tsk
    /* Save the task's original addr_limit and set USER_DS */
    ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
    str	x20, [sp, #S_ORIG_ADDR_LIMIT]
    mov	x20, #USER_DS
    str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
    /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
    .endif /* \el == 0 */
    mrs	x22, elr_el1			// aborted PC
    mrs	x23, spsr_el1			// aborted PSTATE
    stp	lr, x21, [sp, #S_LR]

    /*
     * In order to be able to dump the contents of struct pt_regs at the
     * time the exception was taken (in case we attempt to walk the call
     * stack later), chain it together with the stack frames.
     */
    .if \el == 0
    stp	xzr, xzr, [sp, #S_STACKFRAME]	// terminate the frame chain at EL0
    .else
    stp	x29, x22, [sp, #S_STACKFRAME]	// fake frame: {old fp, aborted pc}
    .endif
    add	x29, sp, #S_STACKFRAME

    stp	x22, x23, [sp, #S_PC]

    /* Not in a syscall by default (el0_svc overwrites for real syscall) */
    .if	\el == 0
    mov	w21, #NO_SYSCALL
    str	w21, [sp, #S_SYSCALLNO]
    .endif

    /*
     * Set sp_el0 to current thread_info.
     * (While in the kernel, sp_el0 is repurposed as the 'current' pointer;
     * the user's sp was already captured into x21 above.)
     */
    .if	\el == 0
    msr	sp_el0, tsk
    .endif

    /*
     * Registers that may be useful after this macro is invoked:
     *
     * x21 - aborted SP
     * x22 - aborted PC
     * x23 - aborted PSTATE
     */
    .endm

    .macro	kernel_exit, el
    .if	\el != 0
    // Returning to EL1: mask DAIF so nothing can interrupt while we
    // restore the partially-unwound state below.
    disable_daif

    /* Restore the task's original addr_limit. */
    ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
    str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

    /* No need to restore UAO, it will be restored from SPSR_EL1 */
    .endif

    ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

    .if	\el == 0
    ldr	x23, [sp, #S_SP]		// load return stack pointer
    msr	sp_el0, x23
    tst	x22, #PSR_MODE32_BIT		// native task?
    b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
    /*
     * Cortex-A53 erratum 845719 workaround: write CONTEXTIDR_EL1 before
     * returning to a 32-bit EL0 task.
     */
#ifdef CONFIG_PID_IN_CONTEXTIDR
    mrs	x29, contextidr_el1
    msr	contextidr_el1, x29
#else
    msr contextidr_el1, xzr
#endif
#endif
3:
    .endif

    msr	elr_el1, x21			// set up the return data
    msr	spsr_el1, x22
    // Reload all GPRs from the pt_regs frame; x21/x22 are overwritten
    // below, which is fine now that ELR/SPSR are programmed.
    ldp	x0, x1, [sp, #16 * 0]
    ldp	x2, x3, [sp, #16 * 1]
    ldp	x4, x5, [sp, #16 * 2]
    ldp	x6, x7, [sp, #16 * 3]
    ldp	x8, x9, [sp, #16 * 4]
    ldp	x10, x11, [sp, #16 * 5]
    ldp	x12, x13, [sp, #16 * 6]
    ldp	x14, x15, [sp, #16 * 7]
    ldp	x16, x17, [sp, #16 * 8]
    ldp	x18, x19, [sp, #16 * 9]
    ldp	x20, x21, [sp, #16 * 10]
    ldp	x22, x23, [sp, #16 * 11]
    ldp	x24, x25, [sp, #16 * 12]
    ldp	x26, x27, [sp, #16 * 13]
    ldp	x28, x29, [sp, #16 * 14]
    ldr	lr, [sp, #S_LR]
    add	sp, sp, #S_FRAME_SIZE		// restore sp

    eret
    sb					// speculation barrier after eret
    .endm

    .macro	irq_stack_entry
    mov	x19, sp			// preserve the original sp (consumed by irq_stack_exit)
    /*
     * Compare sp with the base of the task stack.
     * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
     * and should switch to the irq stack.  Otherwise we are already on
     * the irq stack (nested irq) and must stay where we are.
     */
    ldr	x25, [tsk, TSK_STACK]
    eor	x25, x25, x19
    and	x25, x25, #~(THREAD_SIZE - 1)
    cbnz	x25, 9998f

    // Switch to the top of this CPU's irq stack (stacks grow down).
    ldr_this_cpu x25, irq_stack_ptr, x26
    mov	x26, #IRQ_STACK_SIZE
    add	x26, x25, x26

    /* switch to the irq stack */
    mov	sp, x26
9998:
    .endm

    /*
     * x19 should be preserved between irq_stack_entry and
     * irq_stack_exit.
     */
    .macro	irq_stack_exit
    mov	sp, x19			// restore the sp saved by irq_stack_entry
    .endm

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info (x28 is callee-saved, so it survives C calls)

/*
 * Interrupt handling.
 */
    .macro	irq_handler
    // x1 = handle_arch_irq, the root IRQ dispatcher; x0 = pt_regs (the
    // frame on the current stack).  The call runs on the irq stack when
    // irq_stack_entry decided to switch.
    ldr_l	x1, handle_arch_irq
    mov	x0, sp
    irq_stack_entry
    blr	x1
    irq_stack_exit
    .endm

/*
 * Exception vectors.
 */
    .pushsection ".entry.text", "ax"

    .align	11			// VBAR_EL1 requires 2KB alignment
// NOTE(review): the vector table body is empty — the 16 kernel_ventry
// entries are presumably emitted elsewhere or still to be filled in;
// confirm this is intentional before pointing VBAR_EL1 at it.
ENTRY(vectors)
END(vectors)
