/*
 * Copyright (c) 2008, Artur Emagulov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * 23.08.2008
 * sys/arch/arm/atoms.S - low level code for ARM platform
 */


#include <arm/platform.h>
#include <arm/asm.h>

	.extern syscall_table
	.globl exception_vector_start
	.globl exception_vector_end

	.section ".text"
	.code 32

ENTRY(exception_vector_start)
	/*
	 * Exception vector table.
	 *
	 * This table will be copied to an appropriate location
	 * (the location is platform specific).  Each slot loads pc
	 * from a pc-relative literal word below, so the table keeps
	 * working wherever it is copied, provided the literal words
	 * are copied along with it.
	 */
	ldr	pc, reset_target		/* 0x00 reset, mode: svc */
	ldr	pc, undefined_target	/* 0x04 undefined instruction, mode: und */
	ldr	pc, swi_target			/* 0x08 software interrupt, mode: svc */
	ldr	pc, prefetch_target		/* 0x0c prefetch abort, mode: abort */
	ldr	pc, abort_target		/* 0x10 data abort, mode: abort */
	nop							/* 0x14 reserved */
	ldr	pc, irq_target			/* 0x18 interrupt, mode: irq */
	ldr	pc, fiq_target			/* 0x1c fast interrupt, mode: fiq */

/* Literal pool: handler addresses referenced by the 'ldr pc' slots above */
reset_target:		.word	reset_entry
undefined_target:	.word	undefined_entry
swi_target:			.word	syscall_entry
prefetch_target:	.word	prefetch_entry
abort_target:		.word	abort_entry
irq_target:			.word	interrupt_entry
fiq_target:			.word	fiq_entry

exception_vector_end:
	/* NOTE(review): this word holds its own link-time address —
	 * presumably a sentinel for the table-copy code; confirm
	 * against the platform code that installs the vectors. */
	.word exception_vector_end
	.align 4

/*
 * TODO IMPORTANT!!! Need to reorganize exception handlers!!!
 */
/*
 * reset_entry - reset exception handler.
 * Spins forever; presumably real reset initialization lives in the
 * platform start-up code, so reaching this vector at runtime just
 * hangs the CPU where a debugger can inspect it.
 */
reset_entry:
	b reset_entry			/* prevent return */


	/*
	 * Setup stack pointer for each processor mode
	 */
	/*
	 * stack_setup - set up the stack pointer for each processor mode.
	 *
	 * Walks through IRQ, FIQ, Abort, Undefined, SYS and SVC modes
	 * (with IRQ and FIQ masked throughout) and loads each mode's
	 * banked sp from the corresponding linker-provided *_stack_top
	 * symbol.  Returns in SVC mode with interrupts still masked.
	 *
	 * In:     nothing
	 * Out:    nothing (CPU left in SVC mode, IRQ/FIQ disabled)
	 * Clobb:  r0, CPSR control field
	 */
	.global stack_setup
ENTRY(stack_setup)
__irq_stack_setup:
	mov	r0, #(PSR_IRQ_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr_c, r0
	ldr	sp, =__irq_stack_top	/* Set IRQ mode stack */
__fiq_stack_setup:
	mov	r0, #(PSR_FIQ_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr_c, r0
	ldr	sp, =__fiq_stack_top	/* Set FIQ mode stack */
__abt_stack_setup:
	mov	r0, #(PSR_ABT_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr_c, r0
	ldr	sp, =__abt_stack_top	/* Set Abort mode stack */
__und_stack_setup:
	mov	r0, #(PSR_UND_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr_c, r0
	ldr	sp, =__und_stack_top	/* Set Undefined mode stack */
__sys_stack_setup:
	mov	r0, #(PSR_SYS_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr_c, r0		/* was 'msr cpsr': write only the control
					 * field, like the other modes, so the
					 * flag/extension bits stay untouched */
	ldr	sp, =__sys_stack_top	/* Set SYS mode stack */
__svc_stack_setup:
	mov	r0, #(PSR_SVC_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr_c, r0		/* was 'msr cpsr': same fix as above */
	ldr	sp, =__svc_stack_top	/* Set SVC mode stack */
	mov	pc, lr			/* return */
    .size   stack_setup, . -stack_setup

    /**
     * __aeabi_uidivmod - EABI unsigned divide-and-remainder helper
     * (currently compiled out with #if 0).
     *
     * When enabled: returns quotient in r0 and remainder in r1,
     * built on top of __aeabi_uidiv.  The pushed {r0, r1}
     * (dividend, divisor) are popped into {r1, r2} after the
     * division, so the remainder is r1 = dividend - quotient*divisor.
     */
#if 0
    .globl __aeabi_uidivmod
ENTRY(__aeabi_uidivmod)
   stmfd   sp!, {r0, r1, ip, lr}
   bl   __aeabi_uidiv
   ldmfd   sp!, {r1, r2, ip, lr}
   mul   r3, r0, r2
   sub   r1, r1, r3
   mov   pc, lr
#endif

/*
 * TODO Due to recommendations from ARM Technical Support Articles \
 * this ISR must be reentrant, and should be more complex. Possibly \
 * it should use SYS-mode, instead of SVC, to branch to the system's ISR.
 *
 * Interrupt entry point
 */
/*
 * interrupt_entry - IRQ exception entry.
 *
 * Captures the interrupted context into a 'cpu_regs' frame on the
 * SVC stack and calls __up_interrupt_handler(frame).  Frame layout
 * (word offsets, as used by the loads in interrupt_ret):
 *   [0..14]  r0-r14 (user bank, stored with '^')
 *   [15]     svc_sp
 *   [16]     svc_lr
 *   [17]     pc of the interrupted instruction (lr_irq - 4)
 *   [18]     cpsr of the interrupted task (spsr_irq)
 */
 	.global interrupt_ret
ENTRY(interrupt_entry)
	/* On entry the IRQ-mode sp points at its stack top; it is
	 * only used as scratch below (no writeback), so it is left
	 * unchanged for the next interrupt.
	 */
	stmfd	sp, {r0-r4}			/* Save work registers in IRQ stack (no writeback) */
	sub	r2, lr, #4				/* r2: pc, save return instruction address */
	mrs	r3, spsr				/* r3: cpsr, save PSR of an interrupted task */
	sub	r4, sp, #(4*5)			/* r4: bottom of the 5 words just saved */

	mrs	r0, cpsr				/* Set processor to SVC mode */
	bic	r0, r0, #PSR_MOD_BITS
	/* in nested mode interrupts still disabled */
	orr	r0, r0, #(PSR_SVC_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr_c, r0
	mov	r0, sp					/* SVC stack pointer is in use now */
	mov	r1, lr					/* SVC's lr, must survive the handler call */

	/* prepare 'cpu_regs' structure */
	stmfd	sp!, {r0-r3}		/* Push svc_sp, svc_lr, pc, cpsr */
	ldmfd	r4, {r0-r4}			/* Restore work registers from the IRQ stack */
	sub		sp, sp, #(4*15)
	stmia	sp, {r0-r14}^		/* Push r0-r14 (user bank) */
	nop							/* Instruction gap for stm^ */

	mov r0, sp					/* r0 now points to 'cpu_regs' */
	bl	__up_interrupt_handler		/* Call main interrupt handler */

interrupt_ret:
	mov r0, sp					/* save sp, r0 possibly corrupted after '__up_interrupt_handler' */
 	ldr	lr, [r0, #(4*16)]		/* Restore SVC lr */
 	ldr	sp, [r0, #(4*15)]		/* Restore SVC sp */

	mrs	r1, cpsr				/* Switch back to IRQ mode for the exception return */
	bic	r1, r1, #PSR_MOD_BITS
	orr	r1, r1, #(PSR_IRQ_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr_c, r1

 	ldr	lr, [r0, #(4*17)]		/* Restore return instruction address */
 	ldr	r1, [r0, #(4*18)]		/* Restore PSR of an interrupted task */
 	msr	spsr_all, r1
 	ldmfd	r0, {r0-r14}^		/* Restore user mode registers */
	nop							/* Instruction gap for ldm^ */
	movs pc, lr					/* Exit, with restoring cpsr from spsr */

/*
 * System call entry
 * r0,r1,r2,r3 - can be used by syscall, so will do not touch them
 */
/*
 * syscall_entry - SWI (system call) exception entry.
 *
 * Builds a 19-word 'cpu_regs' frame on the SVC stack (same layout
 * as interrupt_entry; slot [16] svc_lr is unused on this path) and
 * calls __up_syscall_handler(frame).  The handler's return value is
 * stored into the frame's r0 slot so the caller receives it after
 * the context restore.
 * r0-r3 may carry syscall arguments, so they are not touched before
 * the context is saved; r5 is used as scratch (its saved copy is
 * restored on exit).
 */
	.global syscall_ret
ENTRY(syscall_entry)

	sub	sp, sp, #(4*19)		/* Adjust stack: room for the 19-word frame */
	stmia	sp, {r0-r14}^	/* Push r0-r14 (user bank) */
	nop						/* Instruction gap for stm^ */
	add	r5, sp, #(4*19)		/* r5: sp value before the frame was pushed */
	str	r5, [sp, #(4*15)]	/* Push svc_sp */
	str	lr, [sp, #(4*17)]	/* Push pc (return address after the swi) */
	mrs	r5, spsr			/* Push cpsr of the caller */
	str	r5, [sp, #(4*18)]

#if 0
	/* Disabled alternative: dispatch directly through
	 * syscall_table using the number encoded in the low 24 bits
	 * of the swi opcode. */
    ldr     r4, [lr, #-4]           /* Get swi opcode */
 	bic     r4, r4, #0xff000000		/* Fetch syscall number from opcode */

	ldr	r5, =syscall_table
	ldr	r4, [r5, r4, lsl #2]	/* Get syscall handler */
	mov	lr, pc				/* set address of next instruction as the return address for syscall */
	mov	pc, r4				/* Syscall returns here */
	str	r0, [sp]			/* r0 stores syscall result. Set it into r0 of interrupted context */
#else
    mov r0, sp					/* r0 now points to 'cpu_regs' */
    bl	__up_syscall_handler		/* Call main syscall handler */
    str	r0, [sp]				/* result -> frame's r0 slot */
#endif

syscall_ret:
	mov	r5, sp
	ldr	r1, [r5, #(4*18)]	/* Restore cpsr (via spsr) */
	msr	spsr_all, r1
	ldr	lr, [r5, #(4*17)]	/* Restore pc (into lr) */
	ldr	sp, [r5, #(4*15)]	/* Restore svc_sp */
	ldmfd	r5, {r0-r14}^	/* Restore user mode registers */
	nop						/* Instruction gap for ldm^ */
	movs	pc, lr			/* Exit, with restoring cpsr from spsr */



/*
 * TODO: Need to handle following exceptions.
 * TODO Each abort must cause the system to \
 * analyze (at least to save) the state that preceded \
 * an abort, that is to provide the system \
 * adaptation and/or self-learning.
 */
/*
 * undefined_entry - Undefined-instruction exception entry.
 *
 * Builds the same 19-word 'cpu_regs' frame as syscall_entry on the
 * current stack and calls __up_undefined_handler(frame).
 * Frame slots: [0..14] r0-r14 (user bank), [15] sp before the
 * frame, [17] lr_und (address after the undefined instruction),
 * [18] spsr.  r5 is used as scratch; its saved copy is restored.
 */
 	.global undefined_ret
ENTRY(undefined_entry)
	sub	sp, sp, #(4*19)		/* Adjust stack: room for the frame */
	stmia	sp, {r0-r14}^	/* Push r0-r14 (user bank) */
	nop						/* Instruction gap for stm^ */
	add	r5, sp, #(4*19)		/* r5: sp value before the frame was pushed */
	str	r5, [sp, #(4*15)]	/* Push svc_sp */
	str	lr, [sp, #(4*17)]	/* Push pc */
	mrs	r5, spsr			/* Push cpsr of the interrupted task */
	str	r5, [sp, #(4*18)]

	mov r0, sp					/* r0 now points to 'cpu_regs' */
	bl	__up_undefined_handler		/* Call undefined-instruction handler */

undefined_ret:
	mov	r5, sp
	ldr	r1, [r5, #(4*18)]	/* Restore cpsr (via spsr) */
	msr	spsr_all, r1
	ldr	lr, [r5, #(4*17)]	/* Restore pc (into lr) */
	ldr	sp, [r5, #(4*15)]	/* Restore svc_sp */
	ldmfd	r5, {r0-r14}^	/* Restore user mode registers */
	nop						/* Instruction gap for ldm^ */
	movs	pc, lr			/* Exit, with restoring cpsr from spsr */

/*
 * prefetch_entry - Prefetch Abort exception entry.
 *
 * Builds the same 19-word 'cpu_regs' frame as syscall_entry on the
 * current stack and calls __up_prefetch_handler(frame).
 * Frame slots: [0..14] r0-r14 (user bank), [15] sp before the
 * frame, [17] lr_abt, [18] spsr.  r5 is used as scratch; its saved
 * copy is restored on exit.
 */
	.global prefetch_ret
ENTRY(prefetch_entry)
	sub	sp, sp, #(4*19)		/* Adjust stack: room for the frame */
	stmia	sp, {r0-r14}^	/* Push r0-r14 (user bank) */
	nop						/* Instruction gap for stm^ */
	add	r5, sp, #(4*19)		/* r5: sp value before the frame was pushed */
	str	r5, [sp, #(4*15)]	/* Push svc_sp */
	str	lr, [sp, #(4*17)]	/* Push pc */
	mrs	r5, spsr			/* Push cpsr of the interrupted task */
	str	r5, [sp, #(4*18)]

	mov r0, sp					/* r0 now points to 'cpu_regs' */
	bl	__up_prefetch_handler		/* Call prefetch-abort handler */

prefetch_ret:
	mov	r5, sp
	ldr	r1, [r5, #(4*18)]	/* Restore cpsr (via spsr) */
	msr	spsr_all, r1
	ldr	lr, [r5, #(4*17)]	/* Restore pc (into lr) */
	ldr	sp, [r5, #(4*15)]	/* Restore svc_sp */
	ldmfd	r5, {r0-r14}^	/* Restore user mode registers */
	nop						/* Instruction gap for ldm^ */
	movs	pc, lr			/* Exit, with restoring cpsr from spsr */


/*
 *
 *
 */
 	.global abort_ret
ENTRY(abort_entry)
	mrs	r0, cpsr				/* Set processor to SVC mode */
	bic	r0, r0, #PSR_MOD_BITS
	/* in nested mode interrupts still disabled */
	orr	r0, r0, #(PSR_SVC_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr_c, r0
	mov	r0, sp					/* SVC stack pointer is in use now */
	mov	r1, lr					/* SVC's lr... */

	/* prepare 'cpu_regs' structure */
	stmfd	sp!, {r0-r3}		/* Push svc_sp, svc_lr, pc, cpsr */
	ldmfd	r4, {r0-r4}			/* Restore work registers */
	sub		sp, sp, #(4*15)
	stmia	sp, {r0-r14}^		/* Push r0-r14 */
	nop							/* Instruction gap for stm^ */

	mov r0, sp					/* r0 now points to 'cpu_regs' */
	bl	__up_abort_handler		/**/

abort_ret:
#if 1
	mov r0, sp					/* save sp, r0 possibly corrupted after '__up_abort_handler' */
 	ldr	lr, [r0, #(4*16)]		/* Restore SVC lr */
 	ldr	sp, [r0, #(4*15)]		/* Restore SVC sp */

	mrs	r1, cpsr				/* Set processor to IRQ mode */
	bic	r1, r1, #PSR_MOD_BITS
	orr	r1, r1, #(PSR_ABT_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr_c, r1

 	ldr	lr, [r0, #(4*17)]		/* Restore return instruction address */
 	ldr	r1, [r0, #(4*18)]		/* Restore PSR of an interrupted task */
 	msr	spsr_all, r1
 	ldmfd	r0, {r0-r14}^		/* Restore user mode registers */
	nop							/* Instruction gap for ldm^ */
	movs pc, lr					/* Exit, with restoring cpsr */
#else
	mov	r5, sp
	ldr	r1, [r5, #(4*18)]	/* Restore cpsr */
	msr	spsr_all, r1
	ldr	lr, [r5, #(4*17)]	/* Restore pc (lr) */
	ldr	sp, [r5, #(4*15)]	/* Restore svc_sp */
	ldmfd	r5, {r0-r14}^	/* Restore user mode registers */
	nop						/* Instruction gap for ldm^ */
	movs	pc, lr			/* Exit, with restoring cpsr */
#endif

/*
 *
 *
 */
/*
 * fiq_entry - FIQ exception handler.
 * FIQ is not handled; spin in place so the hang is observable.
 */
ENTRY(fiq_entry)
	b	fiq_entry		/* hang... */

/*
 * Switch register context.
 * r0 = previous kern_regs, r1 = next kern_regs
 * Interrupts must be disabled by caller.
 *
 * syntax - void cpu_switch(kern_regs *prev, kern_regs *next)
 *
 * Note: GCC uses r0-r3,r12 as scratch registers
 */
ENTRY(cpu_switch)
	stmia	r0, {r4-r11, sp, lr}	/* Save previous register context (callee-saved set) */
	ldmia	r1, {r4-r11, sp, pc}	/* Restore next context; loading pc resumes it */

/* interrupt_common - empty stub; immediately returns to the caller. */
ENTRY(interrupt_common)
	mov	pc,	lr

/*
 * flush_tlb - invalidate the entire TLB via CP15 c8.
 * Clobbers: r0
 */
ENTRY(flush_tlb)
	mov	r0, #0x0
	mcr p15, 0, r0, c8, c7, 0	/* Invalidate all the TLB */
	mov	pc,	lr

/*
 * TODO(review): all routines below are unimplemented stubs — each
 * returns immediately without doing any work.  Callers relying on
 * umem_* copy semantics or the port I/O accessors will silently
 * get nothing.
 */
ENTRY(umem_copyin)		/* stub: copy in from user space — not implemented */
	mov	pc,	lr

ENTRY(umem_copyout)		/* stub: copy out to user space — not implemented */
	mov	pc,	lr

ENTRY(umem_strnlen)		/* stub: bounded strlen on user memory — not implemented */
	mov	pc,	lr

ENTRY(umem_fault)		/* stub: user-memory fault check — not implemented */
	mov	pc,	lr

ENTRY(cpu_reset)		/* stub: CPU reset — not implemented */
	mov	pc,	lr

ENTRY(__outl)			/* stub: port I/O (no port space on ARM) */
	mov	pc,	lr

ENTRY(__outw)			/* stub */
	mov	pc,	lr

ENTRY(__outb)			/* stub */
	mov	pc,	lr

ENTRY(__inl)			/* stub */
	mov	pc,	lr

ENTRY(__inw)			/* stub */
	mov	pc,	lr

ENTRY(__inb)			/* stub */
	mov	pc,	lr


/*
 * interrupt_enable - enable IRQ and FIQ by clearing the I and F
 * bits in the CPSR.
 * Clobbers: r0
 */
ENTRY(interrupt_enable)
	mrs	r0, cpsr
	bic	r0, r0, #(PSR_IRQ_DIS|PSR_FIQ_DIS)	/* named bits, not magic 0xc0 */
	msr	cpsr_c, r0		/* control field only (was 'msr cpsr'):
					 * consistent with interrupt_restore and
					 * leaves condition flags alone */
	mov	pc, lr

/*
 * interrupt_disable - disable IRQ and FIQ by setting the I and F
 * bits in the CPSR.
 * Clobbers: r0
 */
ENTRY(interrupt_disable)
	mrs	r0, cpsr
	orr	r0, r0, #(PSR_IRQ_DIS|PSR_FIQ_DIS)	/* named bits, not magic 0xc0 */
	msr	cpsr_c, r0		/* control field only (was 'msr cpsr'):
					 * consistent with interrupt_restore and
					 * leaves condition flags alone */
	mov	pc, lr

/*
 * interrupt_save - store the current CPSR to the word at *r0.
 * NOTE(review): this only records the state; it does NOT mask
 * interrupts — callers wanting save-and-disable must also call
 * interrupt_disable.
 * Clobbers: r1
 */
ENTRY(interrupt_save)
	mrs	r1, cpsr
	str	r1, [r0]
	mov	pc, lr

/*
 * interrupt_restore - write r0 into the CPSR control field (mode
 * and interrupt-mask bits).  r0 is the PSR value by value — note
 * the asymmetry with interrupt_save, which stores through a
 * pointer; the caller loads the saved word itself.
 */
ENTRY(interrupt_restore)
	msr	cpsr_c, r0
	mov	pc, lr

/*
 * interrupt_debug - debug aid: momentarily switch to IRQ mode and
 * straight back, preserving the original CPSR in r1.  Useful for
 * checking that mode switching (and the IRQ-mode bank) works.
 * Clobbers: r0, r1
 */
ENTRY(interrupt_debug)
	mrs	r0, cpsr				/* Set processor to IRQ mode */
	mov r1, r0
	bic	r0, r0, #PSR_MOD_BITS
	orr	r0, r0, #PSR_IRQ_MODE
	msr	cpsr, r0
	msr	cpsr, r1			/* switch back */
	mov	pc, lr


ENTRY(cpu_idle)
	/*
	 * Idle the CPU until the next interrupt.
	 *
	 * TODO Power management
	 * WFI('Wait for interrupt') instruction, see "ARM926EJ-S™ Technical Reference Manual"
	 * section. 12.1.1. 'Dynamic power management (wait for interrupt mode)'
	 * NOTE For different ARM processors it should be also different 'cpu_idle'
	 * Clobbers: r0
	 */
	mov	r0, #0x0
	mcr p15, 0, r0, c7, c0, 4	/* CP15 wait-for-interrupt */
	/* ldr	pc, undefined_target */
	mov	pc,	lr

/*
 * CP15 (system control coprocessor) register accessors.
 * Each get_* returns the register value in r0; each set_* writes
 * r0 into the register.  Register meanings per the ARM926EJ-S TRM —
 * confirm for other cores:
 *   c0: ID code, c1: control, c2: translation table base,
 *   c5: fault status, c6: fault address (opcode2 selects variant).
 */
ENTRY(get_p15_c0)
	mrc	p15, 0, r0, c0, c0		/* read ID register */
	mov pc,lr

ENTRY(get_p15_c1)
	mrc	p15, 0, r0, c1, c0		/* read control register */
	mov pc,lr

ENTRY(get_p15_c2)
	mrc	p15, 0, r0, c2, c0		/* read translation table base */
	mov pc,lr

ENTRY(get_p15_c6)
	mrc	p15, 0, r0, c6, c0, 0	/* read fault address register */
	mov pc,lr

ENTRY(get_p15_c6_1)
	mrc	p15, 0, r0, c6, c0, 1	/* read c6 variant (opcode2=1) */
	mov pc,lr

ENTRY(get_p15_c5)
	mrc	p15, 0, r0, c5, c0, 0	/* read fault status register */
	mov pc,lr

ENTRY(set_p15_c0)
	mcr p15, 0, r0, c0, c0, 0	/* write c0 */
	mov pc,lr

ENTRY(set_p15_c1)
	mcr p15, 0, r0, c1, c0, 0	/* write control register */
	mov pc,lr

ENTRY(set_p15_c2)
	mcr p15, 0, r0, c2, c0, 0	/* write translation table base */
	mov pc,lr

.end
