# -*- Mode: asm-mode -*-

/*
  S.M.A.C.K - An operating system kernel
  Copyright (C) 2010,2011 Mattias Holm and Kristian Rietveld
  For licensing and a full list of authors of the kernel, see the files
  COPYING and AUTHORS.
*/
        # Mode symbols for use by cps instruction.  These are the ARM
        # CPSR mode field encodings (M[4:0]).
	.equ USR_MODE, 0b10000	@ User (unprivileged)
	.equ FIQ_MODE, 0b10001	@ Fast interrupt
	.equ IRQ_MODE, 0b10010	@ Interrupt
	.equ SVC_MODE, 0b10011	@ Supervisor
	.equ MON_MODE, 0b10110	@ Monitor (Security Extensions)
	.equ ABT_MODE, 0b10111	@ Abort
	.equ UND_MODE, 0b11011	@ Undefined instruction
	.equ SYS_MODE, 0b11111	@ System (privileged, shares registers with User)

	.text

.global hw_idle
hw_idle:
	# Halt the CPU until the next interrupt arrives, then return to
	# the caller.
	# Leaf routine: wfi does not modify lr (interrupts use their own
	# banked lr), so no save/restore of lr is needed around it.
	wfi
	bx lr

.global hw_proc_start
hw_proc_start:
	# Start a (new) process: perform an exception return to user
	# mode, jumping to the entry address held in the SYS-mode lr.
	#
	# When control flow jumps here we are in SVC mode, and we must
	# do the exception return from SVC mode, not SYS mode (SYS mode
	# does not have its own SPSR).

	# We copy the SYS mode lr (which is where we want to jump to)
	# to SVC mode.
	cps #SYS_MODE
	mov r0, lr
	cps #SVC_MODE
	mov lr, r0

	# Set up initial CPSR for this process: user mode, interrupts
	# enabled, ARM state.
	msr SPSR, #USR_MODE

	# Execute exception return (SPSR -> CPSR, pc = lr - 4).  The -4
	# matches the "next instruction + 4" convention used by the ISR
	# handler and cswitch.
	subs pc, lr, #4


.global hw_return_from_fork
hw_return_from_fork:
	# SVC stack contains (user-space) address to load into lr and jump to.

	# Return zero in r0 (the child's return value from fork).
	# mov with an immediate avoids the literal-pool load of ldr r0, =0.
	mov r0, #0

	# Set SPSR to USR mode
	msr SPSR, #USR_MODE

	# Acquire LR from stack
	pop {lr}

	# Return to user mode (put SPSR in CPSR).
	# Continue at address in SVC lr.
	movs pc, lr



# Clean and invalidate the entire data/unified cache by set/way, walking
# every cache level reported by the CLIDR up to the Level of Coherency.
#
# Taken from ARM reference manual, "Example code for cache maintenance
# operations" with modification.
#
# PROCEDURE DOES NOT USE r7, so that TTBR0 value is preserved.
# Clobbers r1-r6, r8, r10-r12 and the condition flags.
clean_and_invalidate_data_cache:
	mrc p15, 1, r1, c0, c0, 1	@ r1 = CLIDR (cache level ID register)
	ands r4, r1, #0x7000000		@ extract Level of Coherency, bits [26:24]
	mov r4, r4, lsr #23		@ r4 = LoC * 2 (level counter steps by 2)
	beq done			@ LoC == 0: no caches to maintain
	mov r11, #0			@ r11 = current cache level * 2
loop1:
	add r3, r11, r11, lsr #1	@ r3 = level * 3 (Ctype fields are 3 bits each)
	mov r2, r1, lsr r3		@ shift this level's cache type into bits [2:0]
	and r2, r2, #7
	cmp r2, #2			@ Ctype < 2: no data cache at this level
	blt skip
	mcr p15, 2, r11, c0, c0, 0	@ select cache level in CSSELR
	isb				@ make CSSELR change visible before CCSIDR read
	mrc p15, 1, r2, c0, c0, 0	@ r2 = CCSIDR for the selected cache
	and r3, r2, #0x7
	add r3, r3, #4			@ r3 = log2(line size in bytes) = set-index shift
	ldr r5, =0x3ff
	ands r5, r5, r2, lsr #3		@ r5 = maximum way number (associativity - 1)
	clz r6, r5			@ r6 = bit position of the way field
	ldr r8, =0x00007fff
	ands r8, r8, r2, lsr #13	@ r8 = maximum set number
loop2:
	mov r10, r5			@ restart way counter for this set
loop3:
	orr r12, r11, r10, lsl r6	@ compose set/way word: level | way
	orr r12, r12, r8, lsl r3	@ ... | set
	mcr p15, 0, r12, c7, c14, 2	@ DCCISW: clean and invalidate data cache line by set/way
	subs r10, r10, #1		@ next way
	bge loop3
	subs r8, r8, #1			@ next set
	bge loop2
skip:
	add r11, r11, #2		@ next cache level
	cmp r4, r11
	bgt loop1
done:
	mov pc, lr


	# The goal of this function is to store the current context in
	# memory and restore the context of the next process to run from
	# memory.  The pointer to the proc_t structure (cpu_ctxt_t) of
	# the process to restore is passed in r0; the pointer to the
	# structure of the current process, where state is saved, is
	# passed in r1.  Entered in SVC mode; the final return is an
	# exception return performed from IRQ mode.
.global cswitch
cswitch:
	#
	# 1. Store state of current process.

	# Store first set of general-purpose registers (gprs).  This
	# also snapshots r0/r1 (the two arguments) as the saved r0/r1
	# of the suspended process.
	stm r1, {r0-r12}

	# Store remainder of state.  Switch to SYS and SVC modes to
	# store the SP and LR of these modes.  Also store the current
	# program state register (cpsr).
	mov r4, lr
	add r4, r4, #4		@ We add 4 to correct for the fact that we
				@ store next instruction + 4 in the pc
				@ field of the CPU ctxt. This way, we
				@ are compatible with the ISR handler
				@ (which resumes via subs pc, lr, #4).
	cps #SYS_MODE
	mov r2, r13		@ r2 = SYS-mode sp
	mov r3, r14		@ r3 = SYS-mode lr
	cps #SVC_MODE
	mov r5, r13		@ r5 = SVC-mode sp
	mov r6, r14		@ r6 = SVC-mode lr
	mrs r7, cpsr		@ r7 = current program state

	# Calculate offset into proc_t (cpu_ctxt_t) to store the remainder
	# of the context.  stm writes registers in ascending order, so the
	# six words after the 13 gprs are:
	# {SYS sp, SYS lr, pc+4, SVC sp, SVC lr, cpsr}.
	mov r8, r1
	add r8, r8, #(13*4)
	stm r8, {r2-r7}

	#
	# 2. Switch to page tables of new process

	# Retrieve ttbr0 of new process into r7 (word 20 of the structure).
	# NOTE: KEEP IN SYNC WITH cpu_helper_t
	mov r8, r0
	add r8, r8, #(20*4)
	ldr r7, [r8]

	# Flush instruction pipeline
	isb

	# Clean and invalidate data cache before the translation tables
	# change.  (This call clobbers lr; the return pc was already
	# saved to memory in r4 above.)
	# Registers which can be used: r1 and onwards (we no longer need
	# the argument stored in r1), except for r7.
	bl clean_and_invalidate_data_cache
	dsb

	# Set ttbr0 to new value
	mcr p15, 0, r7, c2, c0, 0

	# Now set r7 to zero, even though its value is ignored by the next
	# instructions.
	mov r7, #0x0

	# Flush TLB (instruction, data and unified TLB invalidate-all)
	mcr p15, 0, r7, c8, c5, 0
	mcr p15, 0, r7, c8, c6, 0
	mcr p15, 0, r7, c8, c7, 0

	# Invalidate all instruction caches
	mcr p15, 0, r7, c7, c5, 0

	# Invalidate branch predictor (bpc)
	mcr p15, 0, r7, c7, c5, 6

	dsb

	# Flush instruction pipeline
	isb

	#
	# 3. Restore state of process to run next

	# Calculate offset into proc_t (cpu_ctxt_t) to restore the different
	# sp, lr and psr registers.  After the ldm: r2 = SYS sp, r3 = SYS lr,
	# r4 = pc + 4, r5 = SVC sp, r6 = SVC lr, r7 = saved psr.
	mov r8, r0
	add r8, r8, #(13*4)

	ldm r8, {r2-r7}

	# The SPSR is moved to the CPSR with the subs instruction below.

	# We will store the next instruction to be run + 4 in the IRQ mode
	# lr register.  We cannot overwrite the SVC lr with this in case
	# we are returning to SVC mode.  Since we will return from IRQ
	# mode, we need to copy the spsr to the IRQ mode spsr register.
	cps #IRQ_MODE
	mov lr, r4
	msr spsr, r7
	cps #SVC_MODE

	# Restore SYS mode SP, LR
	cps #SYS_MODE
	mov r13, r2
	mov r14, r3

	# Restore SVC mode SP, LR
	cps #SVC_MODE
	mov r13, r5
	mov r14, r6
	# Leave state in SVC mode.

	# Restore first set of general purpose registers.  Note that r0
	# (the ctxt pointer) is itself overwritten by the saved r0, so
	# this must be the last use of the pointer.
	ldm r0, {r0-r12}

	# Clear exclusive state, so that spinlocks will work after this.

	clrex

	# Restore PC from IRQ mode link register and move SPSR to CPSR
	# (exception return: pc = lr - 4, cpsr = spsr).
	cps #IRQ_MODE
	subs pc, lr, #4

	.end
