/*
 * Copyright (c) 2008, Artur Emagulov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * 23.08.2008
 * atoms.S - low level code
 */

#define __ASSEMBLY__

#include <x86/platform.h>
#include <x86/asm.h>

#define HAVE_ASM_USCORE 0
#define ASM     1
#include <multiboot.h>

	/* Symbols resolved at link time: C-level syscall handler and kernel main. */
	.extern __up_syscall_handler;
	.extern kmain,_kmain;

/* ".text0" is a separate section so the linker script can place this
   entry stub at the very start of the kernel image. */
.section ".text0"
	.globl _start, start

.align			/* no operand: uses the target's default alignment */
ENTRY(start)
_start:
	jmp     do_start	/* all real startup work happens in do_start */

/**
 * Kernel startup.
 *
 * The boot loader is assumed to have left two words on the incoming
 * stack; they are read before the stack is switched:
 *   4(%esp) -> %eax
 *   8(%esp) -> %ebx
 *
 * NOTE(review): the original comments contradicted each other about
 * which word is the magic value and which is the boot-info (hwpp)
 * pointer. With the cdecl push order below, kmain() receives %eax as
 * its FIRST argument and %ebx as its SECOND — confirm this matches
 * kmain's prototype.
 */
ENTRY(do_start)
	/* Load first boot word (magic value, per the entry comments) */
	movl	4(%esp),%eax
	/* Load second boot word (boot-info / hwpp pointer) */
	movl	8(%esp),%ebx

	/* Switch to the kernel's own stack before calling C code */
	movl $__sys_stack_top, %esp

	/* Second argument to kmain() */
	pushl  %ebx
	/* First argument to kmain() */
	pushl   %eax
	/* Now enter the C main function... */
	call    EXT_C(kmain)

loop:   
	hlt			/* kmain returned: halt until next interrupt... */
	jmp     loop		/* ...then halt again, forever */

/**
 * Macros to save/restore registers
 *
 * These macros build the trap frame by pushing registers.
 * If you change the push order of these macros, you must change the
 * trap frame structure in arch.h. In addition, the system call stub
 * depends on this register format: syscall_entry patches the saved
 * %eax at a fixed offset (0x18) to deliver the syscall return value.
 *
 * Frame layout after SAVE_ALL, relative to %esp (lowest address first):
 *   0x00 ebx, 0x04 ecx, 0x08 edx, 0x0c esi, 0x10 edi,
 *   0x14 ebp, 0x18 eax, 0x1c ds,  0x20 es
 */
#define SAVE_ALL \
	cld; \
	pushl	%es; \
	pushl	%ds; \
	pushl	%eax; \
	pushl	%ebp; \
	pushl	%edi; \
	pushl	%esi; \
	pushl	%edx; \
	pushl	%ecx; \
	pushl	%ebx;

/* Pop the frame built by SAVE_ALL, in exactly the reverse order. */
#define RESTORE_ALL \
	popl	%ebx; \
	popl	%ecx; \
	popl	%edx; \
	popl	%esi; \
	popl	%edi; \
	popl	%ebp; \
	popl	%eax; \
	popl	%ds; \
	popl	%es;

/*
 * Load the kernel data segment into %ds/%es (a trap or interrupt may
 * arrive with user segment registers loaded). Clobbers %edx, which is
 * safe only after SAVE_ALL has saved it.
 */
#define SETUP_SEG \
	movl	$(KERNEL_DS), %edx; \
	movl	%edx, %ds; \
	movl	%edx, %es;


.section ".text"


/**
 * TODO: Need to implement sched_lock(),interrupt_handler(),exception_deliver()
 *
 * Common entry for all interrupts.
 *
 * Entered by a jmp from an INTR_ENTRY stub, which has already pushed a
 * dummy error code (0) and the IRQ number on top of the CPU's iret frame.
 * Setup interrupt stack for outermost interrupt.
 * The code should be written to prevent the stack overflow
 * by continuous interrupt as much as it can.
 */
ENTRY(interrupt_common)
	SAVE_ALL
	SETUP_SEG
	pushl	%esp				/* push 'cpu_regs' pointer */
	call	__up_interrupt_handler	/* Process interrupt (call to 'C' common IRQ handler) */
	addl	$4, %esp			/* drop the 'cpu_regs' argument */
	RESTORE_ALL
	addl	$8, %esp			/* discard IRQ number and dummy error code */
	iret


/**
 * Macro to build an interrupt entry stub.
 *
 * Each stub pushes a dummy error code (0, to keep the frame layout
 * uniform with CPU exceptions that push one) and its IRQ number, then
 * jumps to the shared interrupt_common path above.
 */
#define INTR_ENTRY(irq) \
ENTRY(intr_##irq) \
	pushl	$0; \
	pushl	$(irq); \
	jmp	interrupt_common

/* One stub per legacy PIC IRQ line (0-15). */
INTR_ENTRY(0)
INTR_ENTRY(1)
INTR_ENTRY(2)
INTR_ENTRY(3)
INTR_ENTRY(4)
INTR_ENTRY(5)
INTR_ENTRY(6)
INTR_ENTRY(7)
INTR_ENTRY(8)
INTR_ENTRY(9)
INTR_ENTRY(10)
INTR_ENTRY(11)
INTR_ENTRY(12)
INTR_ENTRY(13)
INTR_ENTRY(14)
INTR_ENTRY(15)

/**
 * TODO: Need to implement trap_handler()
 *
 * Common entry for all traps.
 * Entered from a TRAP_ENTRY/TRAP_ERR_ENTRY stub with the trap number
 * and an error code (real or dummy 0) already on the stack.
 * New thread will start from trap_ret.
 *
 * NOTE(review): iret restores EFLAGS (including IF) from the trap
 * frame, so the sti immediately before iret only enables interrupts
 * for that single instruction — confirm it is intentional.
 */
ENTRY(trap_common)
	SAVE_ALL
	SETUP_SEG
	pushl	%esp			/* pass trap frame pointer to the C handler */
	cli				/* run the handler with interrupts masked */
	call	__up_trap_handler
	addl	$4, %esp		/* drop the frame-pointer argument */
	RESTORE_ALL
	addl	$8, %esp		/* discard trap number and error code */
	sti
	iret

/**
 * Default trap entry, installed for vectors with no dedicated stub.
 * Pushes a dummy error code and the INVALID_INT trap number.
 */
ENTRY(trap_default)
	pushl	$0
	pushl	$(INVALID_INT)
	jmp		trap_common

/**
 * Macros to build trap entries.
 *
 * Some CPU exceptions push an error code onto the stack and some do
 * not; trap_common expects a uniform frame, so:
 *   TRAP_ENTRY(id)     - vector pushes NO error code: push a dummy 0
 *                        in its place, then the trap number.
 *   TRAP_ERR_ENTRY(id) - CPU already pushed an error code: push only
 *                        the trap number.
 *
 * FIX(review): per the Intel SDM, vectors 9 (coprocessor segment
 * overrun) and 18 (machine check) do NOT push an error code. They
 * previously used TRAP_ERR_ENTRY, which left the trap frame one word
 * short so RESTORE_ALL/iret popped misaligned values. Both now use
 * TRAP_ENTRY so a dummy error code is supplied.
 */
#define TRAP_ENTRY(id) \
ENTRY(trap_##id) \
	pushl	$0; \
	pushl	$(id); \
	jmp	trap_common;

#define TRAP_ERR_ENTRY(id) \
ENTRY(trap_##id) \
	pushl	$(id); \
	jmp	trap_common;

TRAP_ENTRY    ( 0)		/* Divide error */
TRAP_ENTRY    ( 1)		/* Debug trap */
TRAP_ENTRY    ( 2)		/* NMI */
TRAP_ENTRY    ( 3)		/* Breakpoint */
TRAP_ENTRY    ( 4)		/* Overflow */
TRAP_ENTRY    ( 5)		/* Bounds check */
TRAP_ENTRY    ( 6)		/* Invalid opcode */
TRAP_ENTRY    ( 7)		/* Device not available */
TRAP_ERR_ENTRY( 8)		/* Double fault */
TRAP_ENTRY    ( 9)		/* Coprocessor segment overrun - no error code */
TRAP_ERR_ENTRY(10)		/* Invalid TSS */
TRAP_ERR_ENTRY(11)		/* Segment not present */
TRAP_ERR_ENTRY(12)		/* Stack bounds */
TRAP_ERR_ENTRY(13)		/* General Protection */
TRAP_ERR_ENTRY(14)		/* Page fault */
TRAP_ENTRY    (15)		/* (reserved) */
TRAP_ENTRY    (16)		/* Coprocessor error */
TRAP_ERR_ENTRY(17)		/* Alignment check */
TRAP_ENTRY    (18)		/* Machine check - no error code */

/**
 * TODO: Need to implement exception_deliver()
 *
 * System call entry.
 *
 * Builds a frame identical to the trap frame (dummy error code + trap
 * number slot holding the syscall number) and calls the C dispatcher.
 * The dispatcher's return value (in %eax) is written over the saved
 * %eax slot of the SAVE_ALL frame — offset 0x18, see SAVE_ALL — so
 * RESTORE_ALL delivers it to the caller.
 *
 * syscall_ret is exported so a newly created thread's initial context
 * can be pointed here to "return" into user mode.
 */
	.global syscall_ret
ENTRY(syscall_entry)
	pushl	$0			/* Dummy for error code */
	pushl	%eax		/* Syscall number */
	SAVE_ALL
	SETUP_SEG
	pushl	%esp			/* pass frame pointer to the C dispatcher */
	call	__up_syscall_handler	/* Call function */
	addl 	$4, %esp		/* drop the frame-pointer argument */
	movl	%eax, 0x18(%esp)	/* Set return value to eax */
syscall_ret:
	RESTORE_ALL
	addl	$8, %esp		/* Discard err/trap no */
	iret

/**
 * Switch register context.
 * Interrupts must be disabled by caller.
 *
 * syntax - void cpu_switch(kern_regs *prev, kern_regs *next)
 *
 * Only the callee-saved registers and eip/esp are saved: GCC assumes
 * ebx,ebp,edi,esi registers are not changed across a function call, and
 * the caller-saved registers are dead at this point by the cdecl ABI.
 *
 * kern_regs layout assumed here: 0=eip, 4=ebx, 8=edi, 12=esi,
 * 16=ebp, 20=esp. The "return" into the next thread happens by
 * rewriting the return address on its saved stack and executing ret.
 */
ENTRY(cpu_switch)
	movl	4(%esp), %ecx		/* Point ecx to previous registers */
	movl	(%esp), %eax		/* Get return address */
	movl	%eax, 0(%ecx)		/* Save it as eip */
	movl	%ebx, 4(%ecx)		/* Save ebx */
	movl	%edi, 8(%ecx)		/* Save edi */
	movl	%esi, 12(%ecx)		/* Save esi */
	movl	%ebp, 16(%ecx)		/* Save ebp */
	movl	%esp, 20(%ecx)		/* Save esp */
	movl	8(%esp), %ecx		/* Point ecx to next registers */
	movl	4(%ecx), %ebx		/* Restore ebx */
	movl	8(%ecx), %edi		/* Restore edi */
	movl	12(%ecx), %esi		/* Restore esi */
	movl	16(%ecx), %ebp		/* Restore ebp */
	movl	20(%ecx), %esp		/* Restore esp */
	movl	0(%ecx), %eax		/* Get eip */
	movl	%eax, (%esp)		/* Restore it as return address */
	ret

/**
 * Copy data from user to kernel space.
 * Returns 0 on success, or EFAULT on page fault.
 *
 *  syntax - int umem_copyin(const void *uaddr, void *kaddr, size_t len)
 */
	.global known_fault1
ENTRY(umem_copyin)
	pushl	%esi
	pushl	%edi
	pushl	$14			/* Set EFAULT as default return */

	movl	16(%esp), %esi
	movl	20(%esp), %edi
	movl	24(%esp), %ecx

	movl	%esi, %edx		/* Check if valid user address */
	addl	%ecx, %edx
	jc	umem_fault
	cmpl	$(USER_MAX), %edx
	jae	umem_fault
	cld
known_fault1:				/* May be fault here */
	rep
	movsb

	popl	%eax
	xorl	%eax, %eax		/* Set no error */
	popl	%edi
	popl	%esi
	ret

/**
 * Copy data to user from kernel space.
 * Returns 0 on success, or EFAULT on page fault.
 *
 *  syntax - int umem_copyout(const void *kaddr, void *uaddr, size_t len)
 */
	.global known_fault2
ENTRY(umem_copyout)
	pushl	%esi
	pushl	%edi
	pushl	$14			/* Set EFAULT as default return */

	movl	16(%esp), %esi
	movl	20(%esp), %edi
	movl	24(%esp), %ecx

	movl	%edi, %edx
	addl	%ecx, %edx
	jc	umem_fault
	cmpl	$(USER_MAX), %edx
	jae	umem_fault
	cld
known_fault2:
	rep
	movsb

	popl	%eax
	xorl	%eax, %eax		/* Set no error */
	popl	%edi
	popl	%esi
	ret

/**
 * umem_strnlen - Get length of string in user space
 * Returns 0 on success, or EFAULT on page fault.
 *
 *  syntax - int umem_strnlen(const char *uaddr, size_t maxlen, size_t *len)
 *
 * Note: The returned length value does NOT include the NULL terminator.
 */
	.global known_fault3
ENTRY(umem_strnlen)
	pushl	%esi
	pushl	%edi
	pushl	$14			/* Set EFAULT as default return */

	movl	16(%esp), %edi
	movl	20(%esp), %ecx
	movl	24(%esp), %esi

	movl	%edi, %edx
	cmpl	$(USER_MAX), %edx
	jae	umem_fault
	movl	%ecx, %edx
	cld
	xor	%eax, %eax
known_fault3:
	repne
	scasb
	subl	%ecx, %edx
	decl	%edx			/* Adjust for terminator */
	movl	%edx, (%esi)

	popl	%eax
	xorl	%eax, %eax		/* Set no error */
	popl	%edi
	popl	%esi
	ret

/**
 * Fault entry for user access
 */
ENTRY(umem_fault)
	popl	%eax			/* Get return value from stack */
	popl	%edi
	popl	%esi
	ret

/**
 * Reset cpu
 * Use triple fault
 */
ENTRY(cpu_reset)
	cli
	movl	$null_idt, %eax		/* Reset by triple fault */
	lidt	(%eax)
	int	$3
	hlt

/**
 * Flush the TLB by reloading CR3 with its current value.
 * syntax - void flush_tlb(void)
 */
ENTRY(flush_tlb)
	movl	%cr3, %eax
	movl	%eax, %cr3
	ret

/**
 * Load the task register.
 * syntax - void ltr(u_int tss_selector)
 */
ENTRY(ltr)
	movl	4(%esp), %eax
	ltr	%ax
	ret

/**
 * Load the GDT and reload every segment register.
 * syntax - void lgdt(void *gdt_pseudo_descriptor)
 *
 * The data segments are reloaded directly; CS can only be changed by a
 * far control transfer, so a far return (pushed KERNEL_CS + target)
 * is used to reload it.
 */
ENTRY(lgdt)
	movl	4(%esp), %eax
	lgdt	(%eax)
	jmp	1f			/* Flush the prefetch queue */
	nop
1:
	movl	$(KERNEL_DS), %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss
	movl	$(KERNEL_CS), %eax
	pushl	%eax			/* far-return frame: new CS... */
	pushl	$2f			/* ...and new EIP */
	lret				/* reload CS via far return */
2:
	ret

/**
 * Load the IDT.
 * syntax - void lidt(void *idt_pseudo_descriptor)
 */
ENTRY(lidt)
	movl	4(%esp), %eax
	lidt	(%eax)
	ret

/**
 * EFLAGS and control-register accessors for C code.
 * All follow cdecl: single argument (if any) at 4(%esp), result in %eax.
 */

/* u_long get_eflags(void) - read EFLAGS via pushf/pop */
ENTRY(get_eflags)
	pushfl
	popl	%eax
	ret

/* void set_eflags(u_long flags) - write EFLAGS via push/popf */
ENTRY(set_eflags)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret

/* void set_cr0(u_long val) */
ENTRY(set_cr0)
	movl	4(%esp), %eax
	movl	%eax, %cr0
	ret

/* u_long get_cr0(void) */
ENTRY(get_cr0)
	movl	%cr0, %eax
	ret

/* u_long get_cr2(void) - faulting linear address after a page fault */
ENTRY(get_cr2)
	movl	%cr2, %eax
	ret

/* void set_cr3(u_long pgdir) - load page directory base; flushes TLB */
ENTRY(set_cr3)
	movl	4(%esp), %eax
	movl	%eax, %cr3
	ret

/* u_long get_cr3(void) */
ENTRY(get_cr3)
	movl	%cr3, %eax
	ret

/**
 * Port I/O helpers for C code (cdecl).
 * __in*: arg is the port number, result in %eax (zero-extended for
 * the sub-32-bit variants by the preceding xorl).
 * __out*: first arg is the value, second the port number.
 */

/* u_long __inl(int port) */
ENTRY(__inl)
	movl	4(%esp), %edx
	xorl	%eax, %eax
	inl		%dx, %eax
	ret

/* u_short __inw(int port) */
ENTRY(__inw)
	movl	4(%esp), %edx
	xorl	%eax, %eax		/* clear high bits: inw only writes %ax */
	inw		%dx, %ax
	ret

/* u_char __inb(int port) */
ENTRY(__inb)
	movl	4(%esp), %edx
	xorl	%eax, %eax		/* clear high bits: inb only writes %al */
	inb		%dx, %al
	ret

/* void __outl(u_long val, int port) */
ENTRY(__outl)
	movl	4(%esp), %eax
	movl	8(%esp), %edx
	outl	%eax, %dx
	ret

/* void __outw(u_short val, int port) */
ENTRY(__outw)
	movl	4(%esp), %eax
	movl	8(%esp), %edx
	outw	%ax, %dx
	ret

/* void __outb(u_char val, int port) */
ENTRY(__outb)
	movl	4(%esp), %eax
	movl	8(%esp), %edx
	outb	%al, %dx
	ret

/* void outb_p(u_char val, int port) - outb with an I/O delay:
   the write to unused port 0x80 gives slow devices time to settle. */
ENTRY(outb_p)
	movl	4(%esp), %eax
	movl	8(%esp), %edx
	outb	%al, %dx
	outb	%al, $0x80		/* I/O delay */
	ret

/* u_char inb_p(int port) - inb with the same port-0x80 I/O delay */
ENTRY(inb_p)
	movl	4(%esp), %edx
	xorl	%eax, %eax
	inb	%dx, %al
	outb	%al, $0x80		/* I/O delay */
	ret

/* void interrupt_disable(void) - mask maskable interrupts (clear IF) */
ENTRY(interrupt_disable)
	cli
	ret

/* void interrupt_enable(void) - unmask maskable interrupts (set IF) */
ENTRY(interrupt_enable)
	sti
	ret

/* void breakpoint(void) - raise a debug breakpoint trap (vector 3) */
ENTRY(breakpoint)
	int	$3
	ret

/* void cpu_idle(void) - enable interrupts and halt until the next one.
   sti delays taking effect by one instruction, so sti;hlt is atomic:
   no interrupt can slip in between and leave hlt waiting forever. */
ENTRY(cpu_idle)
	sti
	hlt
	ret

/**
 * Reset %esp to the system stack while preserving the return address.
 * NOTE(review): the return address is stored AT __sys_stack_top; if
 * that symbol is one-past-the-end of the stack area, this word lies
 * outside it — confirm the linker script reserves that slot.
 */
ENTRY(stack_setup)
	movl (%esp), %eax /* save return address */
	movl $__sys_stack_top, %esp
	movl %eax, (%esp) /* restore return address */
	ret

	.align 4
/* Empty IDT pseudo-descriptor (limit 0, base 0) used by cpu_reset
   to force a triple fault. */
null_idt:
	.word	0
	.long	0

/**
 * Interrupt nest counter.
 *
 * This counter is incremented in the entry of interrupt handler
 * to switch the interrupt stack. Since all interrupt handlers
 * share same one interrupt stack, each handler must pay attention
 * to the stack overflow.
 *
 * NOTE(review): nothing in this file references irq_nesting; it is
 * presumably maintained by the C-level handler or is planned for the
 * TODO items above — confirm before relying on its value.
 */
	.section ".bss"
irq_nesting:
	.long	0

/* Zero-initialized kernel stack region, STACK_SIZE bytes. */
.comm   stack, STACK_SIZE

.end
