/* Copyright (C) 2021 Rain */

/* This file is part of XNIX. */

/* 
  XNIX is free software: you can redistribute it and/or modify 
  it under the terms of the GNU General Public License as published by 
  the Free Software Foundation, either version 3 of the License, or 
  (at your option) any later version. 
*/

/*
  XNIX is distributed in the hope that it will be useful, 
  but WITHOUT ANY WARRANTY; without even the implied warranty of 
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 
  GNU General Public License for more details. 
*/

/*
  You should have received a copy of the GNU General Public License 
   along with XNIX. If not, see <https://www.gnu.org/licenses/>.  
*/



#define __ASM__

#include <kernel/int.h>


.globl __stack_chk_fail

__stack_chk_fail:
	/* GCC stack-protector failure hook: panic with a stack trace.
	   Use leaq to pass the ADDRESS of the message — the original
	   movq loaded the first 8 bytes of the string into %rdi. */
	leaq schkfail_msg(%rip), %rdi
	call panic

scf_die:
	/* panic() should never return; if it does, halt forever */
	hlt
	jmp scf_die


.globl divide_error

divide_error:
	/* #DE (vector 0) pushes no error code — push a fake one so the
	   error_code frame layout is uniform */
	pushq $0

	/* save %rax; we use it to build the handler pointer */
	pushq %rax

	leaq do_divide_error(%rip), %rax

	/* exchange: handler address goes into the stack slot and the
	   original %rax comes back.  The original xorq destroyed both
	   values; xchgq is what the surrounding comments describe. */
	xchgq %rax, (%rsp)

	/* fall through into error_code, which immediately follows */

error_code:
	/* Common exception entry.  Expected stack on arrival (top first):
	     [rsp]    handler address (placed by the per-vector stub)
	     [rsp+8]  error code (real or fake $0)
	     then the CPU-pushed iret frame.
	   NOTE(review): ERRCODE/HANDLER offsets come from kernel/int.h
	   and must match the 19 pushes below — verify against that header. */
	cli

	/* and save other registers seriously now */

	pushq %rax
	pushq %rcx
	pushq %rdx
	pushq %rbx

	pushq %rbp
	pushq %rsi
	pushq %rdi
	
	pushq %r8
	pushq %r9
	pushq %r10
	pushq %r11
	pushq %r12
	pushq %r13
	pushq %r14
	pushq %r15

	/* segments also */

	movq %ds, %rax
	pushq %rax

	movq %es, %rax
	pushq %rax

	movq %fs, %rax
	pushq %rax

	movq %gs, %rax
	pushq %rax

	/* KERNEL_DS */
	movq $0x10, %rax
	movq %rax, %ds
	movq %rax, %es
	movq %rax, %fs
	movq %rax, %gs

	/* call the C handler: arg1 = pointer to the saved-register frame,
	   arg2 = the error code (SysV: %rdi, %rsi) */

	movq %rsp, %rdi
	movq ERRCODE(%rsp), %rsi

	movq HANDLER(%rsp), %rax

	callq *%rax

	/* falls through into restore_all below */

restore_all:
	/* Unwind the frame built in error_code, in reverse push order. */

	/* Segments were pushed ds, es, fs, gs — so gs comes off first.
	   The popped value must be written INTO the segment register.
	   (The original had the operands reversed — `movq %gs, %rax` —
	   which read the segment into %rax and threw the saved value
	   away, leaving the segment registers unrestored.) */
	popq %rax
	movq %rax, %gs

	popq %rax
	movq %rax, %fs

	popq %rax
	movq %rax, %es

	popq %rax
	movq %rax, %ds

	popq %r15
	popq %r14
	popq %r13
	popq %r12

	popq %r11
	popq %r10
	popq %r9
	popq %r8
	
	popq %rdi
	popq %rsi
	popq %rbp

	popq %rbx
	popq %rdx
	popq %rcx

	popq %rax

	/* discard the error-code and handler slots */
	addq $16, %rsp

	/* NOTE(review): iretq reloads RFLAGS (incl. IF) from the frame,
	   so this sti is redundant; kept to preserve original behavior */
	sti

	/* return to the interrupted context */
	iretq

.globl debug

debug:
	/* #DB (vector 1) pushes no error code — push a fake one */
	pushq $0
	
	pushq %rax
	leaq do_debug(%rip), %rax

	/* swap handler onto the stack, restore original %rax
	   (original xorq corrupted both — see divide_error) */
	xchgq %rax, (%rsp)

	jmp error_code

.globl nmi

nmi:
	/* NMI (vector 2) pushes no error code — push a fake one */
	pushq $0

	pushq %rax
	leaq do_nmi(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl int3

int3:
	/* #BP (vector 3) pushes no error code — push a fake one */
	pushq $0

	pushq %rax
	leaq do_int3(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl overflow

overflow:
	/* #OF (vector 4) pushes no error code — push a fake one */
	pushq $0

	pushq %rax
	leaq do_overflow(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl bound

bound:
	/* #BR (vector 5) pushes no error code — push a fake one */
	pushq $0
	pushq %rax
	leaq do_bound(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl invalid_code

invalid_code:
	/* #UD (vector 6) pushes no error code — push a fake one */
	pushq $0

	pushq %rax
	leaq do_ud(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl device_not_avaliable

device_not_avaliable:
	/* #NM (vector 7, FPU not available) pushes no error code */
	pushq $0

	pushq %rax
	leaq do_no_fpu(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl double_fault

double_fault:
	/* #DF (vector 8) pushes a real error code — no fake $0 needed */

	pushq %rax
	leaq do_double_fault(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	/* this jmp was missing: execution fell through into
	   fpu_segment_overrun, which pushed a second %rax and
	   corrupted the frame */
	jmp error_code

.globl fpu_segment_overrun

fpu_segment_overrun:
	/* #CSO (vector 9, coprocessor segment overrun) pushes NO error
	   code, so a fake one is required — the original omitted it,
	   which would have misaligned the error_code frame */
	pushq $0

	pushq %rax
	leaq do_fpu_overrun(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code


.globl invalid_tss

invalid_tss:
	/* #TS (vector 10) pushes a real error code — no fake $0 */
	pushq %rax
	leaq do_invalid_tss(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)
	
	jmp error_code

.globl segment_not_present

segment_not_present:
	/* #NP (vector 11) pushes a real error code — no fake $0 */
	pushq %rax
	leaq do_no_segment(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code


.globl stack_error

stack_error:
	/* #SS (vector 12) pushes a real error code — no fake $0 */
	pushq %rax
	leaq do_stack(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code


.globl general_protection

general_protection:
	/* #GP (vector 13) pushes a real error code — no fake $0 */
	pushq %rax
	leaq do_gp(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl page_fault

page_fault:
	/* #PF (vector 14) pushes a real error code — no fake $0 */
	pushq %rax
	leaq do_page_fault(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl reserved

reserved:
	/* reserved vector 15 pushes no error code — push a fake one */
	pushq $0

	pushq %rax
	leaq do_reserved(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl fpu_error

fpu_error:
	/* #MF (vector 16) pushes no error code — push a fake one */
	pushq $0

	pushq %rax
	leaq do_fpu_error(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl alignment

alignment:
	/* #AC (vector 17) pushes a real error code — no fake $0 */
	pushq %rax
	leaq do_alignment(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl machine

machine:
	/* #MC (vector 18) pushes no error code — push a fake one */
	pushq $0

	pushq %rax
	leaq do_machine(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl simd

simd:
	/* #XM (vector 19) pushes no error code — push a fake one */
	pushq $0

	pushq %rax
	leaq do_simd(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code

.globl virtualization

virtualization:
	/* #VE (vector 20) pushes no error code — push a fake one */
	pushq $0

	pushq %rax
	leaq do_virtualization(%rip), %rax

	/* swap handler onto the stack, restore original %rax */
	xchgq %rax, (%rsp)

	jmp error_code


.globl ret_intr

ret_intr:
	/* Return path for frames that have NO error-code/handler slots
	   (see syscall below).  Same register layout as restore_all
	   otherwise, and here the segment-restore direction is correct:
	   popped value is written INTO the segment register. */

	popq %rax
	movq %rax, %gs

	popq %rax
	movq %rax, %fs

	popq %rax
	movq %rax, %es

	popq %rax
	movq %rax, %ds

	popq %r15
	popq %r14
	popq %r13
	popq %r12

	popq %r11
	popq %r10
	popq %r9
	popq %r8

	popq %rdi
	popq %rsi
	popq %rbp

	/* reverse of syscall's push order: rax, rcx, rbx, rdx */
	popq %rdx
	popq %rbx
	popq %rcx
	popq %rax

	/* NOTE(review): iretq reloads RFLAGS anyway; sti is redundant */
	sti

	iretq


.globl syscall

syscall:
	/* System-call entry.  We can't reuse error_code here because
	   error_code clobbers %rax, which holds the syscall number. */

	/* push order rax, rcx, rbx, rdx — mirrored by ret_intr */
	pushq %rax
	pushq %rcx
	pushq %rbx
	pushq %rdx

	pushq %rbp
	pushq %rsi
	pushq %rdi

	pushq %r8
	pushq %r9
	pushq %r10
	pushq %r11

	pushq %r12
	pushq %r13
	pushq %r14
	pushq %r15

	/* arguments for the handler: %rbx and %rcx appear to be this
	   kernel's first two syscall-argument registers — the handler
	   receives them in SysV %rdi/%rsi */
	movq %rbx, %rdi
	movq %rcx, %rsi

	movq %ds, %rcx
	pushq %rcx

	movq %es, %rcx
	pushq %rcx

	movq %fs, %rcx
	pushq %rcx

	movq %gs, %rcx
	pushq %rcx

	/* NOTE(review): unlike error_code, KERNEL_DS is never loaded
	   into the data segment registers here — confirm intentional.
	   NOTE(review): %rax is used unchecked as an index into
	   syscall_table — an out-of-range number jumps to garbage.
	   NOTE(review): the handler's return value in %rax is then
	   discarded by ret_intr's final `popq %rax` (which restores the
	   caller's saved %rax); if syscalls return results in %rax, the
	   value should be stored into the saved-%rax stack slot before
	   the jump — verify the intended convention. */
	callq *syscall_table(, %rax, 8)

	jmp ret_intr



.section .rodata

/* NUL-terminated panic message used by __stack_chk_fail above */
schkfail_msg:
	.asciz "kernel panic: stack check fail. \n"
