/*
 * thread.c
 *
 * Copyright (C) 2018 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <thread.h>
#include <timer.h>
#include <process.h>
#include <exception.h>
#include <syscalls.h>
#include <segments.h>
#include <heap.h>
#include <cpu.h>

extern void reschedule(void);

bool_t scheduler_enabled = FALSE;
static list_entry_t thread_queue[THREAD_PRIORITY_MAX];
static thread_t *current_thread = NULL;
static thread_t *last_fpu_thread = NULL;
static dword_t tid_alloc_bitmap[MAX_THREADS / 32];
static DECLARE_LOCK(tid_bitmap_lock);

static dword_t alloc_tid()
{
    int i;
    dword_t tid = (dword_t)-1;

    lock_acquire(&tid_bitmap_lock);

    for (i = 0; i < MAX_THREADS; i++)
    {
        if (!test_bit(tid_alloc_bitmap, i))
        {
            tid = i;
            set_bit(tid_alloc_bitmap, i);
            break;
        }
    }

    lock_release(&tid_bitmap_lock);
    return tid;
}

/* Recursively evaluate a wait condition tree.
 * Returns TRUE when the condition is currently satisfied. */
static inline bool_t test_condition(wait_condition_t *condition)
{
    wait_condition_t **child;

    switch (condition->type)
    {
    case WAIT_GROUP_ANY:
        /* Satisfied as soon as any child is satisfied. */
        for (child = condition->conditions; *child; child++)
        {
            if (test_condition(*child)) return TRUE;
        }
        return FALSE;

    case WAIT_GROUP_ALL:
        /* Satisfied only when every child is satisfied. */
        for (child = condition->conditions; *child; child++)
        {
            if (!test_condition(*child)) return FALSE;
        }
        return TRUE;

    case WAIT_ALWAYS:
        /* Never satisfied; only a timeout or cancellation ends this wait. */
        return FALSE;

    case WAIT_UNTIL_EQUAL:
        return *condition->pointer == condition->value;

    case WAIT_UNTIL_NOT_EQUAL:
        return *condition->pointer != condition->value;

    case WAIT_UNTIL_LESS:
        return *condition->pointer < condition->value;

    case WAIT_UNTIL_NOT_LESS:
        return *condition->pointer >= condition->value;

    case WAIT_UNTIL_GREATER:
        return *condition->pointer > condition->value;

    case WAIT_UNTIL_NOT_GREATER:
        return *condition->pointer <= condition->value;

    default:
        KERNEL_CRASH("Invalid wait condition value");
        return FALSE;
    }
}

/* Decide whether a thread may be scheduled right now.
 * Side effect: when the thread's wait is over (condition hit, timed out,
 * or canceled by termination), fills in wait->result and clears
 * thread->wait before reporting the thread ready. */
static inline bool_t is_thread_ready(thread_t *thread)
{
    qword_t now = timer_get_milliseconds();
    wait_t *wait;

    if (thread->terminated) return FALSE;

    /* A frozen thread keeps running as long as it is inside the kernel. */
    if ((thread->frozen > 0) && !thread->in_kernel) return FALSE;

    wait = thread->wait;
    if (wait == NULL) return TRUE;

    if (test_condition(wait->root))
    {
        wait->result = WAIT_CONDITION_HIT;
        thread->wait = NULL;
        return TRUE;
    }

    if ((wait->timeout != NO_TIMEOUT) && ((now - wait->timestamp) >= (qword_t)wait->timeout))
    {
        wait->result = WAIT_TIMED_OUT;
        thread->wait = NULL;
        return TRUE;
    }

    if (thread->terminating)
    {
        wait->result = WAIT_CANCELED;
        thread->wait = NULL;
        return TRUE;
    }

    return FALSE;
}

/*
 * Final teardown of a terminated thread: unlink it from its scheduler
 * queue and from its owner process, release its kernel stack, and drop
 * the scheduler's reference to the thread object. If this was the
 * process' last thread, the process is destroyed as well.
 *
 * NOTE(review): called from scheduler() while switching away from the
 * thread, i.e. its kernel stack is freed here — verify no path still
 * runs on that stack afterwards.
 */
static void destroy_thread(thread_t *thread)
{
    list_remove(&thread->in_queue_list);

    lock_acquire(&thread->owner_process->thread_list_lock);
    list_remove(&thread->in_process_list);
    lock_release(&thread->owner_process->thread_list_lock);

    free(thread->kernel_stack);
    thread->kernel_stack = NULL;

    /* An empty thread list means this was the process' last thread. */
    if (thread->owner_process->threads.next == &thread->owner_process->threads)
    {
        destroy_process(thread->owner_process);
    }

    dereference(&thread->header);
}

/* Object-manager cleanup callback for thread objects: forget the lazy-FPU
 * owner if it is the thread being destroyed, so thread_lazy_fpu() never
 * saves into a freed thread's state. */
void thread_cleanup(object_t *obj)
{
    thread_t *thread = CONTAINER_OF(obj, thread_t, header);
    if (thread == last_fpu_thread) last_fpu_thread = NULL;
}

/* Pre-wait callback for thread objects: waiting on a thread means waiting
 * until its terminated flag becomes nonzero. */
dword_t thread_pre_wait(object_t *obj, void *parameter, wait_condition_t *condition)
{
    thread_t *thread = (thread_t*)obj;

    condition->type = WAIT_UNTIL_NOT_EQUAL;
    condition->value = FALSE;
    condition->pointer = &thread->terminated;

    return ERR_SUCCESS;
}

/*
 * Create a new thread inside the given process and link it into the
 * scheduler queue for its priority.
 *
 * On success, the thread (holding one object reference) is returned
 * through new_thread and the thread adopts kernel_stack. On failure the
 * caller keeps ownership of kernel_stack.
 */
dword_t create_thread_internal(process_t *proc, thread_state_t *initial_state, dword_t flags, priority_t priority, void *kernel_stack, thread_t **new_thread)
{
    dword_t ret;
    if (proc->terminating) return ERR_CANCELED;

    thread_t *thread = (thread_t*)malloc(sizeof(thread_t));
    if (thread == NULL) return ERR_NOMEMORY;

    init_object(&thread->header, NULL, OBJECT_THREAD);

    ret = create_object(&thread->header);
    if (ret != ERR_SUCCESS)
    {
        free(thread);
        return ret;
    }

    /* Do not adopt the caller's stack until creation can no longer fail;
     * previously the cleanup path freed this field while it was still
     * uninitialized. */
    thread->kernel_stack = NULL;

    thread->tid = alloc_tid();
    if (thread->tid == (dword_t)-1)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    thread->priority = priority;
    thread->quantum = QUANTUM;
    thread->frozen = (flags & THREAD_CREATE_FROZEN) ? TRUE : FALSE;
    thread->running_ticks = 0ULL;
    thread->owner_process = proc;
    thread->exit_code = 0;
    thread->terminating = FALSE;
    thread->terminated = FALSE;
    thread->last_context = NULL;
    thread->wait = NULL;
    memset(&thread->kernel_handler, 0, sizeof(thread->kernel_handler));
    memset(&thread->user_handler, 0, sizeof(thread->user_handler));

    thread->state = *initial_state;
    thread->state.regs.eflags = 0x202;  /* IF set plus the always-one reserved bit */

    if (proc != kernel_process)
    {
        thread->previous_mode = USER_MODE;
        thread->in_kernel = 0;
        thread->state.regs.cs = get_user_code_selector();
        thread->state.regs.data_selector = get_user_data_selector();
    }
    else
    {
        thread->previous_mode = KERNEL_MODE;
        /* Kernel threads always run in the kernel (cf. thread_init);
         * previously left uninitialized on this branch. */
        thread->in_kernel = 1;
        thread->state.regs.cs = get_kernel_code_selector();
        thread->state.regs.data_selector = get_kernel_data_selector();
    }

    thread->kernel_stack = kernel_stack;
    /* Round up to a dword boundary; the stack buffer is allocated with
     * sizeof(uintptr_t) - 1 bytes of slack for this.
     * NOTE(review): uses KERNEL_STACK_SIZE even when the caller allocated
     * a different stack_size — confirm callers always pass that size. */
    thread->kernel_esp = ((uintptr_t)thread->kernel_stack + KERNEL_STACK_SIZE + 3) & ~3;

    lock_acquire(&thread->owner_process->thread_list_lock);
    list_append(&proc->threads, &thread->in_process_list);
    lock_release(&thread->owner_process->thread_list_lock);

    critical_t critical;
    enter_critical(&critical);
    list_append(&thread_queue[priority], &thread->in_queue_list);
    leave_critical(&critical);

    *new_thread = thread;
    ret = ERR_SUCCESS;

cleanup:
    if (ret != ERR_SUCCESS)
    {
        /* Release the TID before dropping the object reference: the
         * dereference may destroy the thread structure, so thread->tid
         * must not be read after it (previous order was a use-after-free). */
        if (thread->tid != (dword_t)-1)
        {
            lock_acquire(&tid_bitmap_lock);
            clear_bit(tid_alloc_bitmap, thread->tid);
            lock_release(&tid_bitmap_lock);
        }

        dereference(&thread->header);
    }

    return ret;
}

/* Return the thread currently running on this CPU.
 * (void) makes this a proper prototype instead of an unspecified-args
 * K&R-style definition. */
thread_t *get_current_thread(void)
{
    return current_thread;
}

/*
 * Lazy FPU context switch: save the FPU state of the thread that last
 * used the FPU, restore the current thread's FPU state, and clear
 * CR0.TS ("clts") so FPU instructions stop faulting.
 * NOTE(review): presumably invoked from the device-not-available (#NM)
 * exception path — confirm against the exception handler.
 */
void thread_lazy_fpu(void)
{
    if (last_fpu_thread) cpu_save_fpu_state(last_fpu_thread->state.fpu_state);
    cpu_restore_fpu_state(current_thread->state.fpu_state);
    last_fpu_thread = current_thread;
    asm volatile ("clts");
}

#include <log.h>

/*
 * Scheduler entry point, invoked with the interrupted thread's register
 * frame. Decrements the running thread's quantum; when it reaches zero,
 * picks the next ready thread (scanning priority queues from 0 upward),
 * rebuilds that thread's kernel-stack interrupt frame, and redirects the
 * interrupt-exit path to it. A terminated outgoing thread is destroyed
 * here, after the switch has been prepared.
 */
void scheduler(registers_t *regs)
{
    int i;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->quantum == 0)
    {
        /* Re-queue the outgoing thread first so it can be re-picked if
         * nothing else is ready. */
        list_append(&thread_queue[current_thread->priority], &current_thread->in_queue_list);
        thread_t *next_thread = NULL;

        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *thread = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (is_thread_ready(thread))
                {
                    next_thread = thread;
                    goto found;
                }
            }
        }

found:
        /* The outgoing thread was appended above, so a fully empty scan
         * would indicate scheduler state corruption. */
        ASSERT(next_thread != NULL);
        list_remove(&next_thread->in_queue_list);

        /* Sanity: kernel ESP must stay within the thread's own stack
         * (tid 0, the bootstrap thread, is exempt). */
        if (current_thread->tid != 0) ASSERT(current_thread->kernel_esp >= (uintptr_t)current_thread->kernel_stack);
        if (next_thread->tid != 0) ASSERT(next_thread->kernel_esp >= (uintptr_t)next_thread->kernel_stack);

        if (current_thread != next_thread)
        {
            /* Snapshot the interrupted context into the outgoing thread. */
            memcpy(&current_thread->state.regs, regs, sizeof(registers_t));
            current_thread->kernel_esp = regs->esp;
            /* When interrupted in ring 3, the user ESP lives in the
             * extended frame's esp3 field, not regs->esp. */
            if (SEGMENT_RPL(regs->cs) != 0) current_thread->state.regs.esp = ((registers_ext_t*)regs)->esp3;

            set_kernel_esp(next_thread->kernel_esp);

            /*asm volatile ("pushl %eax\n"
                          "movl %cr4, %eax\n"
                          "orb $0x08, %al\n"
                          "movl %eax, %cr4\n"
                          "popl %eax\n");*/

            /* Rebuild an interrupt-return frame on the next thread's
             * kernel stack; the ring-3 case additionally needs SS:ESP
             * for the privilege-level change. Push order mirrors the
             * frame layout the interrupt-exit code pops. */
            if (SEGMENT_RPL(next_thread->state.regs.cs) != 0)
            {
                push_to_stack(&next_thread->kernel_esp, get_user_data_selector());
                push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            }

            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eflags);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.cs);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eip);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.error_code);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eax);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ecx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.data_selector);

            /* Tell the common interrupt-exit code to switch stacks. */
            regs->esp = next_thread->kernel_esp;
            regs->error_code = CONTEXT_SWITCH_MAGIC;

            if (current_thread->owner_process != next_thread->owner_process)
            {
                set_page_directory(next_thread->owner_process->memory_space.page_directory);
            }
        }

        if (current_thread->owner_process != kernel_process)
        {
            bump_address_space(&current_thread->owner_process->memory_space);
        }

        /* A terminating thread dies once it is outside the kernel; its
         * resources are released here, after the switch is prepared. */
        if (current_thread->terminating && !current_thread->in_kernel) current_thread->terminated = TRUE;
        if (current_thread->terminated) destroy_thread(current_thread);
        current_thread = next_thread;
        current_thread->quantum = QUANTUM;
    }
    else
    {
        current_thread->quantum--;
    }

    leave_critical(&critical);
}

/*
 * Block the calling thread until the condition is satisfied, the timeout
 * (milliseconds) expires, or the thread is being terminated. A timeout
 * of 0 polls the condition and returns immediately. Returns how the
 * wait ended (see is_thread_ready for the completion side).
 */
wait_result_t scheduler_wait(wait_condition_t *condition, dword_t timeout)
{
    /* Fast path: already satisfied, or caller only wanted a poll. */
    if (test_condition(condition)) return WAIT_CONDITION_HIT;
    if (timeout == 0) return WAIT_TIMED_OUT;

    /* The wait block lives on this kernel stack; the scheduler fills in
     * wait.result and clears current_thread->wait before making this
     * thread ready again, so reading wait.result afterwards is safe. */
    wait_t wait = { .root = condition, .timeout = timeout,  .timestamp = timer_get_milliseconds(), .result = WAIT_CANCELED };
    /* Spin until the thread's wait slot is free, then claim it. */
    while (!__sync_bool_compare_and_swap(&current_thread->wait, NULL, &wait)) continue;
    syscall_yield_quantum();

    return wait.result;
}

/* Sleep for the given number of milliseconds. WAIT_ALWAYS never becomes
 * ready, so only the timeout or a termination-cancel ends the wait.
 * NOTE(review): milliseconds is a qword_t but scheduler_wait takes a
 * dword_t timeout, so very long sleeps are truncated — confirm intent. */
sysret_t syscall_sleep(qword_t milliseconds)
{
    wait_condition_t condition = { .type = WAIT_ALWAYS };
    wait_result_t result = scheduler_wait(&condition, milliseconds);

    if (result == WAIT_CANCELED) return ERR_CANCELED;
    return ERR_SUCCESS;
}

/* Give up the remainder of the current quantum and invoke the scheduler
 * immediately. */
sysret_t syscall_yield_quantum()
{
    current_thread->quantum = 0;
    reschedule();
    return ERR_SUCCESS;
}

/*
 * Create a kernel-mode thread that starts at routine with param on its
 * stack, using a freshly allocated and committed kernel stack of
 * stack_size bytes (KERNEL_STACK_SIZE when 0 is passed).
 * Returns ERR_SUCCESS and the thread through new_thread, or an error
 * code (the stack is freed on every failure path).
 */
dword_t create_system_thread(thread_procedure_t routine, dword_t flags, priority_t priority, dword_t stack_size, void *param, thread_t **new_thread)
{
    thread_state_t initial_state;
    memset(&initial_state, 0, sizeof(initial_state));

    if (!stack_size) stack_size = KERNEL_STACK_SIZE;

    /* Extra slack lets the initial ESP be rounded up to a dword boundary. */
    void *kernel_stack = malloc(stack_size + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL) return ERR_NOMEMORY;

    dword_t ret = commit_pages(kernel_stack, stack_size);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        return ret;
    }

    initial_state.regs.eip = (dword_t)routine;
    initial_state.regs.esp = ((dword_t)kernel_stack + stack_size + 3) & ~3;

    /* NOTE(review): only param is pushed — no return address beneath it,
     * so routine must never return; confirm the thread-entry convention. */
    push_to_stack((uintptr_t*)&initial_state.regs.esp, (uintptr_t)param);

    ret = create_thread_internal(kernel_process, &initial_state, flags, priority, kernel_stack, new_thread);
    /* Previously leaked: on failure the thread never adopted the stack. */
    if (ret != ERR_SUCCESS) free(kernel_stack);
    return ret;
}

/*
 * System call: create a thread in the given process (or the current one
 * when process == INVALID_HANDLE) and return a handle to it.
 * User mode may not create threads in the kernel process.
 */
sysret_t syscall_create_thread(handle_t process, thread_state_t *initial_state, dword_t flags, priority_t priority, handle_t *new_thread)
{
    dword_t ret;
    thread_state_t safe_state;
    process_t *proc;
    thread_t *thread;

    if (get_previous_mode() == USER_MODE)
    {
        /* Validate the whole structure — sizeof(initial_state) only
         * covered the size of the pointer, not the pointed-to state. */
        if (!check_usermode(initial_state, sizeof(*initial_state))) return ERR_BADPTR;
        if (!check_usermode(new_thread, sizeof(*new_thread))) return ERR_BADPTR;

        EH_TRY safe_state = *initial_state;
        EH_CATCH EH_ESCAPE(return ERR_BADPTR);
        EH_DONE;
    }
    else
    {
        safe_state = *initial_state;
    }

    if (process != INVALID_HANDLE)
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }
    else
    {
        proc = get_current_process();
        reference(&proc->header);
    }

    /* User mode must not inject threads into the kernel process. */
    if (get_previous_mode() == USER_MODE && proc == kernel_process)
    {
        ret = ERR_FORBIDDEN;
        goto cleanup;
    }

    void *kernel_stack = malloc(KERNEL_STACK_SIZE + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    ret = commit_pages(kernel_stack, KERNEL_STACK_SIZE);
    if (ret != ERR_SUCCESS)
    {
        /* Previously leaked the stack on this path. */
        free(kernel_stack);
        goto cleanup;
    }

    ret = create_thread_internal(proc, &safe_state, flags, priority, kernel_stack, &thread);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        goto cleanup;
    }

    handle_t thread_handle;
    ret = open_object(&thread->header, 0, &thread_handle);
    if (ret == ERR_SUCCESS)
    {
        /* Only publish the handle when it was actually opened; a faulting
         * user pointer closes it again and reports ERR_BADPTR. */
        EH_TRY *new_thread = thread_handle;
        EH_CATCH
        {
            syscall_close_object(thread_handle);
            ret = ERR_BADPTR;
        }
        EH_DONE;
    }

cleanup:
    dereference(&proc->header);
    return ret;
}

/*
 * System call: look up a thread by TID and open a handle to it.
 * Only the current thread and threads linked into a scheduler queue can
 * be found. Returns ERR_NOTFOUND when no such thread exists.
 */
sysret_t syscall_open_thread(dword_t tid, handle_t *handle)
{
    int i;
    thread_t *thread = NULL;
    dword_t ret = ERR_NOTFOUND;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->tid == tid)
    {
        thread = current_thread;
    }
    else
    {
        /* Scan every priority queue until a match is found. */
        for (i = 0; (i < THREAD_PRIORITY_MAX) && (thread == NULL); i++)
        {
            list_entry_t *ptr;  /* was initialized twice; dead store removed */

            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *entry = CONTAINER_OF(ptr, thread_t, in_queue_list);

                if (entry->tid == tid)
                {
                    thread = entry;
                    break;
                }
            }
        }
    }

    if (thread != NULL) ret = open_object(&thread->header, 0, handle);

    leave_critical(&critical);
    return ret;
}

/*
 * Mark a thread for termination and record its exit code. The actual
 * teardown happens in scheduler(): once the thread is no longer inside
 * the kernel it is flagged terminated and destroyed on the next switch.
 */
dword_t terminate_thread_internal(thread_t *thread, dword_t exit_code)
{
    thread->exit_code = exit_code;
    thread->terminating = TRUE;
    return ERR_SUCCESS;
}

/*
 * System call: request termination of the thread identified by handle
 * (or the current thread when handle == INVALID_HANDLE).
 */
sysret_t syscall_terminate_thread(handle_t handle, dword_t exit_code)
{
    dword_t ret;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    ret = terminate_thread_internal(thread, exit_code);

    /* Drop the reference taken above — it was previously leaked. */
    dereference(&thread->header);
    return ret;
}

/*
 * System call: read a piece of information about a thread into buffer.
 * For user-mode callers the data is staged in a kernel bounce buffer and
 * copied out at the end (even on error, the zeroed buffer is copied,
 * matching the original behavior).
 */
sysret_t syscall_query_thread(handle_t handle, thread_info_t info_type, void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;
        memset(safe_buffer, 0, size);
    }
    else
    {
        safe_buffer = buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            /* Previously leaked the bounce buffer on a bad handle. */
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
        case THREAD_TID_INFO:
            if (size >= sizeof(dword_t)) *((dword_t*)safe_buffer) = thread->tid;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_FROZEN_INFO:
            if (size >= sizeof(int32_t)) *((int32_t*)safe_buffer) = thread->frozen;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_CPU_STATE_INFO:
            if (size >= sizeof(thread_state_t))
            {
                if (current_thread->tid != thread->tid)
                {
                    *((thread_state_t*)safe_buffer) = thread->state;
                }
                else
                {
                    /* NOTE(review): assumes last_context is non-NULL for
                     * the current thread during a syscall — verify. */
                    ((thread_state_t*)safe_buffer)->regs = *thread->last_context;
                    cpu_save_fpu_state(((thread_state_t*)safe_buffer)->fpu_state);
                }
            }
            else
            {
                ret = ERR_SMALLBUF;
            }

            break;

        case THREAD_PRIORITY_INFO:
            if (size >= sizeof(priority_t)) *((priority_t*)safe_buffer) = thread->priority;
            else ret = ERR_SMALLBUF;
            break;

        case THREAD_AFFINITY_INFO:
            if (size >= sizeof(affinity_t)) *((affinity_t*)safe_buffer) = thread->affinity;
            else ret = ERR_SMALLBUF;
            /* break was missing: a successful query fell through to the
             * default case and clobbered ret with ERR_INVALID. */
            break;

        default:
            ret = ERR_INVALID;
    }

    if (get_previous_mode() == USER_MODE)
    {
        EH_TRY memcpy(buffer, safe_buffer, size);
        EH_CATCH ret = ERR_BADPTR;
        EH_DONE;

        free(safe_buffer);
    }

    dereference(&thread->header);
    return ret;
}

/*
 * System call: modify a piece of a thread's state. User-mode input is
 * staged through a kernel bounce buffer before being applied.
 */
sysret_t syscall_set_thread(handle_t handle, thread_info_t info_type, const void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;  /* was uninitialized on every success path */
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;

        EH_TRY memcpy(safe_buffer, buffer, size);
        EH_CATCH
        {
            /* Bail out instead of applying a partially copied state. */
            free(safe_buffer);
            EH_ESCAPE(return ERR_BADPTR);
        }
        EH_DONE;
    }
    else
    {
        safe_buffer = (void*)buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
    case THREAD_CPU_STATE_INFO:
        if (size < sizeof(thread_state_t))
        {
            ret = ERR_SMALLBUF;
            break;
        }

        if (thread->owner_process->pid == kernel_process->pid)
        {
            /* Was an early return that leaked safe_buffer and the
             * thread reference. */
            ret = ERR_FORBIDDEN;
            break;
        }

        {
            thread_state_t *new_state = (thread_state_t*)safe_buffer;

            critical_t critical;
            if (current_thread->tid != thread->tid) enter_critical(&critical);

            if (thread->in_kernel == 0)
            {
                /* Thread is in user mode: patch the saved register image.
                 * Segment selectors are left untouched and EFLAGS is
                 * masked so only safe bits can change. */
                thread->state.regs.eax = new_state->regs.eax;
                thread->state.regs.ecx = new_state->regs.ecx;
                thread->state.regs.edx = new_state->regs.edx;
                thread->state.regs.ebx = new_state->regs.ebx;
                thread->state.regs.esp = new_state->regs.esp;
                thread->state.regs.ebp = new_state->regs.ebp;
                thread->state.regs.esi = new_state->regs.esi;
                thread->state.regs.edi = new_state->regs.edi;
                thread->state.regs.eip = new_state->regs.eip;
                thread->state.regs.eflags = (thread->state.regs.eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }
            else if (thread->last_context)
            {
                /* Thread is inside the kernel: patch the user context it
                 * will eventually return to. */
                thread->last_context->eax = new_state->regs.eax;
                thread->last_context->ecx = new_state->regs.ecx;
                thread->last_context->edx = new_state->regs.edx;
                thread->last_context->ebx = new_state->regs.ebx;
                thread->last_context->esp = new_state->regs.esp;
                thread->last_context->ebp = new_state->regs.ebp;
                thread->last_context->esi = new_state->regs.esi;
                thread->last_context->edi = new_state->regs.edi;
                thread->last_context->eip = new_state->regs.eip;
                thread->last_context->eflags = (thread->last_context->eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }

            if (current_thread->tid != thread->tid)
            {
                memcpy(thread->state.fpu_state, new_state->fpu_state, sizeof(thread->state.fpu_state));
            }
            else
            {
                /* Setting our own state: load the FPU directly. */
                cpu_restore_fpu_state(new_state->fpu_state);
            }

            if (current_thread->tid != thread->tid) leave_critical(&critical);
        }
        break;

    case THREAD_PRIORITY_INFO:
        if (size >= sizeof(priority_t)) thread->priority = *((priority_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        /* break was missing: setting priority also overwrote affinity. */
        break;

    case THREAD_AFFINITY_INFO:
        if (size >= sizeof(affinity_t)) thread->affinity = *((affinity_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        break;

    default:
        ret = ERR_INVALID;
    }

    if (get_previous_mode() == USER_MODE) free(safe_buffer);
    dereference(&thread->header);
    return ret;
}

/* Increment a thread's freeze count. A thread with frozen > 0 is not
 * scheduled while it is outside the kernel (see is_thread_ready). */
sysret_t syscall_freeze_thread(handle_t handle)
{
    thread_t *thread;

    if (handle != INVALID_HANDLE)
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }
    else
    {
        thread = get_current_thread();
        reference(&thread->header);
    }

    thread->frozen++;

    dereference(&thread->header);
    return ERR_SUCCESS;
}

/* Decrement a thread's freeze count, making it schedulable again once
 * the count drops to zero. */
sysret_t syscall_thaw_thread(handle_t handle)
{
    thread_t *thread;

    if (handle != INVALID_HANDLE)
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }
    else
    {
        thread = get_current_thread();
        reference(&thread->header);
    }

    thread->frozen--;

    dereference(&thread->header);
    return ERR_SUCCESS;
}

/*
 * Bootstrap the threading subsystem: build the thread object for the
 * code currently executing (TID 0, owned by the kernel process), set up
 * its kernel stack bookkeeping, initialize the scheduler queues, and
 * enable the scheduler.
 */
void thread_init(void)
{
    int i;
    critical_t critical;

    /* TID 0 is reserved for the bootstrap thread created below. */
    memset(tid_alloc_bitmap, 0, sizeof(tid_alloc_bitmap));
    set_bit(tid_alloc_bitmap, 0);

    thread_t *main_thread = (thread_t*)malloc(sizeof(thread_t));
    if (main_thread == NULL) KERNEL_CRASH("Cannot allocate thread object");

    init_object(&main_thread->header, NULL, OBJECT_THREAD);

    if (create_object(&main_thread->header) != ERR_SUCCESS)
    {
        KERNEL_CRASH("Cannot initialize thread object");
    }

    main_thread->tid = 0;
    main_thread->priority = THREAD_PRIORITY_MID;
    main_thread->kernel_stack = malloc(KERNEL_STACK_SIZE);
    ASSERT(main_thread->kernel_stack != NULL);
    commit_pages(main_thread->kernel_stack, KERNEL_STACK_SIZE);
    main_thread->kernel_esp = ((uintptr_t)main_thread->kernel_stack + KERNEL_STACK_SIZE) & ~3;
    set_kernel_esp(main_thread->kernel_esp);
    main_thread->exit_code = 0;
    /* A zero quantum forces a full reschedule on the first timer tick. */
    main_thread->quantum = 0;
    main_thread->running_ticks = 0ULL;
    main_thread->owner_process = kernel_process;
    list_append(&kernel_process->threads, &main_thread->in_process_list);
    main_thread->in_kernel = 1;
    main_thread->last_context = NULL;
    main_thread->terminated = FALSE;
    main_thread->terminating = FALSE;  /* was left uninitialized */
    main_thread->frozen = 0;           /* was left uninitialized */
    main_thread->previous_mode = KERNEL_MODE;
    main_thread->wait = NULL;

    memset(&main_thread->kernel_handler, 0, sizeof(main_thread->kernel_handler));
    memset(&main_thread->user_handler, 0, sizeof(main_thread->user_handler));

    enter_critical(&critical);

    current_thread = main_thread;
    for (i = 0; i < THREAD_PRIORITY_MAX; i++) list_init(&thread_queue[i]);
    scheduler_enabled = TRUE;

    leave_critical(&critical);
}
