#include <sched.h>
#include <process.h>
#include <sys/unistd.h>
#include <hal/apic.h>
#include <arch/cpu.h>
#include <timer.h>

/* Global process table, defined elsewhere; indexed by slot, not pid. */
extern struct process process_table[MAX_PROCESS];

/* Round-robin scan cursor into process_table. Persists across calls to
 * schedule() so the search resumes where the last pick left off.
 * NOTE(review): starts at 1 — slot 0 is presumably reserved (idle/init);
 * confirm against process table setup. */
static volatile unsigned lookup_proc_ptr = 1;

/* Tick counter driving preemption; reset to 0 on every successful
 * reschedule (see schedule()). Incremented by the timer path elsewhere. */
extern volatile u32 sched_counter;

inline bool is_proc_can_run(struct process* proc) {
    if (cur_proc->state != PROCESS_UNUSED
        && cur_proc->state != PROCESS_WAITING
        && cur_proc->state != PROCESS_SLEEPING
        && cur_proc->state != PROCESS_BLOCKING
    ){
        return true;
    }
    return false;
}

/*
 * dead_lock_handler - try to unstick a process stuck in PROCESS_WAITING.
 *
 * Called from the scheduler's scan for every slot that was not picked.
 * Only WAITING processes are examined; all other states return at once.
 *
 * Recovery cases:
 *  - The process is WAITING but its wait-queue list is empty: nothing
 *    can ever wake it, so force it back to READY.
 *  - Otherwise, consult the first wait queue's deadlock_checker hook
 *    (if one is installed); if the hook reports a deadlock, force the
 *    process back to READY as well.
 *
 * NOTE(review): only the FIRST entry of proc->waitq is checked — if a
 * process can wait on several queues, later entries are never examined;
 * confirm whether that is intentional.
 */
void dead_lock_handler(struct process* proc) {
    if (proc->state != PROCESS_WAITING) {
        return;
    }

    if (list_empty(&proc->waitq)) {
        proc->state = PROCESS_READY;
        return;
    }

    waitq_t* waitq = list_entry(proc->waitq.next, waitq_t, queue);
    if (waitq->deadlock_checker) {
        if (waitq->deadlock_checker(waitq)) {
            proc->state = PROCESS_READY;
        }
    }
}

/*
 * schedule - round-robin scheduler: pick the next READY process and
 * context-switch to it.
 *
 * Scans process_table starting at the persistent cursor lookup_proc_ptr,
 * wrapping around once so that every slot except the starting point is
 * visited at most one time (first pass: [old_ptr, MAX_PROCESS), second
 * pass after the wrap: [0, old_ptr)). The current process is skipped so
 * we only switch to a *different* READY process. Every slot that is not
 * picked gets a deadlock check, which may flip a stuck WAITING process
 * back to READY for a later pass.
 *
 * If no other READY process exists, returns without switching (the
 * caller keeps running). Otherwise the time-slice counter is reset, the
 * current process is demoted to READY if it is still runnable, and
 * control jumps to switch_to — this function does not return normally
 * in that case.
 *
 * NOTE(review): lookup_proc_ptr is left pointing AT the chosen slot
 * rather than one past it; the next scan re-examines the same slot
 * first (it will be RUNNING, so it is skipped) — confirm intended.
 */
void schedule() {
    struct process* p = NULL;
    unsigned old_ptr = lookup_proc_ptr;      /* remember where this scan started */
    unsigned upper_bound = MAX_PROCESS;
    while (!p && lookup_proc_ptr < upper_bound) {
        if (&process_table[lookup_proc_ptr] != cur_proc && process_table[lookup_proc_ptr].state == PROCESS_READY) {
            p = &process_table[lookup_proc_ptr];
            break;
        } else {
            /* Not picked: give it a chance to recover from a deadlock. */
            dead_lock_handler(&process_table[lookup_proc_ptr]);
            lookup_proc_ptr++;
            /* End of first pass: wrap to slot 0 and scan up to (but not
             * including) the original starting point. old_ptr = 0 marks
             * that the wrap already happened, so we only wrap once. */
            if (lookup_proc_ptr >= upper_bound && old_ptr != 0) {
                lookup_proc_ptr = 0;
                upper_bound = old_ptr;
                old_ptr = 0;
            }
        }
    }

    if (!p) {
        /* No other runnable process; keep running the current one. */
        return;
    }
    sched_counter = 0;
    /* Demote the outgoing process to READY, but only if it is actually
     * runnable — a SLEEPING/WAITING/BLOCKING caller must keep its state. */
    if (is_proc_can_run(cur_proc)) {
        cur_proc->state = PROCESS_READY;
    }

    p->state = PROCESS_RUNNING;

    // we need manually sent EOI to APIC
    // because the switch_to will jmp over the interrupt handler
    apic_done_service();
    /* Push the target process and jump (not call) into switch_to; the
     * stack is handed over and this function never resumes here. */
    asm volatile("pushl %0\n"
        "jmp switch_to\n" ::"r"(p)
        : "memory");
}

/*
 * uninterruptible_yield - yield the CPU with interrupts disabled around
 * the reschedule, so the scheduler's table scan cannot be preempted.
 *
 * NOTE(review): sti() unconditionally re-enables interrupts on return —
 * callers that entered with interrupts already disabled will have them
 * turned on here; confirm that every caller expects this.
 */
void uninterruptible_yield() {
    cli();
    yield();
    sti();
}

/*
 * sleep_cb - one-shot timer callback armed by sleep(): wakes the
 * sleeping process by marking it READY for the next schedule() pass.
 * @arg is the struct process* captured when the timer was created.
 */
void sleep_cb(void* arg) {
    ((struct process*)arg)->state = PROCESS_READY;
}

/*
 * sleep(millisecond) syscall - put the caller to sleep for roughly
 * @millisecond ms.
 *
 * Marks the caller SLEEPING, arms a one-shot timer whose callback
 * (sleep_cb) flips it back to READY, then reschedules. Always returns 0
 * (once the process is eventually resumed).
 *
 * NOTE(review): the return value of timer_create is ignored — if timer
 * creation can fail, the process would sleep forever; confirm the
 * timer_create contract.
 */
__DEF_SYSCALL1(int, sleep, u32, millisecond) {
    cur_proc->state = PROCESS_SLEEPING;
    timer_create(millisecond, TIMER_ONESHOT, sleep_cb, cur_proc);
    schedule();
    return 0;
}

/*
 * pause() syscall - not implemented; returns ENOSET.
 * NOTE(review): a POSIX pause blocks until a signal is delivered —
 * this stub returns immediately. Presumably ENOSET is this project's
 * "not implemented" code; confirm against the errno definitions.
 */
__DEF_SYSCALL0(int, pause) {
    return ENOSET;
}

/*
 * waitpid(pid, status, option) syscall - not implemented; returns
 * ENOSET unconditionally. @status and @option are ignored.
 */
__DEF_SYSCALL3(pid_t, waitpid, pid_t, pid, int*, status, int, option) {
    return ENOSET;
}

/*
 * yield() syscall - voluntarily give up the CPU. The caller stays
 * runnable (schedule() demotes it to READY) and resumes later; always
 * returns 0.
 */
__DEF_SYSCALL0(int, yield) {
    schedule();
    return 0;
}

/*
 * wait(status) syscall - block the caller until something (presumably
 * a child's exit path elsewhere) makes it runnable again.
 *
 * Marks the caller BLOCKING, zeroes *status if a pointer was given,
 * then reschedules. Returns 0 after resumption.
 *
 * NOTE(review): *status is written BEFORE blocking and never updated
 * with a child's actual exit status — looks like stub semantics;
 * confirm where the real exit status is meant to be filled in.
 */
__DEF_SYSCALL1(int, wait, int*, status) {
    cur_proc->state = PROCESS_BLOCKING;
    if (status) {
        *status = 0;
    }
    schedule();
    return 0;
}