#include "lock.h"
#include "time.h"
#include "stdio.h"
#include "sched.h"
#include "queue.h"
#include "screen.h"
#include "syscall.h"
#include "test.h"

uint64_t STACK_TOP = STACK_MIN;

pcb_t pcb[NUM_MAX_TASK];

/* current running task PCB */
pcb_t *current_running[2];

/* global process id */
pid_t process_id = 1;

uint32_t lockkey;

uint64_t owner;

/* kernel stack ^_^ */
#define NUM_KERNEL_STACK 20

static uint64_t kernel_stack[NUM_KERNEL_STACK];
static int kernel_stack_count;

static uint64_t user_stack[NUM_KERNEL_STACK];
static int user_stack_count;

/* used to fetch kernel_state error */
uint64_t *error = (uint64_t *)0x123456;

/* PID used for create task */
uint64_t PID;

/* CPU core id */
int core_id;

/* Stub: intended to pre-fill the kernel_stack[]/user_stack[] pools.
 * Currently a no-op — stack addresses are computed directly from
 * STACK_TOP in do_spawn() instead.  TODO: implement pooled allocation. */
void init_stack()
{
}

/* Stub: allocate a kernel stack from the kernel_stack[] pool.
 * Not yet implemented — do_spawn() computes stacks from STACK_TOP inline.
 * Fix: return 0 explicitly; falling off the end of a non-void function
 * and using the value is undefined behavior. */
uint64_t new_kernel_stack()
{
    return 0;
}

/* Stub: allocate a user stack from the user_stack[] pool.
 * Not yet implemented — do_spawn() computes stacks from STACK_TOP inline.
 * Fix: return 0 explicitly; falling off the end of a non-void function
 * and using the value is undefined behavior. */
uint64_t new_user_stack()
{
    return 0;
}

/* Stub: return a kernel stack to the kernel_stack[] pool.
 * Currently a no-op — stacks are never reclaimed. */
static void free_kernel_stack(uint64_t stack_addr)
{
}

/* Stub: return a user stack to the user_stack[] pool.
 * Currently a no-op — stacks are never reclaimed. */
static void free_user_stack(uint64_t stack_addr)
{
}

/* Process Control Block */
void set_pcb(pid_t pid, pcb_t *pcb, task_info_t *task_info)
{
}

/* ready queue to run */
queue_t ready_queue;

/* block queue to wait */
queue_t block_queue;

/* sleeping queue to be waken */
queue_t sleeping_queue;

static void check_sleeping()
{
    uint32_t current_time = get_timer();
    if (queue_is_empty(&sleeping_queue))
        return;

    pcb_t *temp = (pcb_t *)(sleeping_queue.head);
    while (temp != NULL)
    {
        if (temp->sleeping_ddl < current_time)
        {
            queue_remove(&sleeping_queue, temp);
            temp->status = TASK_READY;
            queue_push(&ready_queue, temp);
        }
        temp = (pcb_t *)temp->next;
    }
    return;
}

/* Recharge priorities: restore the current task's priority from its
 * init_priority when it has been fully decayed, and do the same for
 * every ready task that is eligible to run on this core (i.e. not
 * pinned to the other core). */
void init_priority(int core_id)
{
    pcb_t *cur = current_running[core_id];
    if (cur->priority <= 0)
        cur->priority = cur->init_priority;

    if (queue_is_empty(&ready_queue))
        return;

    pcb_t *walk;
    for (walk = (pcb_t *)(ready_queue.head); walk != NULL;
         walk = (pcb_t *)(walk->next))
    {
        /* skip tasks attached to the other core */
        if (walk->core != 1 - core_id)
            walk->priority = walk->init_priority;
    }
}

/* Core scheduling routine, entered from do_scheduler() with the old
 * context already saved.  Re-queues the outgoing task, wakes expired
 * sleepers, recharges priorities when every eligible ready task has
 * decayed to zero, then picks the highest-priority ready task that is
 * not pinned to the other core (decaying its priority by one).  Falls
 * back to pcb[0] (the idle loop) when nothing runnable exists for this
 * core.  Only legal from kernel state (kernel_state == 1); otherwise it
 * writes through the deliberately invalid pointer `error` to fault. */
void scheduler(void)
{
    core_id = get_cpu_id();

    if (current_running[core_id]->kernel_state == 1)
    {
        // save cursor, as this task need multi-core, thus we use two cursors, not need save anymore
        // current_running[core_id]->cursor_x = screen_cursor_x;
        // current_running[core_id]->cursor_y = screen_cursor_y;

        // change the current running task into the status ready, then push it into ready_queue
        // (pid 0 — the idle task — is never re-queued)
        if (current_running[core_id]->status == TASK_RUNNING)
        {
            if (current_running[core_id]->pid != 0)
                queue_push(&ready_queue, current_running[core_id]);
            current_running[core_id]->status = TASK_READY;
        }

        // check sleeping or wait task awake when ready_queue is empty
        check_sleeping();
        // NOTE(review): this second call only fires when the SLEEPING
        // queue is empty, in which case check_sleeping() returns
        // immediately — a no-op.  The condition looks like it was meant
        // to test ready_queue; confirm intent before changing.
        if (queue_is_empty(&sleeping_queue))
        {
            check_sleeping();
        }

        // init priority
        // attention!!! initialize priority while all the valid tasks(for this specific core)' priority become zero
        // What is valid task for a core?
        // tasks which attached to this core or attached to nothing!!!
        if (!queue_is_empty(&ready_queue))
        {
            pcb_t *temp;
            int all_zero = 1;
            temp = ((pcb_t *)(ready_queue.head));
            while (temp != NULL)
            {
                if (temp->priority > 0 && (temp->core != 1 - core_id))
                {
                    all_zero = 0;
                    break;
                }
                temp = ((pcb_t *)(temp->next));
            }

            if (all_zero == 1)
            {
                init_priority(core_id);
            }
        }

        // if ready_queue is empty after checking sleep, set current_running to pcb[0]
        // IMPORTANT!!! otherwise your current_running will point to an invalid space and cause TLB exception
        if (queue_is_empty(&ready_queue))
        {
            current_running[core_id] = &pcb[0];
            current_running[core_id]->status = TASK_RUNNING;
        }
        else
        {
            // --------------------------------------- Scheduling without priority (multi-core)-------------------------------------
            // pcb_t *temp;
            // temp = ((pcb_t *)(ready_queue.head));
            // while (temp != ((pcb_t *)(ready_queue.tail)))
            // {
            //     if (temp->core == core_id)
            //         break;
            //     else
            //         temp = ((pcb_t *)(temp->next));
            // }

            // if (temp == ((pcb_t *)(ready_queue.tail)) && temp->core != core_id)
            // {
            //     current_running[core_id] = queue_dequeue(&ready_queue);
            //     current_running[core_id]->status = TASK_RUNNING;
            // }
            // else
            // {
            //     current_running[core_id] = temp;
            //     queue_remove(&ready_queue, temp);
            //     current_running[core_id]->status = TASK_RUNNING;
            // }

            // --------------------------------------- Scheduling without priority (single-core) ------------------------------------
            // current_running[core_id] = queue_dequeue(&ready_queue);
            // current_running[core_id]->status = TASK_RUNNING;

            // --------------------------------------- Prioritized scheduling (multi-core) ------------------------------------------
            // multi-core context switching based on priority
            pcb_t *temp = ((pcb_t *)(ready_queue.head));
            pcb_t *ptr = ((pcb_t *)(ready_queue.head));
            int max = 0;

            // find the highest priority task which is not attached to the other core
            while (ptr != NULL)
            {
                if (ptr->core != (1 - core_id) && ptr->priority > max)
                {
                    temp = ptr;
                    max = ptr->priority;
                }
                ptr = ((pcb_t *)(ptr->next));
            }
            // if temp equals to ready_queue.head, which means that all tasks in ready_queue
            // are attached to the other core,
            // thus we point current_running[core_id] to pcb[0], an infinite loop
            // NOTE(review): if the head itself is eligible but every eligible
            // task has priority 0, temp also stays at head — this test can
            // then idle the core even though runnable tasks exist; confirm.
            if (temp == ((pcb_t *)(ready_queue.head)) && (temp->core == 1 - core_id))
            {
                current_running[core_id] = &pcb[0];
                current_running[core_id]->status = TASK_RUNNING;
            }
            else
            {
                // decay the winner's priority by one (aging toward recharge)
                temp->priority--;
                current_running[core_id] = temp;
                queue_remove(&ready_queue, temp);
                current_running[core_id]->status = TASK_RUNNING;
            }
        }

        // // used for debug: observe valid tasks' status in pcb, also core condition
        // vt100_move_cursor(0, 15);
        // int i;
        // current_running[core_id]->sched_time++;
        // for (i = 0; i < NUM_MAX_TASK; i++)
        // {
        //     if (pcb[i].status != TASK_EXITED)
        //     {
        //         printk("\rPID:%d priority:%d time:%d task_core:%d core:%d\n", pcb[i].pid, pcb[i].priority, pcb[i].sched_time, pcb[i].core, core_id);
        //     }
        // }

        // re-load cursor(not need anymore for multi-core)
        // screen_cursor_x = current_running[core_id]->cursor_x;
        // screen_cursor_y = current_running[core_id]->cursor_y;
    }
    else
    {
        // not in kernel state: write the invalid `error` pointer to fault
        *error = 0;
    }
}

void do_sleep(uint32_t sleep_time)
{
    if (current_running[core_id]->kernel_state == 1)
    {
        if (current_running[core_id]->status == TASK_RUNNING)
        {
            current_running[core_id]->status = TASK_SLEEPING;
            current_running[core_id]->sleeping_ddl = get_timer() + sleep_time;
            current_running[core_id]->priority = current_running[core_id]->init_priority;
            queue_push(&sleeping_queue, current_running[core_id]);
        }
        do_scheduler();
    }
    else
    {
        *error = 0;
    }
}

void do_exit(void)
{
    int i;
    for (i = 0; i < LOCK_TOTAL_NUM; i++)
    {
        if (current_running[core_id]->lock[i] != 0)
        {
            do_mutex_lock_release(&Lock[i]);
        }
    }

    while (!queue_is_empty(&(current_running[core_id]->waiting_queue)))
    {
        pcb_t *head = queue_dequeue(&(current_running[core_id]->waiting_queue));
        head->status = TASK_READY;
        queue_push(&ready_queue, head);
    }

    current_running[core_id]->status = TASK_EXITED;
    do_scheduler();
}

/* Block the calling task on the given wait queue and yield the CPU.
 * Only legal from kernel state; otherwise trips the error trap. */
void do_block(queue_t *queue)
{
    pcb_t *self = current_running[core_id];

    if (self->kernel_state != 1)
    {
        *error = 0;
        return;
    }

    if (self->status == TASK_RUNNING)
    {
        self->status = TASK_BLOCKED;
        queue_push(queue, self);
    }
    /* store current context and fetch the next ready task */
    do_scheduler();
}

/* Wake one task from the given wait queue: pop its head, recharge its
 * priority, and move it to the ready queue.
 * Only legal from kernel state; otherwise trips the error trap.
 * Fix: guard against an empty queue — queue_dequeue() on an empty queue
 * would hand back an invalid pointer that is then dereferenced. */
void do_unblock_one(queue_t *queue)
{
    if (current_running[core_id]->kernel_state == 1)
    {
        if (queue_is_empty(queue))
            return; /* nothing to wake */

        pcb_t *unblock_p = queue_dequeue(queue);
        unblock_p->priority = unblock_p->init_priority;
        unblock_p->status = TASK_READY;
        queue_push(&ready_queue, unblock_p);
    }
    else
    {
        *error = 0;
    }
}

/* Wake every task on the given wait queue by repeatedly unblocking the
 * head until the queue drains.
 * Only legal from kernel state; otherwise trips the error trap. */
void do_unblock_all(queue_t *queue)
{
    if (current_running[core_id]->kernel_state != 1)
    {
        *error = 0;
        return;
    }

    while (!queue_is_empty(queue))
        do_unblock_one(queue);
}

/* Create a new task described by *task and enqueue it on the ready queue.
 *
 * Returns the new task's pid, or -1 when the PCB table is full.
 * Slot 0 is reserved (idle task), so the free-slot search starts at 1.
 * Fix: the original used a bare `return;` in this int-returning function,
 * handing callers an indeterminate value. */
int do_spawn(task_info_t *task)
{
    int i = 1;
    /* find a free (exited) PCB slot */
    while (pcb[i].status != TASK_EXITED)
    {
        i++;
        if (i >= NUM_MAX_TASK)
            return -1; /* PCB table full */
    }

    /* fill in the new PCB */
    pcb[i].pid = PID;
    PID++;
    strcpy(pcb[i].name, task->name);
    pcb[i].prev = NULL;
    pcb[i].next = NULL;

    pcb[i].entry_point = task->entry_point;
    pcb[i].type = task->type;
    pcb[i].status = TASK_READY;
    pcb[i].kernel_state = 0;
    pcb[i].priority = task->priority;
    pcb[i].init_priority = task->priority;
    pcb[i].sleeping_ddl = 0;

    /* reg[31] ($ra) = first switch entry, reg[29] ($sp) = stack pointer;
     * each task owns two STACK_SIZE areas above STACK_TOP:
     * one kernel stack, one user stack */
    pcb[i].kernel_context.reg[31] = (uint64_t)first_entry;
    pcb[i].kernel_context.reg[29] = STACK_TOP + (2 * i) * STACK_SIZE;
    pcb[i].user_context.reg[29] = STACK_TOP + (2 * i + 1) * STACK_SIZE;
    pcb[i].kernel_context.cp0_status = 0x10008001;
    pcb[i].user_context.cp0_status = 0x10008001;
    pcb[i].kernel_context.cp0_epc = task->entry_point;
    pcb[i].user_context.cp0_epc = task->entry_point;

    pcb[i].cursor_x = 0;
    pcb[i].cursor_y = 0;

    /* the new task holds no locks yet */
    int k;
    for (k = 0; k < LOCK_TOTAL_NUM; k++)
    {
        pcb[i].lock[k] = NULL;
    }
    pcb[i].lock_num = 0;

    queue_init(&(pcb[i].waiting_queue));

    queue_push(&ready_queue, &pcb[i]);

    return pcb[i].pid;
}

/* Forcibly terminate the task with the given pid.
 *
 * Releases any mutexes the victim holds, detaches it from whatever
 * queue it currently sits on, wakes everyone blocked in its waiting
 * queue, and — when the victim is the caller itself — yields the CPU.
 * Returns 0 on success, -1 if no task with that pid exists.
 * Fixes: the original used a bare `return;` in this int-returning
 * function and fell off the end without returning a value; it also
 * shadowed `j` with an inner declaration. */
int do_kill(pid_t pid)
{
    int i = 0, j;

    /* locate the victim's PCB */
    while (pcb[i].pid != pid)
    {
        i++;
        if (i >= NUM_MAX_TASK)
            return -1; /* no such pid */
    }

    /* release the locks pcb[i] holds */
    for (j = 0; j < LOCK_TOTAL_NUM; j++)
    {
        if (pcb[i].lock[j] != 0)
        {
            do_mutex_lock_release(&Lock[j]);
            pcb[i].lock[j] = 0;
        }
    }

    /* remove the pcb from the queue it's in */
    if (pcb[i].status != TASK_EXITED)
    {
        if (pcb[i].status == TASK_READY)
        {
            queue_remove(&ready_queue, &pcb[i]);
        }
        else if (pcb[i].status == TASK_SLEEPING)
        {
            queue_remove(&sleeping_queue, &pcb[i]);
        }
        else if (pcb[i].status == TASK_BLOCKED)
        {
            /* a blocked task sits in some lock's wait queue; scan them all */
            for (j = 0; j < LOCK_TOTAL_NUM; j++)
            {
                if (check_in_queue(&(Lock[j].mutex_lock_queue), &pcb[i]))
                {
                    queue_remove(&(Lock[j].mutex_lock_queue), &pcb[i]);
                }
            }
        }

        pcb[i].status = TASK_EXITED;
    }

    /* waking up the tasks blocked in pcb[i]'s waiting queue */
    while (!queue_is_empty(&(pcb[i].waiting_queue)))
    {
        pcb_t *head = queue_dequeue(&(pcb[i].waiting_queue));
        head->status = TASK_READY;
        queue_push(&ready_queue, head);
    }

    /* a task killing itself must give up the CPU immediately */
    if (current_running[core_id]->pid == pid)
    {
        do_scheduler();
    }
    return 0;
}

/* Block the calling task until the task with the given pid exits.
 *
 * The caller is parked on the target's waiting_queue; do_exit()/do_kill()
 * move it back to the ready queue.  If the target has already exited,
 * returns immediately.
 * Returns 0 on success, -1 if no task with that pid exists.
 * Fix: the original used a bare `return;` in this int-returning function
 * and fell off the end without returning a value. */
int do_waitpid(pid_t pid)
{
    int i = 1;
    while (pcb[i].pid != pid)
    {
        i++;
        if (i >= NUM_MAX_TASK)
            return -1; /* no such pid */
    }

    /* block the caller on pcb[i]'s waiting queue until it exits */
    if (pcb[i].status != TASK_EXITED)
    {
        current_running[core_id]->status = TASK_BLOCKED;
        queue_push(&pcb[i].waiting_queue, current_running[core_id]);
        do_scheduler();
    }
    return 0;
}

// process show
void do_process_show()
{
    int i;
    int num = 0;
    for (i = 0; i < NUM_MAX_TASK; i++)
    {
        if (pcb[i].status == TASK_READY || pcb[i].status == TASK_BLOCKED ||
            pcb[i].status == TASK_SLEEPING || pcb[i].status == TASK_RUNNING)
        {
            ProcessShow[num].num = num;
            ProcessShow[num].pid = pcb[i].pid;
            ProcessShow[num].status = pcb[i].status;
            num++;
        }
        else
        {
            ProcessShow[num].num = -1;
        }
    }
}

pid_t do_getpid()
{
    return current_running[core_id]->pid;
}

// set info task on specific core
int do_task_set(task_info_t *task, int task_core_id)
{
    int pid;
    pid = do_spawn(task);
    int i = 0;
    for (i = 0; i < NUM_MAX_TASK; i++)
    {
        if (pcb[i].pid == pid)
        {
            pcb[i].core = task_core_id;
            break;
        }
    }
    return pid;
}

/* Pin an already-existing task (looked up by pid) to task_core_id.
 * Slot 0 (idle task) is skipped.  Returns the pid unchanged. */
int do_task_set_pid(int pid, int task_core_id)
{
    int slot;
    for (slot = 1; slot < NUM_MAX_TASK; slot++)
    {
        if (pcb[slot].pid == pid)
        {
            pcb[slot].core = task_core_id;
            break;
        }
    }
    return pid;
}