#include "linux/common.h"
#include "linux/interrupt.h"
#include "linux/smp.h"
#include "linux/printk.h"
#include "linux/preempt.h"
#include "linux/sched.h"
#include "linux/percpu-defs.h"

#include "asm/mmu_context.h"


/* One runqueue per CPU, cacheline-aligned to avoid false sharing between CPUs. */
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

/*
 * Register @idle as the idle task of @cpu's runqueue and install it as
 * the task currently running there.
 */
void init_idle(struct task_struct *idle, int cpu)
{
    struct rq *this_rq = cpu_rq(cpu);

    this_rq->idle = idle;
    this_rq->curr = idle;
}

/*
 * Initialise every per-cpu runqueue: lock, counters, an empty CFS task
 * list, and the idle task.
 *
 * NOTE(review): init_idle(current, cpu) registers the boot task as the
 * idle task of *every* CPU — confirm this is intended for SMP bring-up.
 */
void sched_init(void)
{
    int cpu;

    for (cpu = 0; cpu < NR_CPUS; cpu++) {
        struct rq *rq = cpu_rq(cpu);

        raw_spin_lock_init(&rq->lock);
        rq->cpu = cpu;
        rq->nr_running = 0;
        rq->curr = NULL;
        rq->idle = NULL;

        INIT_LIST_HEAD(&rq->cfs.tasks);
        rq->cfs.nr_running = 0;

        init_idle(current, cpu);
    }
}

/* Stub: no scheduler-clock calibration is performed (yet). */
void sched_clock_init(void)
{

}

/*
 * Pick the next runnable task on @rq's CFS list, round-robin, starting
 * just after the currently running task (or from the list head when the
 * boot task init_task is running, since init_task is not on the list).
 *
 * Fixes over the previous version:
 *  - starts the scan from rq->curr instead of the per-cpu `current`
 *    macro, so the starting point is consistent with the runqueue
 *    actually being scanned (L51 already tested rq->curr);
 *  - terminates after exactly one full traversal and falls back to
 *    rq->idle instead of spinning forever when no task is
 *    PROCESS_READY (including the empty-list case).
 */
struct task_struct *pick_next_task(struct rq *rq)
{
    struct list_head *head = &rq->cfs.tasks;
    struct list_head *start, *pos;

    if (rq->curr == &init_task)
        start = head->next;
    else
        start = rq->curr->run_list.next;

    pos = start;
    do {
        if (pos != head) {          /* skip the list head sentinel */
            struct task_struct *p =
                list_entry(pos, struct task_struct, run_list);

            if (p->state == PROCESS_READY)
                return p;
        }
        pos = pos->next;
    } while (pos != start);

    /* No READY task found after a full scan: run the idle task. */
    return rq->idle;
}

/* Debug aid: last task installed as rq->curr by __schedule(). */
struct task_struct *current_debug_p;

/*
 * Post-context-switch cleanup hook, reached on the new task's stack
 * after switch_to() returns.  Currently a stub.
 *
 * NOTE(review): always returns NULL, so callers must not dereference
 * the returned runqueue (see __schedule(), which ignores it).
 */
static struct rq *finish_task_switch(struct task_struct *task)
{
    return NULL;
}

/*
 * Switch from @prev to @next: first switch the address space
 * (switch_mm), then the CPU register/stack state (switch_to).  The
 * order is load-bearing — the new stack must be valid in the new
 * address space before we jump onto it.  Execution resumes past
 * switch_to() only when @prev is rescheduled at some later point.
 *
 * Returns finish_task_switch()'s result, which is currently NULL.
 */
static struct rq* context_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
    switch_mm(prev->mm, next->mm, next);

    switch_to(prev, next, prev);
    
    return finish_task_switch(prev);
}

/*
 * Core rescheduling: pick the next task on this CPU's runqueue and
 * context-switch to it.  Must be called with interrupts and preemption
 * disabled (see schedule()).
 *
 * Fixes over the previous version:
 *  - rq->curr is updated *before* rq->lock is released, so another CPU
 *    acquiring the lock can never observe a stale current task (the
 *    old code published rq->curr after the unlock);
 *  - no context switch is performed when the picked task is already
 *    running (prev == next), instead of switching a task onto itself.
 */
void __schedule(void)
{
    int cpu;
    struct rq *rq;
    struct task_struct *prev, *next;

    cpu = smp_processor_id();
    rq = cpu_rq(cpu);

    prev = rq->curr;

    raw_spin_lock(&rq->lock);

    next = pick_next_task(rq);
    rq->curr = next;            /* publish while the lock is held */

    raw_spin_unlock(&rq->lock);

    current_debug_p = rq->curr;

    if (prev != next)
        rq = context_switch(rq, prev, next);
}

/*
 * Main entry point for voluntary rescheduling.
 *
 * Preemption and interrupts are disabled around __schedule() so the
 * runqueue choice cannot be invalidated mid-switch.  Note that the
 * epilogue (irq/preempt re-enable) runs in the context of whichever
 * task the CPU eventually switches back to at this point.
 */
void schedule(void)
{
    preempt_disable();
    local_irq_disable();
	
    __schedule();

    local_irq_enable();
    preempt_enable();
}

/*
 * schedule() variant for callers that must return with preemption
 * disabled (e.g. an idle loop): reschedule, then re-disable preemption
 * before returning.
 */
void schedule_preempt_disabled(void)
{
	//sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}



/*
 * Acquire and return the runqueue @p belongs to (via task_rq()).
 * Released by task_rq_unlock().
 *
 * NOTE(review): @flags is currently unused — presumably kept for
 * interface parity with the Linux API, where the rq lock is taken
 * irqsave; here callers save IRQ state on p->pi_lock beforehand
 * (see wake_up_new_task()).  Confirm this is intentional.
 */
struct rq *__task_rq_lock(struct task_struct *p, unsigned long *flags)
{
    struct rq *rq;
    
    rq = task_rq(p);
    raw_spin_lock(&rq->lock);

    return rq;
}

/*
 * Release the locks taken for a task-runqueue operation, in reverse
 * acquisition order: drop rq->lock first, then p->pi_lock, restoring
 * the IRQ state saved when pi_lock was taken (see wake_up_new_task()).
 */
void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long flags)
{
    raw_spin_unlock(&rq->lock);
    raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

/*
 * Make a newly created task runnable: insert it at the tail of its
 * runqueue's CFS task list so pick_next_task() can find it.
 *
 * Lock order: p->pi_lock (IRQs saved) -> rq->lock; both are released
 * in reverse order by task_rq_unlock().
 *
 * Fix: the runqueue counters (rq->nr_running, rq->cfs.nr_running) are
 * initialised in sched_init() but were never updated on enqueue; they
 * are now incremented alongside the list insertion.
 */
void wake_up_new_task(struct task_struct *p)
{
    unsigned long flags;
    struct rq *rq;

    raw_spin_lock_irqsave(&p->pi_lock, &flags);
    
    rq = __task_rq_lock(p, &flags);

    list_add_tail(&p->run_list, &rq->cfs.tasks);
    /* keep the bookkeeping consistent with the insertion */
    rq->cfs.nr_running++;
    rq->nr_running++;

    task_rq_unlock(rq, p, flags);
}

/*
 * Stub: waking an existing (sleeping) task is not implemented yet.
 * NOTE(review): callers currently get a silent no-op — the task is
 * never made runnable.
 */
void wake_up_process(struct task_struct *p)
{

}