#include "../inc/sched.h"
#include <linux/sched/clock.h>
#include <linux/sched/deadline.h>
#include <linux/percpu.h>

/*
 * One runqueue per possible CPU, cache-line aligned so that different
 * CPUs touching their own rq do not false-share. Access goes through
 * cpu_rq()/task_rq() below.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

/*
 * cpu_rq - look up the runqueue belonging to @cpu.
 *
 * Returns a pointer to @cpu's per-CPU struct rq.
 */
struct rq *cpu_rq(unsigned cpu)
{
    struct rq *rq = &per_cpu(runqueues, cpu);

    return rq;
}

/*
 * task_rq - runqueue of the CPU that @tsk is currently assigned to.
 *
 * Note: without holding the right locks the task may migrate right
 * after this returns; see __task_rq_lock() for the stable variant.
 */
struct rq *task_rq(const struct task_struct *tsk)
{
    unsigned cpu = task_cpu(tsk);

    return cpu_rq(cpu);
}

/*
 * update_rq_clock_task - advance the task-time clock of @rq by @delta.
 *
 * Kept separate from update_rq_clock() so the task clock can be
 * adjusted independently of the raw rq clock.
 */
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
    rq->clock_task = rq->clock_task + delta;
}

/*
 * raw_spin_rq_lock_irq - disable local interrupts, then take @rq's lock.
 *
 * Interrupts must be disabled *before* acquiring the lock so an IRQ on
 * this CPU cannot try to take the same rq lock and deadlock.
 */
void raw_spin_rq_lock_irq(struct rq *rq)
{
    local_irq_disable();
    raw_spin_rq_lock(rq);
}

/*
 * raw_spin_rq_unlock_irq - release @rq's lock, then re-enable interrupts.
 *
 * Mirror image of raw_spin_rq_lock_irq(): the lock is dropped first so
 * we never run with interrupts enabled while still holding it.
 */
void raw_spin_rq_unlock_irq(struct rq *rq)
{
    raw_spin_rq_unlock(rq);
    local_irq_enable();
}

/*
 * raw_spin_rq_lock_nested - acquire @rq's underlying lock with a lockdep
 * subclass annotation (@subclass), for when two rq locks are held at once.
 *
 * The preempt_disable()/preempt_enable_no_resched() pair keeps preemption
 * off across the acquisition without triggering a reschedule on the way
 * out — the lock itself already holds preemption off once taken.
 */
void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
    /* Matches synchronize_rcu() in __sched_core_enable() */
    preempt_disable();
    raw_spin_lock_nested(&rq->__lock, subclass);
    /* preempt_count *MUST* be > 1 */
    preempt_enable_no_resched();
}

/*
 * raw_spin_rq_unlock - release @rq's lock.
 *
 * Goes through rq_lockp() rather than &rq->__lock directly: with core
 * scheduling, siblings may share a lock, so the effective lock pointer
 * is resolved at runtime. NOTE(review): the nested-lock path above takes
 * &rq->__lock directly — presumably matched elsewhere; verify.
 */
void raw_spin_rq_unlock(struct rq *rq)
{
    raw_spin_unlock(rq_lockp(rq));
}

/*
 * raw_spin_rq_lock - acquire @rq's lock with the default (0) lockdep
 * subclass; shorthand for the common single-rq case.
 */
void raw_spin_rq_lock(struct rq *rq)
{
    raw_spin_rq_lock_nested(rq, 0);
}

/*
 * update_rq_clock - bring @rq's clock up to date from the sched clock.
 *
 * Reads sched_clock_cpu() for this rq's CPU and advances both rq->clock
 * and (via update_rq_clock_task()) rq->clock_task by the elapsed delta.
 *
 * A negative delta (sched clock went backwards relative to rq->clock,
 * e.g. unstable TSC) is silently ignored so the rq clock stays monotonic.
 */
void update_rq_clock(struct rq *rq)
{
    s64 delta;
    u64 clock;

    /* Caller asked us to skip this update (RQCF_ACT_SKIP set). */
    if (rq->clock_update_flags & RQCF_ACT_SKIP)
        return;

    clock = sched_clock_cpu(cpu_of(rq));

    delta = clock - rq->clock;
    if (delta < 0)
        return;
    rq->clock += delta;
    update_rq_clock_task(rq, delta);
}

int sched_find_first_bit(const unsigned long *b)
{
    int ret;

#if BITS_PER_LONG == 64
    if (b[0])
        ret = __ffs(b[0]);
    else
        ret = __ffs(b[1]) + 64;
#elif BITS_PER_LONG == 32
    if (b[0])
        ret = __ffs(b[0]);
    else if (b[1])
        ret = __ffs(b[1]) + 32;
    else if (b[2])
        ret = __ffs(b[2]) + 64;
    else
        ret = __ffs(b[3]) + 96;
#else
#error BITS_PER_LONG not defined
#endif

    return ret;
}

/*
 * sub_nr_running - account @count fewer runnable tasks on @rq.
 */
void sub_nr_running(struct rq *rq, unsigned count)
{
    rq->nr_running = rq->nr_running - count;
}

/*
 * add_nr_running - account @count more runnable tasks on @rq.
 */
void add_nr_running(struct rq *rq, unsigned count)
{
    rq->nr_running = rq->nr_running + count;
}

/*
 * __task_rq_lock - lock the rq @p resides on.
 *
 * Caller must hold p->pi_lock (asserted below). Because @p can migrate
 * between reading task_rq(p) and taking that rq's lock, we loop: take
 * the lock, re-check that @p is still on that rq and not mid-migration,
 * and retry otherwise. On success the rq is pinned (rq_pin_lock) and
 * returned locked.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
    __acquires(rq->lock)
{
    struct rq *rq;

    lockdep_assert_held(&p->pi_lock);

    for (;;)
    {
        rq = task_rq(p);
        raw_spin_rq_lock(rq);
        /* Stable iff @p is still on this rq and not being migrated. */
        if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
        {
            rq_pin_lock(rq, rf);
            break;
        }
        raw_spin_rq_unlock(rq);

        /* Busy-wait for an in-flight migration to finish before retrying. */
        while (unlikely(task_on_rq_migrating(p)))
            cpu_relax();
    }

    return rq;
}

/*
 * __task_rq_unlock - undo __task_rq_lock(): unpin @rq, then drop its lock.
 */
void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
    __releases(rq->lock)
{
    rq_unpin_lock(rq, rf);
    raw_spin_rq_unlock(rq);
}

/*
 * task_rq_unlock - release both locks taken around @p: unpin and drop
 * the rq lock first, then release p->pi_lock restoring the IRQ state
 * saved in rf->flags.
 */
void task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
    rq_unpin_lock(rq, rf);
    raw_spin_rq_unlock(rq);
    raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
    p->sched_class->enqueue_task(rq, p, flags);
}

/*
 * activate_task - enqueue @p on @rq and mark it as queued.
 *
 * The enqueue happens before p->on_rq is published via WRITE_ONCE so
 * lock-free readers of p->on_rq never see TASK_ON_RQ_QUEUED for a task
 * that is not actually on a runqueue yet.
 */
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
    enqueue_task(rq, p, flags);

    WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
}

bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
    return p->sched_class->dequeue_task(rq, p, flags);
}

/*
 * put_prev_set_next_task - switch @rq's running task from @prev to @next.
 *
 * No-op when @next is @prev; otherwise puts @prev via its class hook and
 * then installs @next via its class hook. Warns if @prev is not rq->curr.
 */
void put_prev_set_next_task(struct rq *rq,
                            struct task_struct *prev, struct task_struct *next)
{
    WARN_ON_ONCE(rq->curr != prev);

    if (next != prev)
    {
        prev->sched_class->put_prev_task(rq, prev, next);
        next->sched_class->set_next_task(rq, next, true);
    }
}

/*
 * ->switching_to() is called with the pi_lock and rq_lock held and must not
 * mess with locking.
 */
void check_class_changing(struct rq *rq, struct task_struct *p,
                          const struct sched_class *prev_class)
{
    if (prev_class != p->sched_class && p->sched_class->switching_to)
        p->sched_class->switching_to(rq, p);
}

/*
 * check_class_changed - run class/priority change notifications for @p.
 *
 * If @p's class changed: call the old class's optional ->switched_from(),
 * then the new class's ->switched_to() (mandatory for the new class).
 * Otherwise, if the priority changed (or @p is a deadline task), call
 * ->prio_changed().
 *
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
void check_class_changed(struct rq *rq, struct task_struct *p,
                         const struct sched_class *prev_class,
                         int oldprio)
{
    if (prev_class == p->sched_class)
    {
        if (oldprio != p->prio || dl_task(p))
            p->sched_class->prio_changed(rq, p, oldprio);
        return;
    }

    if (prev_class->switched_from)
        prev_class->switched_from(rq, p);

    p->sched_class->switched_to(rq, p);
}

void set_next_task(struct rq *rq, struct task_struct *next)
{
    next->sched_class->set_next_task(rq, next, false);
}

/*
 * put_prev_task - put @prev back via its class hook with no successor
 * (NULL next).
 *
 * NOTE(review): this asserts against rq->donor while
 * put_prev_set_next_task() asserts against rq->curr — presumably
 * intentional (donor differs from curr under proxy execution); confirm.
 */
void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	WARN_ON_ONCE(rq->donor != prev);
	prev->sched_class->put_prev_task(rq, prev, NULL);
}
