#include <linux/list.h>
#include <linux/sched/rt.h>
#include "../inc/sched.h"

/* SCHED_RR timeslice in jiffies; RR_TIMESLICE comes from <linux/sched/rt.h>. */
int sched_rr_timeslice = RR_TIMESLICE;

/***********************************************************/
/*
 * Walk the rt_se hierarchy. Without group scheduling there is only the
 * task's own entity, so the "walk" visits rt_se exactly once (the loop
 * update sets it to NULL, terminating after one iteration).
 */
#define for_each_sched_rt_entity(rt_se) \
    for (; rt_se; rt_se = NULL)

/*
 * No-op stub: pushable-task bookkeeping belongs to the push/pull
 * balancing machinery, which is not built in this configuration.
 */
static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

/* Map an rt scheduling entity back to the task_struct embedding it. */
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
    return container_of(rt_se, struct task_struct, rt);
}

/* Runqueue of the task that owns @rt_se. */
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
    return task_rq(rt_task_of(rt_se));
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
    struct rq *rq = rq_of_rt_se(rt_se);

    return &rq->rt;
}

/*
 * Group runqueue attached to @rt_se; always NULL because group
 * scheduling is not supported in this build, so every rt_se is a
 * task entity.
 */
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
    return NULL;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
    return rt_task_of(rt_se)->prio;
}

/* A task entity always accounts for exactly one runnable task. */
static inline unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
    return 1;
}

/* Map an rt runqueue back to the CPU runqueue embedding it. */
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
    return container_of(rt_rq, struct rq, rt);
}

static inline unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
    struct task_struct *tsk;

    tsk = rt_task_of(rt_se);

    return (tsk->policy == SCHED_RR) ? 1 : 0;
}

/* Account one freshly enqueued entity on @rt_rq and its parent rq. */
static inline void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
    rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
    add_nr_running(rq_of_rt_rq(rt_rq), 1);
}

/*
 * Change rt_se->run_list location unless SAVE && !MOVE.
 *
 * Assumes the ENQUEUE/DEQUEUE flag encodings match, so the DEQUEUE_*
 * masks can be used for both directions.
 */
static inline bool move_entity(unsigned int flags)
{
    return (flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE;
}

/*
 * Unlink @rt_se from its priority queue; clear the priority bit when
 * the queue becomes empty so the bitmap stays in sync.
 */
static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
    unsigned int prio = rt_se_prio(rt_se);

    list_del_init(&rt_se->run_list);
    if (list_empty(array->queue + prio))
        __clear_bit(prio, array->bitmap);

    rt_se->on_list = 0;
}

/*
 * Link @rt_se onto its priority queue (unless SAVE && !MOVE asked us to
 * keep its list position), mark it on-rq and update the accounting.
 *
 * Fixes vs. previous version: removed the unused @group_rq local, and
 * set rt_se->on_list when the entity is actually placed on a queue —
 * without this, the WARN_ON_ONCE(!rt_se->on_list) in
 * __dequeue_rt_entity() fires on every movable dequeue because nothing
 * else ever sets on_list to 1.
 */
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
    struct rt_prio_array *array = &rt_rq->active;
    struct list_head *queue = array->queue + rt_se_prio(rt_se);

    if (move_entity(flags))
    {
        if (flags & ENQUEUE_HEAD)
            list_add(&rt_se->run_list, queue);
        else
            list_add_tail(&rt_se->run_list, queue);

        __set_bit(rt_se_prio(rt_se), array->bitmap);
        rt_se->on_list = 1;
    }

    rt_se->on_rq = 1;

    inc_rt_tasks(rt_se, rt_rq);
}

/* Mark the top-level rt_rq as having runnable work, if it does. */
static void enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
    if (!rt_rq->rt_queued && rt_rq->rt_nr_running)
        rt_rq->rt_queued = 1;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
    struct rq *rq = rq_of_rt_se(rt_se);

    for_each_sched_rt_entity(rt_se)
        __enqueue_rt_entity(rt_se, flags);

    enqueue_top_rt_rq(&rq->rt);
}

/* sched_class hook: add task @p to the rt runqueue. */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
    enqueue_rt_entity(&p->rt, flags);
}

/* True when the rt class has queued work on @rq. */
static inline bool sched_rt_runnable(struct rq *rq)
{
    return rq->rt.rt_queued > 0;
}

/*
 * Pick the highest-priority entity: find the lowest set bit in the
 * priority bitmap, then take the first entity on that queue (FIFO
 * order within a priority level).
 */
static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
{
    struct rt_prio_array *array = &rt_rq->active;
    struct list_head *queue;
    int idx;

    idx = sched_find_first_bit(array->bitmap);
    BUG_ON(idx >= MAX_RT_PRIO); /* delimiter bit guarantees a hit */

    queue = array->queue + idx;
    if (SCHED_WARN_ON(list_empty(queue)))
        return NULL; /* bitmap/queue mismatch — should not happen */

    return list_first_entry(queue, struct sched_rt_entity, run_list);
}

/* Descend the rt hierarchy until we reach a task-level entity. */
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
    struct rt_rq *rt_rq = &rq->rt;
    struct sched_rt_entity *rt_se;

    for (;;)
    {
        rt_se = pick_next_rt_entity(rt_rq);
        if (unlikely(!rt_se))
            return NULL;

        rt_rq = group_rt_rq(rt_se);
        if (!rt_rq)
            break; /* task entity — done descending */
    }

    return rt_task_of(rt_se);
}

/* sched_class hook: return the next rt task, or NULL if none queued. */
static struct task_struct *pick_task_rt(struct rq *rq)
{
    if (!sched_rt_runnable(rq))
        return NULL;

    return _pick_next_task_rt(rq);
}

/* sched_class hook: nothing to do on context-switch-out in this build. */
static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
{
}

/* sched_class hook: nothing to do on context-switch-in in this build. */
static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
}

/* Is @rt_se currently accounted on an rt runqueue? */
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
    return rt_se->on_rq;
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
    struct rt_prio_array *array = &rt_rq->active;
    struct list_head *queue;

    if (!on_rt_rq(rt_se))
        return;

    queue = array->queue + rt_se_prio(rt_se);
    if (head)
        list_move(&rt_se->run_list, queue);
    else
        list_move_tail(&rt_se->run_list, queue);
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
    struct sched_rt_entity *rt_se = &p->rt;
    struct rt_rq *rt_rq;

    for_each_sched_rt_entity(rt_se)
    {
        rt_rq = rt_rq_of_se(rt_se);
        requeue_rt_entity(rt_rq, rt_se, head);
    }
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
    struct task_struct *donor = rq->donor;
    s64 delta_exec;

    if (donor->sched_class != &rt_sched_class)
        return;

    /* Accounts elapsed exec time; returns the delta (<= 0 means none). */
    delta_exec = update_curr_common(rq);
    if (unlikely(delta_exec <= 0))
        return;

#ifdef CONFIG_RT_GROUP_SCHED
    struct sched_rt_entity *rt_se = &donor->rt;

    if (!rt_bandwidth_enabled())
        return;

    /* Charge delta_exec against every level's bandwidth budget. */
    for_each_sched_rt_entity(rt_se)
    {
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        int exceeded;

        if (sched_rt_runtime(rt_rq) != RUNTIME_INF)
        {
            raw_spin_lock(&rt_rq->rt_runtime_lock);
            rt_rq->rt_time += delta_exec;
            exceeded = sched_rt_runtime_exceeded(rt_rq);
            if (exceeded)
                resched_curr(rq);
            raw_spin_unlock(&rt_rq->rt_runtime_lock);
            /* Must be called without rt_runtime_lock held. */
            if (exceeded)
                do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
        }
    }
#endif
}

/* Scheduler-tick hook: account runtime and manage the RR timeslice. */
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
    struct sched_rt_entity *se = &p->rt;

    update_curr_rt(rq);

    /*
     * RR tasks need a special form of time-slice management.
     * FIFO tasks have no timeslices.
     */
    if (p->policy != SCHED_RR)
        return;

    /* Slice not yet exhausted — nothing more to do this tick. */
    if (--p->rt.time_slice)
        return;

    p->rt.time_slice = sched_rr_timeslice;

    /*
     * Requeue to the end of queue if we (and all of our ancestors) are not
     * the only element on the queue.
     */
    for_each_sched_rt_entity(se)
    {
        if (se->run_list.prev != se->run_list.next)
        {
            requeue_task_rt(rq, p, 0);
            resched_curr(rq);
            return;
        }
    }
}

/* No-op stub: cross-CPU pull balancing is not built here. */
static inline void rt_queue_pull_task(struct rq *rq)
{
}

/* No-op stub: cross-CPU push balancing is not built here. */
static inline void rt_queue_push_tasks(struct rq *rq)
{
}

/*
 * Finish a dequeue: re-enqueue any group entities that still have
 * runnable children, then refresh the top-level rt_rq state.
 *
 * NOTE(review): this assumes the caller (dequeue_task_rt) already ran
 * dequeue_rt_stack() to take the whole stack off the queues; upstream
 * performs that call inside this function instead — confirm the split
 * is intentional.
 */
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
    struct rq *rq = rq_of_rt_se(rt_se);

    for_each_sched_rt_entity(rt_se)
    {
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        /* Only group entities with remaining tasks go back on; with
         * group scheduling disabled, group_rt_rq() is NULL and this
         * loop is a no-op. */
        if (rt_rq && rt_rq->rt_nr_running)
            __enqueue_rt_entity(rt_se, flags);
    }

    enqueue_top_rt_rq(&rq->rt);
}

/* Undo inc_rt_tasks(): drop one entity's accounting from @rt_rq and its rq. */
static inline void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
    rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
    sub_nr_running(rq_of_rt_rq(rt_rq), 1);
}

/*
 * Take @rt_se off its priority queue (unless SAVE && !MOVE keeps its
 * position), clear the on-rq mark and update the accounting.
 */
static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

    if (move_entity(flags))
    {
        WARN_ON_ONCE(!rt_se->on_list);
        __delist_rt_entity(rt_se, &rt_rq->active);
    }
    rt_se->on_rq = 0;

    dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Mark the top-level rt_rq as having no queued work.
 *
 * NOTE(review): @count is unused here — rq->nr_running accounting is
 * done per-entity in dec_rt_tasks() in this build, unlike upstream
 * which subtracts @count here. Confirm this split is intentional.
 */
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

    /* Only valid for the top-level (per-CPU) rt_rq. */
    BUG_ON(&rq->rt != rt_rq);

    if (!rt_rq->rt_queued)
        return;

    BUG_ON(!rq->nr_running);
    rt_rq->rt_queued = 0;
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
    struct sched_rt_entity *back = NULL;
    unsigned int rt_nr_running;

    /* First pass: reverse the bottom-up chain via ->back so we can
     * walk it top-down afterwards. @back ends at the topmost entity. */
    for_each_sched_rt_entity(rt_se)
    {
        rt_se->back = back;
        back = rt_se;
    }

    /* Snapshot before dequeueing mutates the counters. */
    rt_nr_running = rt_rq_of_se(back)->rt_nr_running;

    /* Second pass: dequeue top-down, skipping entities not on an rq. */
    for (rt_se = back; rt_se; rt_se = rt_se->back)
    {
        if (on_rt_rq(rt_se))
            __dequeue_rt_entity(rt_se, flags);
    }

    dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
}

/*
 * sched_class hook: remove task @p from the rt runqueue.
 *
 * NOTE(review): upstream runs update_curr_rt() before any dequeue work;
 * here dequeue_rt_stack() runs first — confirm the accounting is still
 * correct with this ordering.
 */
static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
    struct sched_rt_entity *rt_se = &p->rt;

    dequeue_rt_stack(rt_se, flags);

    update_curr_rt(rq);
    /* Re-enqueues group levels that still have tasks, refreshes top rq. */
    dequeue_rt_entity(rt_se, flags);

    dequeue_pushable_task(rq, p);

    return true;
}

/*
 * Wakeup-preemption hook: reschedule if the woken task @p outranks the
 * current priority donor (lower prio value == higher priority).
 */
static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
{
    if (p->prio < rq->donor->prio)
        resched_curr(rq);
}

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
    /*
     * If we are running, update the avg_rt tracking, as the running time
     * will now on be accounted into the latter.
     */
    if (task_current(rq, p))
    {
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
        return;
    }

    /*
     * If we are not running we may need to preempt the current
     * running task. If that current running task is also an RT task
     * then see if we can move to another run queue.
     */
    if (task_on_rq_queued(p))
    {
#ifdef CONFIG_SMP
        /* rt_queue_push_tasks() is a no-op stub in this build. */
        if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
            rt_queue_push_tasks(rq);
#endif /* CONFIG_SMP */
        if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq)))
            resched_curr(rq);
    }
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
    if (!task_on_rq_queued(p))
        return;

    /* @p is the task currently donating its priority to this rq. */
    if (task_current_donor(rq, p))
    {
#ifdef CONFIG_SMP
        /*
         * If our priority decreases while running, we
         * may need to pull tasks to this runqueue.
         */
        if (oldprio < p->prio)
            rt_queue_pull_task(rq);

        /*
         * If there's a higher priority task waiting to run
         * then reschedule.
         */
        if (p->prio > rq->rt.highest_prio.curr)
            resched_curr(rq);
#else
        /* For UP simply resched on drop of prio */
        if (oldprio < p->prio)
            resched_curr(rq);
#endif /* CONFIG_SMP */
    }
    else
    {
        /*
         * This task is not running, but if it is
         * greater than the current running task
         * then reschedule.
         */
        if (p->prio < rq->donor->prio)
            resched_curr(rq);
    }
}

/* Initialize @rq's rt runqueue: empty queues, clean bitmap, not queued. */
static void init_rt_rq(struct rq *rq)
{
    struct rt_prio_array *array = &rq->rt.active;
    int i;

    for (i = 0; i < MAX_RT_PRIO; i++)
    {
        INIT_LIST_HEAD(&array->queue[i]);
        __clear_bit(i, array->bitmap);
    }

    /* delimiter for bitsearch: */
    __set_bit(MAX_RT_PRIO, array->bitmap);

    rq->rt.rt_queued = 0;
}

/* The rt scheduling class: hook table consumed by the core scheduler. */
DEFINE_SCHED_CLASS(rt) = {
    .enqueue_task = enqueue_task_rt,
    .dequeue_task = dequeue_task_rt,
    .pick_task = pick_task_rt,
    .put_prev_task = put_prev_task_rt,
    .set_next_task = set_next_task_rt,

    .task_tick = task_tick_rt,
    .wakeup_preempt = wakeup_preempt_rt,

    .prio_changed = prio_changed_rt,
    .switched_to = switched_to_rt,
    .update_curr = update_curr_rt,

    .init_rq = init_rt_rq,
};

void init_rt_entity(struct task_struct *p)
{
    INIT_LIST_HEAD(&p->rt.run_list);
    p->rt.time_slice = sched_rr_timeslice;
    p->rt.on_rq = 0;
    p->rt.on_list = 0;
}
