/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_WRR
 * policies)
 */

/*
 * Without group scheduling there is exactly one wrr_rq per CPU runqueue,
 * so this "iteration" visits &rq->wrr once and then terminates.
 */
#define for_each_leaf_wrr_rq(wrr_rq, rq) \
    for (wrr_rq = &rq->wrr; wrr_rq; wrr_rq = NULL)

/*
 * Likewise, a sched_wrr_entity has no parent hierarchy here: the loop
 * body runs once for the entity itself and then stops.
 */
#define for_each_sched_wrr_entity(wrr_se) \
    for (; wrr_se; wrr_se = NULL)

/* Every wrr entity is a task (no group entities exist in this class). */
#define wrr_entity_is_task(wrr_se) (1)

/* Map a sched_wrr_entity back to the task_struct embedding it. */
static inline struct task_struct *wrr_task_of(struct sched_wrr_entity *wrr_se)
{
    struct task_struct *task;

    task = container_of(wrr_se, struct task_struct, wrr);
    return task;
}

/* Map a wrr_rq back to the per-CPU runqueue embedding it. */
static inline struct rq *rq_of_wrr_rq(struct wrr_rq *wrr_rq)
{
    struct rq *owner;

    owner = container_of(wrr_rq, struct rq, wrr);
    return owner;
}

/*
 * Find the wrr_rq an entity belongs to: go entity -> task -> task's
 * runqueue, then take that runqueue's embedded wrr_rq.
 */
static inline struct wrr_rq *wrr_rq_of_se(struct sched_wrr_entity *wrr_se)
{
    struct rq *rq = task_rq(wrr_task_of(wrr_se));

    return &rq->wrr;
}

/*
 * An entity is queued iff its run_list node is linked into a queue;
 * list_del_init() on dequeue guarantees the node self-links when idle.
 */
static inline int on_wrr_rq(struct sched_wrr_entity *wrr_se)
{
    return list_empty(&wrr_se->run_list) ? 0 : 1;
}



/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_wrr(struct rq *rq)
{
    struct task_struct *curr = rq->curr;
    struct sched_wrr_entity *wrr_se = &curr->wrr;
    struct wrr_rq *wrr_rq = wrr_rq_of_se(wrr_se);
    u64 delta_exec;

    /* Only account for tasks governed by the WRR policy. */
    if (!task_has_wrr_policy(curr))
        return;

    /* Time accrued since the task last started executing. */
    delta_exec = rq->clock - curr->se.exec_start;
    /* Clamp a negative (wrapped/out-of-order clock) delta to zero. */
    if (unlikely((s64)delta_exec < 0))
        delta_exec = 0;

    /* Track the longest single stretch of execution (schedstats only). */
    schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

    curr->se.sum_exec_runtime += delta_exec;
    account_group_exec_runtime(curr, delta_exec);

    /* Restart the accounting window from now. */
    curr->se.exec_start = rq->clock;
    cpuacct_charge(curr, delta_exec);

    sched_wrr_avg_update(rq, delta_exec);

}






/* Account one more runnable WRR task on this wrr_rq. */
static inline
void inc_wrr_tasks(struct sched_wrr_entity *wrr_se, struct wrr_rq *wrr_rq)
{
    wrr_rq->wrr_nr_running += 1;
}

/* Account one fewer runnable WRR task on this wrr_rq. */
static inline
void dec_wrr_tasks(struct sched_wrr_entity *wrr_se, struct wrr_rq *wrr_rq)
{
    wrr_rq->wrr_nr_running -= 1;
}

/*
 * Link an entity onto the tail of its wrr_rq's run queue and bump the
 * runnable count.  Caller must ensure the entity is not already queued.
 */
static void __enqueue_wrr_entity(struct sched_wrr_entity *wrr_se)
{
    struct wrr_rq *wrr_rq = wrr_rq_of_se(wrr_se);

    list_add_tail(&wrr_se->run_list, &wrr_rq->queue);
    inc_wrr_tasks(wrr_se, wrr_rq);
}

/*
 * Unlink an entity from its run queue (re-initialising its node so that
 * on_wrr_rq() reports false) and drop the runnable count.
 */
static void __dequeue_wrr_entity(struct sched_wrr_entity *wrr_se)
{
    struct wrr_rq *wrr_rq = wrr_rq_of_se(wrr_se);

    list_del_init(&wrr_se->run_list);
    dec_wrr_tasks(wrr_se, wrr_rq);
}


static void enqueue_wrr_entity(struct sched_wrr_entity *wrr_se)
{
    for_each_sched_wrr_entity(wrr_se)
        __enqueue_wrr_entity(wrr_se);
}

/*
 * Dequeue an entity.  Without group scheduling the iterator visits only
 * wrr_se itself.
 *
 * Bug fix: the old code called __enqueue_wrr_entity() here, which would
 * list_add_tail() a node that is already linked (corrupting the run list)
 * and *increment* wrr_nr_running on every dequeue, so tasks were never
 * actually removed from the runqueue.  Dequeue must call
 * __dequeue_wrr_entity(), and only for entities that are queued.
 */
static void dequeue_wrr_entity(struct sched_wrr_entity *wrr_se)
{
    for_each_sched_wrr_entity(wrr_se) {
        if (on_wrr_rq(wrr_se))
            __dequeue_wrr_entity(wrr_se);
    }
}

/*
 * Adding/removing a task to/from a priority array:
 */
/*
 * Adding a task to the run queue.  A wakeup resets the watchdog timeout
 * so the soft-limit clock starts fresh for this run.
 */
static void enqueue_task_wrr(struct rq *rq, struct task_struct *p, int wakeup)
{
    if (wakeup)
        p->wrr.timeout = 0;

    enqueue_wrr_entity(&p->wrr);
}

/*
 * Removing a task from the run queue.  Runtime stats are flushed first
 * so the task's accounting is current when it leaves.
 */
static void dequeue_task_wrr(struct rq *rq, struct task_struct *p, int sleep)
{
    update_curr_wrr(rq);
    dequeue_wrr_entity(&p->wrr);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
/*
 * Put task to the end (or head) of the run list without the overhead of
 * a dequeue followed by an enqueue.  No-op for entities not queued.
 */
static void
requeue_wrr_entity(struct wrr_rq *wrr_rq, struct sched_wrr_entity *wrr_se, int head)
{
    struct list_head *queue;

    if (!on_wrr_rq(wrr_se))
        return;

    queue = &wrr_rq->queue;
    if (head)
        list_move(&wrr_se->run_list, queue);
    else
        list_move_tail(&wrr_se->run_list, queue);
}

/* Requeue a task's entity on its wrr_rq (single pass without groups). */
static void requeue_task_wrr(struct rq *rq, struct task_struct *p, int head)
{
    struct sched_wrr_entity *wrr_se = &p->wrr;

    for_each_sched_wrr_entity(wrr_se) {
        requeue_wrr_entity(wrr_rq_of_se(wrr_se), wrr_se, head);
    }
}

/* sched_yield(): move the current task to the tail of its run list. */
static void yield_task_wrr(struct rq *rq)
{
    requeue_task_wrr(rq, rq->curr, 0);
}



/*
 * Preempt the current task with a newly woken task if needed:
 */
/*
 * Preempt the current task with a newly woken task if needed.
 * WRR tasks all share one priority level, so a wakeup never preempts
 * the running task; this hook is intentionally a no-op.
 */
static void check_preempt_curr_wrr(struct rq *rq, struct task_struct *p, int flags)
{
}

/*
 * The entity at the head of the run list runs next.  Caller guarantees
 * the queue is non-empty (checked via wrr_nr_running).
 */
static struct sched_wrr_entity *pick_next_wrr_entity(struct rq *rq,
                           struct wrr_rq *wrr_rq)
{
    return list_entry(wrr_rq->queue.next, struct sched_wrr_entity, run_list);
}

/*
 * Select the next WRR task to run on this runqueue, or NULL when no WRR
 * task is runnable.  Stamps exec_start so runtime accounting for the
 * chosen task begins now.
 */
static struct task_struct *_pick_next_task_wrr(struct rq *rq)
{
    struct wrr_rq *wrr_rq = &rq->wrr;
    struct sched_wrr_entity *wrr_se;
    struct task_struct *next;

    if (unlikely(!wrr_rq->wrr_nr_running))
        return NULL;

    wrr_se = pick_next_wrr_entity(rq, wrr_rq);
    next = wrr_task_of(wrr_se);
    next->se.exec_start = rq->clock;

    return next;
}

/* sched_class hook: delegate straight to the internal picker. */
static struct task_struct *pick_next_task_wrr(struct rq *rq)
{
    return _pick_next_task_wrr(rq);
}

/*
 * A WRR task is being switched out: flush its runtime accounting first,
 * then clear exec_start to mark that it is no longer executing.
 */
static void put_prev_task_wrr(struct rq *rq, struct task_struct *p)
{
    update_curr_wrr(rq);
    p->se.exec_start = 0;
}



/*
 * When switching a task to WRR, we may overload the runqueue
 * with WRR tasks. In this case we try to push them off to
 * other runqueues.
 */
/*
 * When switching a task to WRR, we may overload the runqueue
 * with WRR tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_wrr(struct rq *rq, struct task_struct *p,
               int running)
{
    /*
     * NOTE(review): this unconditionally reschedules the current task,
     * even when p is not runnable on this rq or p == rq->curr; sched_rt's
     * equivalent hook only reschedules conditionally.  Presumably a
     * deliberate simplification -- confirm it causes no spurious resched.
     */
    resched_task(rq->curr);
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
/*
 * Priority of the task has changed.  All WRR tasks run at one level,
 * so a priority change requires no push/pull action here.
 */
static void prio_changed_wrr(struct rq *rq, struct task_struct *p,
                int oldprio, int running)
{
}

/*
 * Scheduler-tick handler: charge the running task for the elapsed tick
 * and rotate it to the back of the queue when its time slice expires.
 */
static void task_tick_wrr(struct rq *rq, struct task_struct *p, int queued)
{
    update_curr_wrr(rq);

    /*
     * Only SCHED_WRR tasks carry a time slice; other policies handled
     * by this class (e.g. FIFO-like ones) run until they yield/block.
     */
    if (p->policy != SCHED_WRR)
        return;

    /* One tick consumed; nothing more to do while slice remains. */
    if (--p->wrr.time_slice > 0)
        return;

    /* Slice exhausted: recharge it, scaled by the task's weight. */
    p->wrr.time_slice = DEF_TIMESLICE * p->wrr.weight;

    /*
     * Requeue to the end of queue if we are not the only element
     * on the queue (prev != next means the list has >1 node):
     */
    if (p->wrr.run_list.prev != p->wrr.run_list.next) {
        requeue_task_wrr(rq, p, 0);
        set_tsk_need_resched(p);
    }
}

/*
 * The current task just entered this scheduling class: restart its
 * execution-time accounting window from the present runqueue clock.
 */
static void set_curr_task_wrr(struct rq *rq)
{
    rq->curr->se.exec_start = rq->clock;
}

/*
 * sched_rr_get_interval(): report the round-robin slice.  Only
 * SCHED_WRR tasks have one (weight-scaled); all others report 0.
 */
unsigned int get_rr_interval_wrr(struct task_struct *task)
{
    if (task->policy != SCHED_WRR)
        return 0;

    return DEF_TIMESLICE * task->wrr.weight;
}

/*
 * The WRR scheduling class: hook table consumed by the core scheduler.
 * Sits above fair_sched_class in the class chain.
 */
static const struct sched_class wrr_sched_class = {
    .next               = &fair_sched_class,
    .enqueue_task       = enqueue_task_wrr,
    .dequeue_task       = dequeue_task_wrr,
    .yield_task         = yield_task_wrr,

    .check_preempt_curr = check_preempt_curr_wrr, /* no-op: single prio level */

    .pick_next_task     = pick_next_task_wrr,
    .put_prev_task      = put_prev_task_wrr,


    .set_curr_task      = set_curr_task_wrr,
    .task_tick          = task_tick_wrr,

    .get_rr_interval    = get_rr_interval_wrr,

    .prio_changed       = prio_changed_wrr, /* no-op: single prio level */
    .switched_to        = switched_to_wrr,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_wrr_rq(struct seq_file *m, int cpu, struct wrr_rq *wrr_rq);

/*
 * Dump WRR runqueue statistics for one CPU into a seq_file (debugfs).
 * RCU read lock taken around the walk, mirroring the RT/CFS debug paths;
 * with a single wrr_rq per CPU the loop body runs exactly once.
 */
static void print_wrr_stats(struct seq_file *m, int cpu)
{
    struct wrr_rq *wrr_rq;

    rcu_read_lock();
    for_each_leaf_wrr_rq(wrr_rq, cpu_rq(cpu))
        print_wrr_rq(m, cpu, wrr_rq);
    rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */

