#include "internal.h"

/*
 * Return the entity with the earliest virtual deadline, i.e. the cached
 * leftmost node of the tasks timeline, or NULL when the tree is empty.
 */
struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
    struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);

    return leftmost ? __node_2_se(leftmost) : NULL;
}

/*
 * Earliest Eligible Virtual Deadline First
 *
 * In order to provide latency guarantees for different request sizes
 * EEVDF selects the best runnable task from two criteria:
 *
 *  1) the task must be eligible (must be owed service)
 *
 *  2) from those tasks that meet 1), we select the one
 *     with the earliest virtual deadline.
 *
 * We can do this in O(log n) time due to an augmented RB-tree. The
 * tree keeps the entries sorted on deadline, but also functions as a
 * heap based on the vruntime by keeping:
 *
 *  se->min_vruntime = min(se->vruntime, se->{left,right}->min_vruntime)
 *
 * Which allows tree pruning through eligibility.
 */
/*
 * Select the earliest-eligible-virtual-deadline entity from @cfs_rq.
 *
 * @protect: when true, let the current entity keep running while its
 *           slice protection (protect_slice()) still holds, provided it
 *           is on the runqueue and eligible.
 *
 * Returns the best entity to run next, or NULL when nothing is queued.
 */
static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq, bool protect)
{
    struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
    struct sched_entity *se = __pick_first_entity(cfs_rq);
    struct sched_entity *curr = cfs_rq->curr;
    struct sched_entity *best = NULL;

    /*
     * We can safely skip eligibility check if there is only one entity
     * in this cfs_rq, saving some cycles.
     */
    if (cfs_rq->nr_queued == 1)
        return curr && curr->on_rq ? curr : se;

    /* Drop curr from consideration if it is off-rq or no longer eligible. */
    if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
        curr = NULL;

    /*
     * Slice protection: an eligible, running curr gets to finish its
     * (protected) slice before we consider preempting it.
     */
    if (curr && protect && protect_slice(curr))
        return curr;

    /* Pick the leftmost entity if it's eligible */
    if (se && entity_eligible(cfs_rq, se))
    {
        best = se;
        goto found;
    }

    /* Heap search for the EEVD entity */
    while (node)
    {
        struct rb_node *left = node->rb_left;

        /*
         * Eligible entities in left subtree are always better
         * choices, since they have earlier deadlines.
         */
        if (left && vruntime_eligible(cfs_rq,
                                      __node_2_se(left)->min_vruntime))
        {
            node = left;
            continue;
        }

        se = __node_2_se(node);

        /*
         * The left subtree either is empty or has no eligible
         * entity, so check the current node since it is the one
         * with earliest deadline that might be eligible.
         */
        if (entity_eligible(cfs_rq, se))
        {
            best = se;
            break;
        }

        node = node->rb_right;
    }
found:
    /*
     * Fall back to curr when the tree yielded nothing, or prefer curr
     * when it orders before the tree's best (entity_before() — earlier
     * deadline, presumably; curr is not in the tree while running).
     */
    if (!best || (curr && entity_before(curr, best)))
        best = curr;

    return best;
}

static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
{
    return __pick_eevdf(cfs_rq, true);
}

/*
 * Pick the next process, keeping these things in mind, in this order:
 * 1) keep things fair between processes/task groups
 * 2) pick the "next" process, since someone really wants that to run
 * 3) pick the "last" process, for cache locality
 * 4) do not run the "skip" process, if something else is available
 */
/*
 * Pick the next entity from @cfs_rq, honouring the ->next buddy and
 * handling delayed-dequeue entities.
 *
 * Returns the chosen entity, or NULL when the EEVDF pick turned out to
 * be sched_delayed and was dequeued — the caller must retry the pick.
 */
static struct sched_entity *pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
{
    struct sched_entity *se;

    /*
     * Picking the ->next buddy will affect latency but not fairness.
     */
    if (sched_feat(PICK_BUDDY) &&
        cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
    {
        /* ->next will never be delayed */
        SCHED_WARN_ON(cfs_rq->next->sched_delayed);
        return cfs_rq->next;
    }

    se = pick_eevdf(cfs_rq);
    if (se->sched_delayed)
    {
        /* Complete the deferred dequeue instead of running @se. */
        dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
        /*
         * Must not reference @se again, see __block_task().
         */
        return NULL;
    }

    return se;
}

/*
 * Walk the cfs_rq hierarchy from the root, picking an entity at each
 * level, until a leaf (task) entity is reached.
 *
 * Returns the picked task, or NULL when no fair task is queued. Restarts
 * from the root whenever bandwidth throttling fires or the picked entity
 * was delayed-dequeued (pick_next_entity() returned NULL).
 */
static struct task_struct *pick_task_fair(struct rq *rq)
{
    struct sched_entity *se;
    struct cfs_rq *cfs_rq;

again:
    cfs_rq = &rq->cfs;
    if (!cfs_rq->nr_queued)
        return NULL;

    do
    {
        /* Might not have done put_prev_entity() */
        if (cfs_rq->curr && cfs_rq->curr->on_rq)
            update_curr(cfs_rq);

        /* Throttling may have changed the tree; restart from the root. */
        if (unlikely(check_cfs_rq_runtime(cfs_rq)))
            goto again;

        se = pick_next_entity(rq, cfs_rq);
        if (!se)
            goto again;
        /* Descend: NULL group_cfs_rq() means @se is a task entity. */
        cfs_rq = group_cfs_rq(se);
    } while (cfs_rq);

    return task_of(se);
}

/*
 * Fair-class pick_next_task: select the next task via EEVDF and do the
 * prev -> next accounting.
 *
 * @rf is part of the class-wide pick interface and is unused here.
 *
 * Returns the chosen task, or NULL when no fair task is runnable.
 */
struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
    struct task_struct *p;

    p = pick_task_fair(rq);
    if (p)
        put_prev_set_next_task(rq, prev, p);

    return p;
}

/*
 * Convenience wrapper used when no rq_flags context is available.
 */
static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev)
{
    struct rq_flags *no_rf = NULL;

    return pick_next_task_fair(rq, prev, no_rf);
}
