#include "internal.h"

/*
 * Does this runqueue hold nothing but SCHED_IDLE tasks?
 * True only when it is non-empty and every queued task is idle-policy.
 */
static int sched_idle_rq(struct rq *rq)
{
    return unlikely(rq->nr_running &&
                    rq->nr_running == rq->cfs.h_nr_idle);
}

/*
 * Remove @se from @cfs_rq's timeline: erase its node from the cached,
 * augmented rbtree and subtract its contribution from the avg_vruntime
 * aggregates.  Callers are responsible for the rest of the dequeue
 * bookkeeping (on_rq, load accounting, etc.).
 */
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
                              &min_vruntime_cb);
    avg_vruntime_sub(cfs_rq, se);
}

/*
 * Mark @se as a delayed dequeue and remove it from the h_nr_runnable
 * count of every cfs_rq up the hierarchy, stopping after the first
 * throttled cfs_rq encountered (that one is still adjusted).
 */
static void set_delayed(struct sched_entity *se)
{
    struct cfs_rq *qcfs_rq;

    se->sched_delayed = 1;

    for_each_sched_entity(se) {
        qcfs_rq = cfs_rq_of(se);
        qcfs_rq->h_nr_runnable--;
        if (cfs_rq_throttled(qcfs_rq))
            break;
    }
}

/*
 * Undo set_delayed(): clear the delayed-dequeue mark on @se and add it
 * back into h_nr_runnable of every cfs_rq up the hierarchy, stopping
 * after the first throttled cfs_rq encountered (that one is still
 * adjusted).
 */
static void clear_delayed(struct sched_entity *se)
{
    struct cfs_rq *qcfs_rq;

    se->sched_delayed = 0;

    for_each_sched_entity(se) {
        qcfs_rq = cfs_rq_of(se);
        qcfs_rq->h_nr_runnable++;
        if (cfs_rq_throttled(qcfs_rq))
            break;
    }
}

static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
{
    clear_delayed(se);
}

/*
 * Dequeue @se from @cfs_rq.
 *
 * Returns true when the entity was fully dequeued.  Returns false when
 * the dequeue was deferred instead: under DELAY_DEQUEUE, an ineligible
 * sleeping entity is kept queued with ->sched_delayed set rather than
 * being removed (see set_delayed()).
 */
static bool dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
    bool sleep = flags & DEQUEUE_SLEEP;
    int action = UPDATE_TG;

    /* Charge runtime up to now before the entity leaves the queue. */
    update_curr(cfs_rq);
    clear_buddies(cfs_rq, se);

    if (flags & DEQUEUE_DELAYED)
    {
        /* Completing a previously deferred dequeue; must be marked. */
        SCHED_WARN_ON(!se->sched_delayed);
    }
    else
    {
        bool delay = sleep;
        /*
         * DELAY_DEQUEUE relies on spurious wakeups, special task
         * states must not suffer spurious wakeups, exempt them.
         */
        if (flags & DEQUEUE_SPECIAL)
            delay = false;

        /* An already-delayed entity must not be delay-dequeued again. */
        SCHED_WARN_ON(delay && se->sched_delayed);

        if (sched_feat(DELAY_DEQUEUE) && delay &&
            !entity_eligible(cfs_rq, se))
        {
            /*
             * Defer the dequeue: sync load only, mark the entity
             * delayed and leave it on the queue.
             */
            update_load_avg(cfs_rq, se, 0);
            set_delayed(se);
            return false;
        }
    }

    /* A migrating task also detaches its load from this cfs_rq. */
    if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
        action |= DO_DETACH;

    /*
     * When dequeuing a sched_entity, we must:
     *   - Update loads to have both entity and cfs_rq synced with now.
     *   - For group_entity, update its runnable_weight to reflect the new
     *     h_nr_runnable of its group cfs_rq.
     *   - Subtract its previous weight from cfs_rq->load.weight.
     *   - For group entity, update its weight to reflect the new share
     *     of its group cfs_rq.
     */
    update_load_avg(cfs_rq, se, action);
    se_update_runnable(se);

    update_stats_dequeue_fair(cfs_rq, se, flags);

    update_entity_lag(cfs_rq, se);
    if (sched_feat(PLACE_REL_DEADLINE) && !sleep)
    {
        /*
         * Store the deadline relative to vruntime and flag it, so the
         * absolute deadline can be reconstructed later — presumably on
         * re-enqueue (re-placement not visible in this chunk).
         */
        se->deadline -= se->vruntime;
        se->rel_deadline = 1;
    }

    /* Only non-current entities live in the timeline tree. */
    if (se != cfs_rq->curr)
        __dequeue_entity(cfs_rq, se);
    se->on_rq = 0;
    account_entity_dequeue(cfs_rq, se);

    /* return excess runtime on last dequeue */
    return_cfs_rq_runtime(cfs_rq);

    update_cfs_group(se);

    /*
     * Now advance min_vruntime if @se was the entity holding it back,
     * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
     * put back on, and if we advance min_vruntime, we'll be placed back
     * further than we started -- i.e. we'll be penalized.
     */
    if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
        update_min_vruntime(cfs_rq);

    /* Delayed dequeue completed: undo the set_delayed() accounting. */
    if (flags & DEQUEUE_DELAYED)
        finish_delayed_dequeue_entity(se);

    /* Queue just went empty: let PELT track the idle period. */
    if (cfs_rq->nr_queued == 0)
        update_idle_cfs_rq_clock_pelt(cfs_rq);

    return true;
}

/*
 * Basically dequeue_task_fair(), except it can deal with dequeue_entity()
 * failing half-way through and resume the dequeue later.
 *
 * @se is either a task's entity or a group entity; the hierarchy above it
 * is walked bottom-up, dequeuing each level whose cfs_rq becomes empty.
 *
 * Returns:
 * -1 - dequeue delayed
 *  0 - dequeue throttled
 *  1 - dequeue complete
 */
static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
{
    bool was_sched_idle = sched_idle_rq(rq);
    int rq_h_nr_queued = rq->cfs.h_nr_queued;
    bool task_sleep = flags & DEQUEUE_SLEEP;
    bool task_delayed = flags & DEQUEUE_DELAYED;
    struct task_struct *p = NULL;
    int h_nr_idle = 0;
    int h_nr_queued = 0;
    int h_nr_runnable = 0;
    struct cfs_rq *cfs_rq;
    u64 slice = 0;

    if (entity_is_task(se))
    {
        p = task_of(se);
        h_nr_queued = 1;
        h_nr_idle = task_has_idle_policy(p);
        /*
         * A task that is already sched_delayed was removed from the
         * runnable counts when it got delayed; only subtract runnable
         * again if this dequeue sleeps it, completes the delay, or the
         * task was never delayed.
         */
        if (task_sleep || task_delayed || !se->sched_delayed)
            h_nr_runnable = 1;
    }
    else
    {
        /* Group entity: seed @slice from its group queue's minimum. */
        cfs_rq = group_cfs_rq(se);
        slice = cfs_rq_min_slice(cfs_rq);
    }

    /*
     * Pass 1: dequeue @se, then each ancestor whose cfs_rq it empties.
     */
    for_each_sched_entity(se)
    {
        cfs_rq = cfs_rq_of(se);

        if (!dequeue_entity(cfs_rq, se, flags))
        {
            /* The task itself got delay-dequeued: report and bail. */
            if (p && &p->se == se)
                return -1;

            /*
             * NOTE(review): @slice is not refreshed here before the
             * break, so pass 2 propagates a value computed before this
             * (deferred) dequeue; upstream fair.c re-reads
             * cfs_rq_min_slice(cfs_rq) at this point — verify.
             */
            break;
        }

        cfs_rq->h_nr_runnable -= h_nr_runnable;
        cfs_rq->h_nr_queued -= h_nr_queued;
        cfs_rq->h_nr_idle -= h_nr_idle;

        /* Below an idle cfs_rq, everything counts as idle to parents. */
        if (cfs_rq_is_idle(cfs_rq))
            h_nr_idle = h_nr_queued;

        /* end evaluation on encountering a throttled cfs_rq */
        if (cfs_rq_throttled(cfs_rq))
            return 0;

        /* Don't dequeue parent if it has other entities besides us */
        if (cfs_rq->load.weight)
        {
            slice = cfs_rq_min_slice(cfs_rq);

            /* Avoid re-evaluating load for this entity: */
            se = parent_entity(se);
            /*
             * Bias pick_next to pick a task from this cfs_rq, as
             * p is sleeping when it is within its sched_slice.
             */
            if (task_sleep && se && !throttled_hierarchy(cfs_rq))
                set_next_buddy(se);
            break;
        }
        /* The level emptied: ancestors are dequeued as plain sleeps. */
        flags |= DEQUEUE_SLEEP;
        flags &= ~(DEQUEUE_DELAYED | DEQUEUE_SPECIAL);
    }

    /*
     * Pass 2: the remaining ancestors stay queued; just update their
     * load, counters and propagate the minimum slice upward.
     */
    for_each_sched_entity(se)
    {
        cfs_rq = cfs_rq_of(se);

        update_load_avg(cfs_rq, se, UPDATE_TG);
        se_update_runnable(se);
        update_cfs_group(se);

        se->slice = slice;
        slice = cfs_rq_min_slice(cfs_rq);

        cfs_rq->h_nr_runnable -= h_nr_runnable;
        cfs_rq->h_nr_queued -= h_nr_queued;
        cfs_rq->h_nr_idle -= h_nr_idle;

        if (cfs_rq_is_idle(cfs_rq))
            h_nr_idle = h_nr_queued;

        /* end evaluation on encountering a throttled cfs_rq */
        if (cfs_rq_throttled(cfs_rq))
            return 0;
    }

    sub_nr_running(rq, h_nr_queued);

    /* Last fair task left the rq: the deadline server can stop. */
    if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
        dl_server_stop(&rq->fair_server);

    /* balance early to pull high priority tasks */
    if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
        rq->next_balance = jiffies;

    if (p && task_delayed)
    {
        SCHED_WARN_ON(!task_sleep);
        SCHED_WARN_ON(p->on_rq != 1);

        /* Fix-up what dequeue_task_fair() skipped */
        hrtick_update(rq);

        /*
         * Fix-up what block_task() skipped.
         *
         * Must be last, @p might not be valid after this.
         */
        __block_task(rq, p);
    }

    return 1;
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 *
 * Returns false when the dequeue was deferred (delayed dequeue),
 * true otherwise.
 */
static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
    /*
     * Skip util_est removal for a delayed task that is only being
     * saved or migrated — it is not really leaving the class.
     */
    if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE))))
        util_est_dequeue(&rq->cfs, p);

    util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP);
    if (dequeue_entities(rq, &p->se, flags) < 0)
        return false;

    /*
     * Must not reference @p after dequeue_entities(DEQUEUE_DELAYED).
     *
     * The delayed (-1) path calls hrtick_update() inside
     * dequeue_entities() ("Fix-up what dequeue_task_fair() skipped");
     * the normal path must do it here — previously it was missing.
     */
    hrtick_update(rq);

    return true;
}
