#include "internal.h"

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se);

/*
 * Account the arrival of @se on @cfs_rq: fold its weight into the
 * runqueue load and bump the queued-entity count. Under SMP, tasks
 * (not group entities) are additionally entered into NUMA accounting
 * and onto the rq's cfs_tasks list.
 */
static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
#ifdef CONFIG_SMP
	if (entity_is_task(se)) {
		struct rq *rq = rq_of(cfs_rq);

		/* NUMA stats and rq->cfs_tasks only track real tasks. */
		account_numa_enqueue(rq, task_of(se));
		list_add(&se->group_node, &rq->cfs_tasks);
	}
#endif
	cfs_rq->nr_queued++;
}

/*
 * Clear the delayed-dequeue state of @se and, for a task, restore the
 * hierarchical runnable accounting that its (delayed) dequeue removed:
 * walk up the entity hierarchy bumping h_nr_runnable, stopping at a
 * throttled cfs_rq (levels above it do not count its entities).
 */
static void clear_delayed(struct sched_entity *se)
{
    se->sched_delayed = 0;

    /*
     * Delayed se of cfs_rq have no tasks queued on them.
     * Do not adjust h_nr_runnable since a dequeue has
     * decremented h_nr_runnable.
     */
    if (!entity_is_task(se))
        return;

    for_each_sched_entity(se)
    {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        cfs_rq->h_nr_runnable++;
        if (cfs_rq_throttled(cfs_rq))
            break;
    }
}

/*
 * Place @se on @cfs_rq: choose its vruntime relative to the queue's
 * average vruntime — compensating the entity's stored lag for the shift
 * its own weight causes in that average — and set its virtual deadline.
 *
 * @flags: enqueue flags; ENQUEUE_INITIAL marks a newly forked task.
 */
static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
    u64 vslice, vruntime = avg_vruntime(cfs_rq);
    s64 lag = 0;

    if (!se->custom_slice)
        se->slice = sysctl_sched_base_slice;
    vslice = calc_delta_fair(se->slice, se);

    /*
     * Due to how V is constructed as the weighted average of entities,
     * adding tasks with positive lag, or removing tasks with negative lag
     * will move 'time' backwards, this can screw around with the lag of
     * other tasks.
     *
     * EEVDF: placement strategy #1 / #2
     */
    if (sched_feat(PLACE_LAG) && cfs_rq->nr_queued && se->vlag)
    {
        struct sched_entity *curr = cfs_rq->curr;
        unsigned long load;

        lag = se->vlag;

        /*
         * If we want to place a task and preserve lag, we have to
         * consider the effect of the new entity on the weighted
         * average and compensate for this, otherwise lag can quickly
         * evaporate.
         *
         * Lag is defined as:
         *
         *   lag_i = S - s_i = w_i * (V - v_i)
         *
         * To avoid the 'w_i' term all over the place, we only track
         * the virtual lag:
         *
         *   vl_i = V - v_i <=> v_i = V - vl_i
         *
         * And we take V to be the weighted average of all v:
         *
         *   V = (\Sum w_j*v_j) / W
         *
         * Where W is: \Sum w_j
         *
         * Then, the weighted average after adding an entity with lag
         * vl_i is given by:
         *
         *   V' = (\Sum w_j*v_j + w_i*v_i) / (W + w_i)
         *      = (W*V + w_i*(V - vl_i)) / (W + w_i)
         *      = (W*V + w_i*V - w_i*vl_i) / (W + w_i)
         *      = (V*(W + w_i) - w_i*vl_i) / (W + w_i)
         *      = V - w_i*vl_i / (W + w_i)
         *
         * And the actual lag after adding an entity with vl_i is:
         *
         *   vl'_i = V' - v_i
         *         = V - w_i*vl_i / (W + w_i) - (V - vl_i)
         *         = vl_i - w_i*vl_i / (W + w_i)
         *
         * Which is strictly less than vl_i. So in order to preserve lag
         * we should inflate the lag before placement such that the
         * effective lag after placement comes out right.
         *
         * As such, invert the above relation for vl'_i to get the vl_i
         * we need to use such that the lag after placement is the lag
         * we computed before dequeue.
         *
         *   vl'_i = vl_i - w_i*vl_i / (W + w_i)
         *         = ((W + w_i)*vl_i - w_i*vl_i) / (W + w_i)
         *
         *   (W + w_i)*vl'_i = (W + w_i)*vl_i - w_i*vl_i
         *                   = W*vl_i
         *
         *   vl_i = (W + w_i)*vl'_i / W
         */
        /*
         * NOTE(review): avg_load appears not to include a running curr
         * (which is taken out of the tree aggregates); add its weight
         * back while it is still on_rq — confirm against avg_vruntime().
         */
        load = cfs_rq->avg_load;
        if (curr && curr->on_rq)
            load += scale_load_down(curr->load.weight);

        lag *= load + scale_load_down(se->load.weight);
        if (WARN_ON_ONCE(!load))
            load = 1;
        lag = div_s64(lag, load);
    }

    se->vruntime = vruntime - lag;

    if (se->rel_deadline)
    {
        /*
         * The deadline was preserved across dequeue relative to
         * vruntime; convert it back to an absolute deadline and skip
         * computing a fresh one.
         */
        se->deadline += se->vruntime;
        se->rel_deadline = 0;
        return;
    }

    /*
     * When joining the competition; the existing tasks will be,
     * on average, halfway through their slice, as such start tasks
     * off with half a slice to ease into the competition.
     */
    if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
        vslice /= 2;

    /*
     * EEVDF: vd_i = ve_i + r_i/w_i
     */
    se->deadline = se->vruntime + vslice;
}

/*
 * Insert @se into the cfs_rq timeline (augmented rbtree), ordered by
 * __entity_less, after folding it into the avg_vruntime aggregates.
 * The subtree-min fields are seeded with the entity's own values
 * before insertion; min_vruntime_cb then maintains them upwards.
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	avg_vruntime_add(cfs_rq, se);
	se->min_vruntime = se->vruntime;
	se->min_slice = se->slice;
	rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
				__entity_less, &min_vruntime_cb);
}

/*
 * Re-activate an entity that was delayed-dequeued: it never left the
 * runqueue, so instead of a full enqueue we only fix up its lag (under
 * DELAY_ZERO), refresh its load average and clear the delayed state.
 */
static void requeue_delayed_entity(struct sched_entity *se)
{
    struct cfs_rq *cfs_rq = cfs_rq_of(se);

    /*
     * se->sched_delayed should imply: se->on_rq == 1.
     * Because a delayed entity is one that is still on
     * the runqueue competing until eligibility.
     */
    SCHED_WARN_ON(!se->sched_delayed);
    SCHED_WARN_ON(!se->on_rq);

    if (sched_feat(DELAY_ZERO))
    {
        /*
         * DELAY_ZERO: reset any positive lag accumulated while
         * delayed. Take the entity out of the tree (unless it is
         * curr, which is not in the tree), re-place it with zero
         * lag, and put it back.
         */
        update_entity_lag(cfs_rq, se);
        if (se->vlag > 0)
        {
            /* Hide the entity from the placement averages meanwhile. */
            cfs_rq->nr_queued--;
            if (se != cfs_rq->curr)
                __dequeue_entity(cfs_rq, se);
            se->vlag = 0;
            place_entity(cfs_rq, se, 0);
            if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
            cfs_rq->nr_queued++;
        }
    }

    update_load_avg(cfs_rq, se, 0);
    clear_delayed(se);
}

/*
 * Enqueue @se on @cfs_rq: (re)place it in virtual time, sync load
 * averages and group weight, insert it into the timeline tree, update
 * the accounting, and handle the cfs_rq's transition from empty
 * (leaf-list membership / throttle clock bookkeeping).
 */
static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
    bool curr = cfs_rq->curr == se;

    /*
     * If we're the current task, we must renormalise before calling
     * update_curr().
     */
    if (curr)
        place_entity(cfs_rq, se, flags);

    update_curr(cfs_rq);

    /*
     * When enqueuing a sched_entity, we must:
     *   - Update loads to have both entity and cfs_rq synced with now.
     *   - For group_entity, update its runnable_weight to reflect the new
     *     h_nr_runnable of its group cfs_rq.
     *   - For group_entity, update its weight to reflect the new share of
     *     its group cfs_rq
     *   - Add its new weight to cfs_rq->load.weight
     */
    update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
    se_update_runnable(se);
    /*
     * XXX update_load_avg() above will have attached us to the pelt sum;
     * but update_cfs_group() here will re-adjust the weight and have to
     * undo/redo all that. Seems wasteful.
     */
    update_cfs_group(se);

    /*
     * XXX now that the entity has been re-weighted, and it's lag adjusted,
     * we can place the entity.
     */
    if (!curr)
        place_entity(cfs_rq, se, flags);

    account_entity_enqueue(cfs_rq, se);

    /* Entity has migrated, no longer consider this task hot */
    if (flags & ENQUEUE_MIGRATED)
        se->exec_start = 0;

    check_schedstat_required();
    update_stats_enqueue_fair(cfs_rq, se, flags);
    /* curr is never kept in the timeline tree. */
    if (!curr)
        __enqueue_entity(cfs_rq, se);
    se->on_rq = 1;

    /* First entity on this cfs_rq: it (re)joins the hierarchy. */
    if (cfs_rq->nr_queued == 1)
    {
        check_enqueue_throttle(cfs_rq);
        if (!throttled_hierarchy(cfs_rq))
        {
            list_add_leaf_cfs_rq(cfs_rq);
        }
        else
        {
#ifdef CONFIG_CFS_BANDWIDTH
            struct rq *rq = rq_of(cfs_rq);

            /* Record when throttling started being observable. */
            if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock)
                cfs_rq->throttled_clock = rq_clock(rq);
            if (!cfs_rq->throttled_clock_self)
                cfs_rq->throttled_clock_self = rq_clock(rq);
#endif
        }
    }
}

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 *
 * Two-phase hierarchy walk: the first loop enqueues entities that are
 * not yet on_rq (stopping at the first ancestor that is); the second
 * updates load/weight accounting for the already-queued ancestors.
 * Both loops bail out early at a throttled cfs_rq.
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
    struct cfs_rq *cfs_rq;
    struct sched_entity *se = &p->se;
    int h_nr_idle = task_has_idle_policy(p);
    int h_nr_runnable = 1;
    int task_new = !(flags & ENQUEUE_WAKEUP);
    int rq_h_nr_queued = rq->cfs.h_nr_queued;
    u64 slice = 0;

    /*
     * The code below (indirectly) updates schedutil which looks at
     * the cfs_rq utilization to select a frequency.
     * Let's add the task's estimated utilization to the cfs_rq's
     * estimated utilization, before we update schedutil.
     */
    if (!p->se.sched_delayed || (flags & ENQUEUE_DELAYED))
        util_est_enqueue(&rq->cfs, p);

    /* A delayed entity is still queued; requeueing it is all we need. */
    if (flags & ENQUEUE_DELAYED)
    {
        requeue_delayed_entity(se);
        return;
    }

    /* A new task enqueued in the delayed state is not yet runnable. */
    if (task_new && se->sched_delayed)
        h_nr_runnable = 0;

    for_each_sched_entity(se)
    {
        if (se->on_rq)
        {
            /* Ancestor already queued; at most un-delay it and stop. */
            if (se->sched_delayed)
                requeue_delayed_entity(se);
            break;
        }
        cfs_rq = cfs_rq_of(se);

        /*
         * Basically set the slice of group entries to the min_slice of
         * their respective cfs_rq. This ensures the group can service
         * its entities in the desired time-frame.
         */
        if (slice)
        {
            se->slice = slice;
            se->custom_slice = 1;
        }
        enqueue_entity(cfs_rq, se, flags);
        slice = cfs_rq_min_slice(cfs_rq);

        cfs_rq->h_nr_runnable += h_nr_runnable;
        cfs_rq->h_nr_queued++;
        cfs_rq->h_nr_idle += h_nr_idle;

        /* Below an idle cfs_rq, everything counts as idle. */
        if (cfs_rq_is_idle(cfs_rq))
            h_nr_idle = 1;

        /* end evaluation on encountering a throttled cfs_rq */
        if (cfs_rq_throttled(cfs_rq))
            goto enqueue_throttle;

        /* Only the bottom-most enqueue keeps the caller's flags. */
        flags = ENQUEUE_WAKEUP;
    }

    /* Remaining ancestors were already on_rq: just refresh accounting. */
    for_each_sched_entity(se)
    {
        cfs_rq = cfs_rq_of(se);

        update_load_avg(cfs_rq, se, UPDATE_TG);
        se_update_runnable(se);
        update_cfs_group(se);

        /* Propagate the min slice up; curr is not in the tree. */
        se->slice = slice;
        if (se != cfs_rq->curr)
            min_vruntime_cb_propagate(&se->run_node, NULL);
        slice = cfs_rq_min_slice(cfs_rq);

        cfs_rq->h_nr_runnable += h_nr_runnable;
        cfs_rq->h_nr_queued++;
        cfs_rq->h_nr_idle += h_nr_idle;

        if (cfs_rq_is_idle(cfs_rq))
            h_nr_idle = 1;

        /* end evaluation on encountering a throttled cfs_rq */
        if (cfs_rq_throttled(cfs_rq))
            goto enqueue_throttle;
    }

    /* First fair task on this rq: kick off the fair deadline-server. */
    if (!rq_h_nr_queued && rq->cfs.h_nr_queued)
    {
        /* Account for idle runtime */
        if (!rq->nr_running)
            dl_server_update_idle_time(rq, rq->curr);
        dl_server_start(&rq->fair_server);
    }

    /* At this point se is NULL and we are at root level */
    add_nr_running(rq, 1);

    /*
     * Since new tasks are assigned an initial util_avg equal to
     * half of the spare capacity of their CPU, tiny tasks have the
     * ability to cross the overutilized threshold, which will
     * result in the load balancer ruining all the task placement
     * done by EAS. As a way to mitigate that effect, do not account
     * for the first enqueue operation of new tasks during the
     * overutilized flag detection.
     *
     * A better way of solving this problem would be to wait for
     * the PELT signals of tasks to converge before taking them
     * into account, but that is not straightforward to implement,
     * and the following generally works well enough in practice.
     */
    if (!task_new)
        check_update_overutilized_status(rq);

enqueue_throttle:
    assert_list_leaf_cfs_rq(rq);

    hrtick_update(rq);
}
