#include "internal.h"

/*
 * Charge @delta_exec of consumed CPU time to @dl_se and enforce the CBS:
 * when the entity exhausts its budget (or yielded), dequeue it and arm the
 * replenishment timer for the next period.
 *
 * NOTE(review): presumably called with the rq lock held (it manipulates
 * rq state and takes rt_runtime_lock nested) — confirm against callers.
 */
static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
{
    s64 scaled_delta_exec;

    if (unlikely(delta_exec <= 0))
    {
        /*
         * Nothing to account; but a yielded entity gives up the rest of
         * its budget, so it must still be throttled.
         */
        if (unlikely(dl_se->dl_yielded))
            goto throttle;
        return;
    }

    /* A throttled, non-deferred server has nothing left to account. */
    if (dl_server(dl_se) && dl_se->dl_throttled && !dl_se->dl_defer)
        return;

    /* Special entities are exempt from bandwidth accounting. */
    if (dl_entity_is_special(dl_se))
        return;

    /*
     * Tasks get their consumed time scaled (CPU capacity/frequency
     * invariance); servers account plain delta_exec.
     */
    scaled_delta_exec = delta_exec;
    if (!dl_server(dl_se))
        scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);

    dl_se->runtime -= scaled_delta_exec;

    /*
     * The fair server can consume its runtime while throttled (not queued/
     * running as regular CFS).
     *
     * If the server consumes its entire runtime in this state, the server
     * is not required for the current period. Thus, reset the server by
     * starting a new period, pushing the activation.
     */
    if (dl_se->dl_defer && dl_se->dl_throttled && dl_runtime_exceeded(dl_se))
    {
        /*
         * If the server was previously activated - the starving condition
         * took place - at this point it went away because the fair scheduler
         * was able to get runtime in background. So return to the initial
         * state.
         */
        dl_se->dl_defer_running = 0;

        hrtimer_try_to_cancel(&dl_se->dl_timer);

        /* Push the activation out: fresh runtime, new period. */
        replenish_dl_new_period(dl_se, dl_se->rq);

        /*
         * Not being able to start the timer seems problematic. If it could not
         * be started for whatever reason, we need to "unthrottle" the DL server
         * and queue right away. Otherwise nothing might queue it. That's similar
         * to what enqueue_dl_entity() does on start_dl_timer==0. For now, just warn.
         */
        WARN_ON_ONCE(!start_dl_timer(dl_se));

        return;
    }

throttle:
    if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded)
    {
        dl_se->dl_throttled = 1;

        /* If requested, inform the user about runtime overruns. */
        if (dl_runtime_exceeded(dl_se) &&
            (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
            dl_se->dl_overrun = 1;

        dequeue_dl_entity(dl_se, 0);
        /* Servers have no task: stats/pushable bookkeeping is task-only. */
        if (!dl_server(dl_se))
        {
            update_stats_dequeue_dl(&rq->dl, dl_se, 0);
            dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
        }

        /*
         * If the timer could not be armed (or the entity is PI-boosted),
         * replenish and re-enqueue immediately so the entity is not lost.
         */
        if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
        {
            if (dl_server(dl_se))
                enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
            else
                enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
        }

        /* If someone else now has the earliest deadline, reschedule. */
        if (!is_leftmost(dl_se, &rq->dl))
            resched_curr(rq);
    }

    /*
     * The fair server (sole dl_server) does not account for real-time
     * workload because it is running fair work.
     */
    if (dl_se == &rq->fair_server)
        return;

#ifdef CONFIG_RT_GROUP_SCHED
    /*
     * Because -- for now -- we share the rt bandwidth, we need to
     * account our runtime there too, otherwise actual rt tasks
     * would be able to exceed the shared quota.
     *
     * Account to the root rt group for now.
     *
     * The solution we're working towards is having the RT groups scheduled
     * using deadline servers -- however there's a few nasties to figure
     * out before that can happen.
     */
    if (rt_bandwidth_enabled())
    {
        struct rt_rq *rt_rq = &rq->rt;

        raw_spin_lock(&rt_rq->rt_runtime_lock);
        /*
         * We'll let actual RT tasks worry about the overflow here, we
         * have our own CBS to keep us inline; only account when RT
         * bandwidth is relevant.
         */
        if (sched_rt_bandwidth_account(rt_rq))
            rt_rq->rt_time += delta_exec;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
    }
#endif /* CONFIG_RT_GROUP_SCHED */
}

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
    struct sched_dl_entity *dl_se = &rq->donor->dl;

    /* Bail if the donor is no longer a -deadline task, or left the dl_rq. */
    if (!dl_task(rq->donor) || !on_dl_rq(dl_se))
        return;

    /*
     * Consumed budget is computed from time as observed by schedulable
     * tasks (excluding hardirq time, etc.), whereas deadlines use hard
     * walltime. This seems to be the more natural solution, but the full
     * ramifications of this approach need further study.
     */
    update_curr_dl_se(rq, dl_se, update_curr_common(rq));
}
