#include "internal.h"

/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to prevent the task from overrunning
 * its density.
 *
 * Reasoning: a task may overrun the density if:
 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * This way, the runtime is set to the maximum value the task can
 * consume without exceeding the allowed density.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 remaining = dl_se->deadline - rq_clock(rq);

	/*
	 * A constrained task whose deadline is already in the past must
	 * have been throttled before this point; see the comments in
	 * update_dl_entity() for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	/*
	 * Scale the time remaining until the deadline by the task's
	 * density (dl_runtime / dl_deadline, BW_SHIFT fixed point) to
	 * obtain the largest runtime that does not break the density.
	 */
	dl_se->runtime = (dl_se->dl_density * remaining) >> BW_SHIFT;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could cause the entity to overflow; see
 * dl_entity_overflow() for more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revisited CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments in the update_dl_revised_wakeup() function
 * for more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se)
{
    struct rq *rq = rq_of_dl_se(dl_se);

    if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
        dl_entity_overflow(dl_se, rq_clock(rq)))
    {

        if (unlikely(!dl_is_implicit(dl_se) &&
                     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
                     !is_dl_boosted(dl_se)))
        {
            update_dl_revised_wakeup(dl_se, rq);
            return;
        }

        replenish_dl_new_period(dl_se, rq);
    }
    else if (dl_server(dl_se) && dl_se->dl_defer)
    {
        /*
         * The server can still use its previous deadline, so check if
         * it left the dl_defer_running state.
         */
        if (!dl_se->dl_defer_running)
        {
            dl_se->dl_defer_armed = 1;
            dl_se->dl_throttled = 1;
        }
    }
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishment timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
    struct rq *rq = rq_of_dl_se(dl_se);

    if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
        dl_time_before(rq_clock(rq), dl_next_period(dl_se)))
    {
        if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
            return;
        dl_se->dl_throttled = 1;
        if (dl_se->runtime > 0)
            dl_se->runtime = 0;
    }
}

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
    struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

    update_rq_clock(rq);

    WARN_ON(is_dl_boosted(dl_se));
    WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

    /*
     * A throttled entity is racing with its deadline timer: the timer
     * handler will take care of properly recharging the runtime and
     * postponing the deadline, so there is nothing to do here.
     */
    if (dl_se->dl_throttled)
        return;

    /*
     * Deadlines in the future are set from the regular wall clock so
     * that execution overheads (time spent in hardirq context, etc.)
     * are accounted for.
     */
    replenish_dl_new_period(dl_se, rq);
}

/*
 * Account a newly enqueued deadline entity: bump the dl_rq's runnable
 * deadline-task count and the rq-wide nr_running.
 *
 * Note: the previous version read dl_se->deadline into an unused local;
 * that dead code has been removed. @dl_se is kept in the signature for
 * interface stability with callers such as __enqueue_dl_entity().
 */
static inline void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
    dl_rq->dl_nr_running++;
    add_nr_running(rq_of_dl_rq(dl_rq), 1);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
    struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

    /* The entity must not already sit on a dl runqueue. */
    WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));

    /* Insert into the deadline-ordered cached rbtree, then account it. */
    rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
    inc_dl_tasks(dl_se, dl_rq);
}

/*
 * Enqueue a deadline entity, applying the CBS wakeup rules and the
 * throttling/deferral bookkeeping implied by @flags (ENQUEUE_WAKEUP,
 * ENQUEUE_REPLENISH, ENQUEUE_RESTORE, ENQUEUE_MIGRATING). May return
 * early without inserting the entity when it is (or becomes) throttled.
 */
static void enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
    WARN_ON_ONCE(on_dl_rq(dl_se));

    update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);

    /*
     * Check if a constrained deadline task was activated
     * after the deadline but before the next period.
     * If that is the case, the task will be throttled and
     * the replenishment timer will be set to the next period.
     */
    if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
        dl_check_constrained_dl(dl_se);

    /*
     * A restore or a migration re-adds the entity's bandwidth to this
     * rq's accounting (presumably removed on the dequeue/migration-out
     * side — verify against the matching dequeue path).
     */
    if (flags & (ENQUEUE_RESTORE | ENQUEUE_MIGRATING))
    {
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        add_rq_bw(dl_se, dl_rq);
        add_running_bw(dl_se, dl_rq);
    }

    /*
     * If p is throttled, we do not enqueue it. In fact, if it exhausted
     * its budget it needs a replenishment and, since it now is on
     * its rq, the bandwidth timer callback (which clearly has not
     * run yet) will take care of this.
     * However, the active utilization does not depend on the fact
     * that the task is on the runqueue or not (but depends on the
     * task's state - in GRUB parlance, "inactive" vs "active contending").
     * In other words, even if a task is throttled its utilization must
     * be counted in the active utilization; hence, we need to call
     * add_running_bw().
     */
    if (!dl_se->dl_defer && dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH))
    {
        if (flags & ENQUEUE_WAKEUP)
            task_contending(dl_se, flags);

        return;
    }

    /*
     * If this is a wakeup or a new instance, the scheduling
     * parameters of the task might need updating. Otherwise,
     * we want a replenishment of its runtime.
     */
    if (flags & ENQUEUE_WAKEUP)
    {
        task_contending(dl_se, flags);
        update_dl_entity(dl_se);
    }
    else if (flags & ENQUEUE_REPLENISH)
    {
        replenish_dl_entity(dl_se);
    }
    else if ((flags & ENQUEUE_RESTORE) &&
             !is_dl_boosted(dl_se) &&
             dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se))))
    {
        /* Restoring with a stale (past) deadline: start a fresh instance. */
        setup_new_dl_entity(dl_se);
    }

    /*
     * If the reservation is still throttled, e.g., it got replenished but is a
     * deferred task and still got to wait, don't enqueue.
     */
    if (dl_se->dl_throttled && start_dl_timer(dl_se))
        return;

    /*
     * We're about to enqueue, make sure we're not ->dl_throttled!
     * In case the timer was not started, say because the defer time
     * has passed, mark as not throttled and mark unarmed.
     * Also cancel earlier timers, since letting those run is pointless.
     */
    if (dl_se->dl_throttled)
    {
        hrtimer_try_to_cancel(&dl_se->dl_timer);
        dl_se->dl_defer_armed = 0;
        dl_se->dl_throttled = 0;
    }

    __enqueue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
    if (is_dl_boosted(&p->dl))
    {
        /*
         * Because of delays in the detection of the overrun of a
         * thread's runtime, it might be the case that a thread
         * goes to sleep in a rt mutex with negative runtime. As
         * a consequence, the thread will be throttled.
         *
         * While waiting for the mutex, this thread can also be
         * boosted via PI, resulting in a thread that is throttled
         * and boosted at the same time.
         *
         * In this case, the boost overrides the throttle.
         */
        if (p->dl.dl_throttled)
        {
            /*
             * The replenish timer needs to be canceled. No
             * problem if it fires concurrently: boosted threads
             * are ignored in dl_task_timer().
             */
            cancel_replenish_timer(&p->dl);
            p->dl.dl_throttled = 0;
        }
    }
    else if (!dl_prio(p->normal_prio))
    {
        /*
         * Special case in which we have a !SCHED_DEADLINE task that is going
         * to be deboosted, but exceeds its runtime while doing so. No point in
         * replenishing it, as it's going to return back to its original
         * scheduling class after this. If it has been throttled, we need to
         * clear the flag, otherwise the task may wake up as throttled after
         * being boosted again with no means to replenish the runtime and clear
         * the throttle.
         */
        p->dl.dl_throttled = 0;
        return;
    }

    check_schedstat_required();
    update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);

    if (p->on_rq == TASK_ON_RQ_MIGRATING)
        flags |= ENQUEUE_MIGRATING;

    enqueue_dl_entity(&p->dl, flags);

    if (dl_server(&p->dl))
        return;

    if (task_is_blocked(p))
        return;

    if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
        enqueue_pushable_dl_task(rq, p);
}
