#include "internal.h"

static inline void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);

	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
    __dequeue_dl_entity(dl_se);

    if (flags & (DEQUEUE_SAVE | DEQUEUE_MIGRATING))
    {
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        sub_running_bw(dl_se, dl_rq);
        sub_rq_bw(dl_se, dl_rq);
    }

    /*
     * This check allows to start the inactive timer (or to immediately
     * decrease the active utilization, if needed) in two cases:
     * when the task blocks and when it is terminating
     * (p->state == TASK_DEAD). We can handle the two cases in the same
     * way, because from GRUB's point of view the same thing is happening
     * (the task moves from "active contending" to "active non contending"
     * or "inactive")
     */
    if (flags & DEQUEUE_SLEEP)
        task_non_contending(dl_se);
}

static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
    update_curr_dl(rq);

    if (p->on_rq == TASK_ON_RQ_MIGRATING)
        flags |= DEQUEUE_MIGRATING;

    dequeue_dl_entity(&p->dl, flags);
    if (!p->dl.dl_throttled && !dl_server(&p->dl))
        dequeue_pushable_dl_task(rq, p);

    return true;
}
