#include "internal.h"

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int wake_flags)
{
    struct task_struct *donor = rq->donor;
    struct sched_entity *se = &donor->se, *pse = &p->se;
    struct cfs_rq *cfs_rq = task_cfs_rq(donor);
    int cse_is_idle, pse_is_idle;

    /* Waking task is already the one on the CPU; nothing to do. */
    if (unlikely(se == pse))
        return;

    /*
     * We can come here with TIF_NEED_RESCHED already set from new task
     * wake up path.
     *
     * Note: this also catches the edge-case of curr being in a throttled
     * group (e.g. via set_curr_task), since update_curr() (in the
     * enqueue of curr) will have resulted in resched being set.  This
     * prevents us from potentially nominating it as a false LAST_BUDDY
     * below.
     */
    if (test_tsk_need_resched(rq->curr))
        return;

    /*
     * Walk @se and @pse up to siblings in the deepest common cfs_rq, so
     * the idle/eligibility comparisons below are made at the same
     * hierarchy level.
     */
    find_matching_se(&se, &pse);
    WARN_ON_ONCE(!pse);

    cse_is_idle = se_is_idle(se);
    pse_is_idle = se_is_idle(pse);

    /*
     * Preempt an idle entity in favor of a non-idle entity (and don't preempt
     * in the inverse case).
     */
    if (cse_is_idle && !pse_is_idle)
        goto preempt;

    /*
     * Never let an idle entity preempt a non-idle one.  Without this
     * guard a wakeup inside an idle group (which the task-policy check
     * below does not catch) could still reach the pick_eevdf() test and
     * resched a non-idle current entity — exactly the "inverse case"
     * the comment above promises not to preempt in.
     */
    if (cse_is_idle != pse_is_idle)
        return;

    /*
     * BATCH and IDLE tasks do not preempt others.
     */
    if (unlikely(!normal_policy(p->policy)))
        return;

    /* Bring curr's runtime accounting up to date before re-picking. */
    cfs_rq = cfs_rq_of(se);
    update_curr(cfs_rq);

    /*
     * If @p has become the most eligible task, force preemption.
     */
    if (pick_eevdf(cfs_rq) == pse)
        goto preempt;

    return;

preempt:
    resched_curr_lazy(rq);
}
