// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"

#include "pelt.h"
#include "walt.h"

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

#ifdef CONFIG_SCHED_RT_CAS
unsigned int sysctl_sched_enable_rt_cas = 1;
#endif

#ifdef CONFIG_SCHED_RT_ACTIVE_LB
unsigned int sysctl_sched_enable_rt_active_lb = 1;
#endif

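/*
 * Per-bandwidth period timer. Each expiry replenishes runtime for all rt_rqs
 * served by this rt_bandwidth via do_sched_rt_period_timer(); the runtime
 * lock is dropped around that call because it takes rq and rt_rq locks
 * itself. The timer stops once every rt_rq it serves has gone idle.
 */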
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
    struct rt_bandwidth *rt_b = container_of(timer, struct rt_bandwidth, rt_period_timer);
    int idle = 0;
    int overrun;

    raw_spin_lock(&rt_b->rt_runtime_lock);
    for (;;) {
        overrun = hrtimer_forward_now(timer, rt_b->rt_period);
        if (!overrun) {
            break;
        }

        raw_spin_unlock(&rt_b->rt_runtime_lock);
        idle = do_sched_rt_period_timer(rt_b, overrun);
        raw_spin_lock(&rt_b->rt_runtime_lock);
    }
    if (idle) {
        rt_b->rt_period_active = 0;
    }
    raw_spin_unlock(&rt_b->rt_runtime_lock);

    return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
    rt_b->rt_period = ns_to_ktime(period);
    rt_b->rt_runtime = runtime;

    raw_spin_lock_init(&rt_b->rt_runtime_lock);

    hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
    rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
    if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) {
        return;
    }

    raw_spin_lock(&rt_b->rt_runtime_lock);
    if (!rt_b->rt_period_active) {
        rt_b->rt_period_active = 1;
        /*
         * SCHED_DEADLINE updates the bandwidth, as a run away
         * RT task with a DL task could hog a CPU. But DL does
         * not reset the period. If a deadline task was running
         * without an RT task running, it can cause RT tasks to
         * throttle when they start up. Kick the timer right away
         * to update the period.
         */
        hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
        hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED_HARD);
    }
    raw_spin_unlock(&rt_b->rt_runtime_lock);
}

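/*
 * Initialize an rt_rq: empty per-priority run lists, a cleared bitmap with
 * the MAX_RT_PRIO delimiter bit set for sched_find_first_bit(), and reset
 * priority, throttling and runtime accounting state.
 */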
void init_rt_rq(struct rt_rq *rt_rq)
{
    struct rt_prio_array *array;
    int i;

    array = &rt_rq->active;
    for (i = 0; i < MAX_RT_PRIO; i++) {
        INIT_LIST_HEAD(array->queue + i);
        __clear_bit(i, array->bitmap);
    }
    /* delimiter for bitsearch: */
    __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
    rt_rq->highest_prio.curr = MAX_RT_PRIO;
    rt_rq->highest_prio.next = MAX_RT_PRIO;
    rt_rq->rt_nr_migratory = 0;
    rt_rq->overloaded = 0;
    plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
    /* We start in dequeued state, because no RT tasks are queued */
    rt_rq->rt_queued = 0;

    rt_rq->rt_time = 0;
    rt_rq->rt_throttled = 0;
    rt_rq->rt_runtime = 0;
    raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
    hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
    WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
    return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
    return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
    return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
    struct rt_rq *rt_rq = rt_se->rt_rq;

    return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
    int i;

    if (tg->rt_se) {
        destroy_rt_bandwidth(&tg->rt_bandwidth);
    }

    for_each_possible_cpu(i)
    {
        if (tg->rt_rq) {
            kfree(tg->rt_rq[i]);
        }
        if (tg->rt_se) {
            kfree(tg->rt_se[i]);
        }
    }

    kfree(tg->rt_rq);
    kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu,
                      struct sched_rt_entity *parent)
{
    struct rq *rq = cpu_rq(cpu);

    rt_rq->highest_prio.curr = MAX_RT_PRIO;
    rt_rq->rt_nr_boosted = 0;
    rt_rq->rq = rq;
    rt_rq->tg = tg;

    tg->rt_rq[cpu] = rt_rq;
    tg->rt_se[cpu] = rt_se;

    if (!rt_se) {
        return;
    }

    if (!parent) {
        rt_se->rt_rq = &rq->rt;
    } else {
        rt_se->rt_rq = parent->my_q;
    }

    rt_se->my_q = rt_rq;
    rt_se->parent = parent;
    INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
    struct rt_rq *rt_rq;
    struct sched_rt_entity *rt_se;
    int i;

    tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
    if (!tg->rt_rq) {
        goto err;
    }
    tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
    if (!tg->rt_se) {
        goto err;
    }

    init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(def_rt_bandwidth.rt_period), 0);

    for_each_possible_cpu(i)
    {
        rt_rq = kzalloc_node(sizeof(struct rt_rq), GFP_KERNEL, cpu_to_node(i));
        if (!rt_rq) {
            goto err;
        }

        rt_se = kzalloc_node(sizeof(struct sched_rt_entity), GFP_KERNEL, cpu_to_node(i));
        if (!rt_se) {
            goto err_free_rq;
        }

        init_rt_rq(rt_rq);
        rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
        init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
    }

    return 1;

err_free_rq:
    kfree(rt_rq);
err:
    return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
    return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
    return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
    struct task_struct *p = rt_task_of(rt_se);

    return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
    struct rq *rq = rq_of_rt_se(rt_se);

    return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg)
{
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
    return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
    /*
     * Try to pull RT tasks here if we lower this rq's prio and cpu is not
     * isolated
     */
    return rq->rt.highest_prio.curr > prev->prio && !cpu_isolated(cpu_of(rq));
}

static inline int rt_overloaded(struct rq *rq)
{
    return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
    if (!rq->online) {
        return;
    }

    cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
    /*
     * Make sure the mask is visible before we set
     * the overload count. That is checked to determine
     * if we should look at the mask. It would be a shame
     * if we looked at the mask, but the mask was not
     * updated yet.
     *
     * Matched by the barrier in pull_rt_task().
     */
    smp_wmb();
    atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
    if (!rq->online) {
        return;
    }

    /* the order here really doesn't matter */
    atomic_dec(&rq->rd->rto_count);
    cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

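/*
 * An rt_rq is overloaded when it has more than one queued RT task and at
 * least one of them can migrate. Propagate that state to the root domain's
 * rto_mask/rto_count so that other CPUs know they may pull from here.
 */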
static void update_rt_migration(struct rt_rq *rt_rq)
{
    if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
        if (!rt_rq->overloaded) {
            rt_set_overload(rq_of_rt_rq(rt_rq));
            rt_rq->overloaded = 1;
        }
    } else if (rt_rq->overloaded) {
        rt_clear_overload(rq_of_rt_rq(rt_rq));
        rt_rq->overloaded = 0;
    }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    struct task_struct *p;

    if (!rt_entity_is_task(rt_se)) {
        return;
    }

    p = rt_task_of(rt_se);
    rt_rq = &rq_of_rt_rq(rt_rq)->rt;

    rt_rq->rt_nr_total++;
    if (p->nr_cpus_allowed > 1) {
        rt_rq->rt_nr_migratory++;
    }

    update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    struct task_struct *p;

    if (!rt_entity_is_task(rt_se)) {
        return;
    }

    p = rt_task_of(rt_se);
    rt_rq = &rq_of_rt_rq(rt_rq)->rt;

    rt_rq->rt_nr_total--;
    if (p->nr_cpus_allowed > 1) {
        rt_rq->rt_nr_migratory--;
    }

    update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
    return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
    if (!has_pushable_tasks(rq)) {
        return;
    }

    queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
    queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

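/*
 * Pushable tasks live on a priority-sorted plist. Re-inserting keeps the
 * list ordered after a priority change, and highest_prio.next caches the
 * best pushable priority for cheap preemption checks.
 */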
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
    plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
    plist_node_init(&p->pushable_tasks, p->prio);
    plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

    /* Update the highest prio pushable task */
    if (p->prio < rq->rt.highest_prio.next) {
        rq->rt.highest_prio.next = p->prio;
    }
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
    plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

    /* Update the new highest prio pushable task */
    if (has_pushable_tasks(rq)) {
        p = plist_first_entry(&rq->rt.pushable_tasks, struct task_struct, pushable_tasks);
        rq->rt.highest_prio.next = p->prio;
    } else {
        rq->rt.highest_prio.next = MAX_RT_PRIO;
    }
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
    return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
    return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where the uclamp_min
 * value is higher than the capacity of a @cpu. For non-heterogeneous systems
 * this function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
    unsigned int min_cap;
    unsigned int max_cap;
    unsigned int cpu_cap;

    /* Only heterogeneous systems can benefit from this check */
    if (!static_branch_unlikely(&sched_asym_cpucapacity)) {
        return true;
    }

    min_cap = uclamp_eff_value(p, UCLAMP_MIN);
    max_cap = uclamp_eff_value(p, UCLAMP_MAX);

    cpu_cap = capacity_orig_of(cpu);

    return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
    return true;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
    if (!rt_rq->tg) {
        return RUNTIME_INF;
    }

    return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
    return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

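/*
 * Advance to the next task_group on the RCU-protected task_groups list,
 * skipping autogroups; returns NULL when the walk wraps back to the head.
 */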
static inline struct task_group *next_task_group(struct task_group *tg)
{
    do {
        tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
    } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

    if (&tg->list == &task_groups) {
        tg = NULL;
    }

    return tg;
}

/* Iterate over all rt_rqs of @rq by walking the task_group hierarchy. */
#define cycle_each_rt_rq(rt_rq, iter, rq)                                                   \
    for (iter = container_of(&task_groups, typeof(*iter), list);                            \
         (iter = next_task_group(iter)) && (rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define cycle_each_sched_rt_entity(rt_se) for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
    return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

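/*
 * (Re)attach a group rt_rq to the hierarchy once it has runnable tasks
 * again (e.g. after unthrottling), and reschedule if the group now holds
 * a higher priority than the running task.
 */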
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
    struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
    struct rq *rq = rq_of_rt_rq(rt_rq);
    struct sched_rt_entity *rt_se;

    int cpu = cpu_of(rq);

    rt_se = rt_rq->tg->rt_se[cpu];

    if (rt_rq->rt_nr_running) {
        if (!rt_se) {
            enqueue_top_rt_rq(rt_rq);
        } else if (!on_rt_rq(rt_se)) {
            enqueue_rt_entity(rt_se, 0);
        }

        if (rt_rq->highest_prio.curr < curr->prio) {
            resched_curr(rq);
        }
    }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
    struct sched_rt_entity *rt_se;
    int cpu = cpu_of(rq_of_rt_rq(rt_rq));

    rt_se = rt_rq->tg->rt_se[cpu];

    if (!rt_se) {
        dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
        /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
    } else if (on_rt_rq(rt_se)) {
        dequeue_rt_entity(rt_se, 0);
    }
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
    return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
    struct rt_rq *rt_rq = group_rt_rq(rt_se);
    struct task_struct *p;

    if (rt_rq) {
        return !!rt_rq->rt_nr_boosted;
    }

    p = rt_task_of(rt_se);
    return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
    return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
    return cpu_online_mask;
}
#endif

static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
    return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
    return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
    return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
    return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define cycle_each_rt_rq(rt_rq, iter, rq) for ((void)(iter), (rt_rq) = &(rq)->rt; (rt_rq); (rt_rq) = NULL)

#define cycle_each_sched_rt_entity(rt_se) for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
    return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

    if (!rt_rq->rt_nr_running) {
        return;
    }

    enqueue_top_rt_rq(rt_rq);
    resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
    dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
    return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
    return cpu_online_mask;
}

static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
    return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
    return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
    struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

    return (hrtimer_active(&rt_b->rt_period_timer) || rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
    struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
    struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
    int i, weight;
    u64 rt_period;

    weight = cpumask_weight(rd->span);

    raw_spin_lock(&rt_b->rt_runtime_lock);
    rt_period = ktime_to_ns(rt_b->rt_period);
    for_each_cpu(i, rd->span)
    {
        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
        s64 diff;

        if (iter == rt_rq) {
            continue;
        }

        raw_spin_lock(&iter->rt_runtime_lock);
        /*
         * Either all rqs have inf runtime and there's nothing to steal
         * or __disable_runtime() below sets a specific rq to inf to
         * indicate it has been disabled and disallow stealing.
         */
        if (iter->rt_runtime == RUNTIME_INF) {
            goto next;
        }

        /*
         * From runqueues with spare time, take 1/n part of their
         * spare time, but no more than our period.
         */
        diff = iter->rt_runtime - iter->rt_time;
        if (diff > 0) {
            diff = div_u64((u64)diff, weight);
            if (rt_rq->rt_runtime + diff > rt_period) {
                diff = rt_period - rt_rq->rt_runtime;
            }
            iter->rt_runtime -= diff;
            rt_rq->rt_runtime += diff;
            if (rt_rq->rt_runtime == rt_period) {
                raw_spin_unlock(&iter->rt_runtime_lock);
                break;
            }
        }
    next:
        raw_spin_unlock(&iter->rt_runtime_lock);
    }
    raw_spin_unlock(&rt_b->rt_runtime_lock);
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
    struct root_domain *rd = rq->rd;
    rt_rq_iter_t iter;
    struct rt_rq *rt_rq;

    if (unlikely(!scheduler_running)) {
        return;
    }

    cycle_each_rt_rq(rt_rq, iter, rq) {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        s64 want;
        int i;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
        /*
         * Either we're all inf and nobody needs to borrow, or we're
         * already disabled and thus have nothing to do, or we have
         * exactly the right amount of runtime to take out.
         */
        if (rt_rq->rt_runtime == RUNTIME_INF || rt_rq->rt_runtime == rt_b->rt_runtime) {
            goto balanced;
        }
        raw_spin_unlock(&rt_rq->rt_runtime_lock);

        /*
         * Calculate the difference between what we started out with
         * and what we currently have; that's the amount of runtime
         * we lent out and now have to reclaim.
         */
        want = rt_b->rt_runtime - rt_rq->rt_runtime;

        /*
         * Greedy reclaim, take back as much as we can.
         */
        for_each_cpu(i, rd->span)
        {
            struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
            s64 diff;

            /*
             * Can't reclaim from ourselves or disabled runqueues.
             */
            if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) {
                continue;
            }

            raw_spin_lock(&iter->rt_runtime_lock);
            if (want > 0) {
                diff = min_t(s64, iter->rt_runtime, want);
                iter->rt_runtime -= diff;
                want -= diff;
            } else {
                iter->rt_runtime -= want;
                want -= want;
            }
            raw_spin_unlock(&iter->rt_runtime_lock);

            if (!want) {
                break;
            }
        }

        raw_spin_lock(&rt_rq->rt_runtime_lock);
        /*
         * We cannot be left wanting - that would mean some runtime
         * leaked out of the system.
         */
        BUG_ON(want);
    balanced:
        /*
         * Disable all the borrow logic by pretending we have inf
         * runtime - in which case borrowing doesn't make sense.
         */
        rt_rq->rt_runtime = RUNTIME_INF;
        rt_rq->rt_throttled = 0;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        /* Make rt_rq available for pick_next_task() */
        sched_rt_rq_enqueue(rt_rq);
    }
}

static void __enable_runtime(struct rq *rq)
{
    rt_rq_iter_t iter;
    struct rt_rq *rt_rq;

    if (unlikely(!scheduler_running)) {
        return;
    }

    /*
     * Reset each runqueue's bandwidth settings
     */
    cycle_each_rt_rq(rt_rq, iter, rq) {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
        rt_rq->rt_runtime = rt_b->rt_runtime;
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        raw_spin_unlock(&rt_b->rt_runtime_lock);
    }
}

static void balance_runtime(struct rt_rq *rt_rq)
{
    if (!sched_feat(RT_RUNTIME_SHARE)) {
        return;
    }

    if (rt_rq->rt_time > rt_rq->rt_runtime) {
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        do_balance_runtime(rt_rq);
        raw_spin_lock(&rt_rq->rt_runtime_lock);
    }
}
#else  /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq)
{
}
#endif /* CONFIG_SMP */

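/*
 * Periodic replenishment: for every rt_rq served by @rt_b, refund up to
 * @overrun periods worth of runtime, unthrottle queues that dropped back
 * under their limit and re-enqueue them. Returns 1 when there is nothing
 * left to do, letting the caller stop the period timer.
 */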
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
    int i, idle = 1, throttled = 0;
    const struct cpumask *span;

    span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
    /*
     * When the tasks in the task_group run on either isolated or
     * non-isolated CPUs, whether they are isolcpus or were isolated
     * via cpusets, check all the online rt_rqs lest the timer run on
     * a CPU which does not service all runqueues, potentially leaving
     * other CPUs indefinitely throttled.
     */
    span = cpu_online_mask;
#endif
    for_each_cpu(i, span)
    {
        int enqueue = 0;
        struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
        struct rq *rq = rq_of_rt_rq(rt_rq);
        int skip;

        /*
         * When span == cpu_online_mask, taking each rq->lock
         * can be time-consuming. Try to avoid it when possible.
         */
        raw_spin_lock(&rt_rq->rt_runtime_lock);
        if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) {
            rt_rq->rt_runtime = rt_b->rt_runtime;
        }
        skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
        if (skip) {
            continue;
        }

        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);

        if (rt_rq->rt_time) {
            u64 runtime;

            raw_spin_lock(&rt_rq->rt_runtime_lock);
            if (rt_rq->rt_throttled) {
                balance_runtime(rt_rq);
            }
            runtime = rt_rq->rt_runtime;
            rt_rq->rt_time -= min(rt_rq->rt_time, overrun * runtime);
            if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                rt_rq->rt_throttled = 0;
                enqueue = 1;

                /*
                 * When we're idle and a woken (rt) task is
                 * throttled check_preempt_curr() will set
                 * skip_update and the time between the wakeup
                 * and this unthrottle will get accounted as
                 * 'runtime'.
                 */
                if (rt_rq->rt_nr_running && rq->curr == rq->idle) {
                    rq_clock_cancel_skipupdate(rq);
                }
            }
            if (rt_rq->rt_time || rt_rq->rt_nr_running) {
                idle = 0;
            }
            raw_spin_unlock(&rt_rq->rt_runtime_lock);
        } else if (rt_rq->rt_nr_running) {
            idle = 0;
            if (!rt_rq_throttled(rt_rq)) {
                enqueue = 1;
            }
        }
        if (rt_rq->rt_throttled) {
            throttled = 1;
        }

        if (enqueue) {
            sched_rt_rq_enqueue(rt_rq);
        }
        raw_spin_unlock(&rq->lock);
    }

    if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)) {
        return 1;
    }

    return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
    struct rt_rq *rt_rq = group_rt_rq(rt_se);

    if (rt_rq) {
        return rt_rq->highest_prio.curr;
    }
#endif

    return rt_task_of(rt_se)->prio;
}

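/*
 * Arm the period timer one full period from now if it is not already
 * active, so that a throttled rt_rq is guaranteed a future replenishment.
 */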
static inline void try_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
    raw_spin_lock(&rt_b->rt_runtime_lock);
    if (!rt_b->rt_period_active) {
        rt_b->rt_period_active = 1;
        hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
        hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED_HARD);
    }
    raw_spin_unlock(&rt_b->rt_runtime_lock);
}

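/*
 * Check whether this rt_rq has used up its runtime for the current period.
 * Called with rt_rq->rt_runtime_lock held; on a real overrun the queue is
 * marked throttled and dequeued, and 1 is returned so the caller can
 * reschedule.
 */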
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
    u64 runtime = sched_rt_runtime(rt_rq);

    if (rt_rq->rt_throttled) {
        return rt_rq_throttled(rt_rq);
    }

    if (runtime >= sched_rt_period(rt_rq)) {
        return 0;
    }

    balance_runtime(rt_rq);
    runtime = sched_rt_runtime(rt_rq);
    if (runtime == RUNTIME_INF) {
        return 0;
    }

    if (rt_rq->rt_time > runtime) {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

        /*
         * Don't actually throttle groups that have no runtime assigned
         * but accrue some time due to boosting.
         */
        if (likely(rt_b->rt_runtime)) {
            rt_rq->rt_throttled = 1;
            printk_deferred_once("sched: RT throttling activated\n");
        } else {
            /*
             * In case we did anyway, make it go away,
             * replenishment is a joke, since it will replenish us
             * with exactly 0 ns.
             */
            rt_rq->rt_time = 0;
        }

        if (rt_rq_throttled(rt_rq)) {
            sched_rt_rq_dequeue(rt_rq);
            return 1;
        }
    }

    return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
    struct task_struct *curr = rq->curr;
    struct sched_rt_entity *rt_se = &curr->rt;
    u64 delta_exec;
    u64 now;

    if (curr->sched_class != &rt_sched_class) {
        return;
    }

    now = rq_clock_task(rq);
    delta_exec = now - curr->se.exec_start;
    if (unlikely((s64)delta_exec <= 0)) {
        return;
    }

    schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));

    curr->se.sum_exec_runtime += delta_exec;
    account_group_exec_runtime(curr, delta_exec);

    curr->se.exec_start = now;
    cgroup_account_cputime(curr, delta_exec);

    if (!rt_bandwidth_enabled()) {
        return;
    }

    cycle_each_sched_rt_entity(rt_se) {
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        int exceeded;

        if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
            raw_spin_lock(&rt_rq->rt_runtime_lock);
            rt_rq->rt_time += delta_exec;
            exceeded = sched_rt_runtime_exceeded(rt_rq);
            if (exceeded) {
                resched_curr(rq);
            }
            raw_spin_unlock(&rt_rq->rt_runtime_lock);
            if (exceeded) {
                try_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
            }
        }
    }
}

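/*
 * dequeue_top_rt_rq()/enqueue_top_rt_rq() detach or attach the root rt_rq's
 * task count from rq->nr_running. This is how throttling hides RT tasks
 * from the scheduler without touching the per-priority lists.
 */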
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

    BUG_ON(&rq->rt != rt_rq);

    if (!rt_rq->rt_queued) {
        return;
    }

    BUG_ON(!rq->nr_running);

    sub_nr_running(rq, count);
    rt_rq->rt_queued = 0;
}

static void enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

    BUG_ON(&rq->rt != rt_rq);

    if (rt_rq->rt_queued) {
        return;
    }

    if (rt_rq_throttled(rt_rq)) {
        return;
    }

    if (rt_rq->rt_nr_running) {
        add_nr_running(rq, rt_rq->rt_nr_running);
        rt_rq->rt_queued = 1;
    }

    /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
    cpufreq_update_util(rq, 0);
}

#if defined CONFIG_SMP

static void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
    /*
     * Change rq's cpupri only if rt_rq is the top queue.
     */
    if (&rq->rt != rt_rq) {
        return;
    }
#endif
    if (rq->online && prio < prev_prio) {
        cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
    }
}

static void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
    struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
    /*
     * Change rq's cpupri only if rt_rq is the top queue.
     */
    if (&rq->rt != rt_rq) {
        return;
    }
#endif
    if (rq->online && rt_rq->highest_prio.curr != prev_prio) {
        cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
    }
}

#else /* CONFIG_SMP */

static inline void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
}
static inline void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
    int prev_prio = rt_rq->highest_prio.curr;

    if (prio < prev_prio) {
        rt_rq->highest_prio.curr = prio;
    }

    inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
    int prev_prio = rt_rq->highest_prio.curr;

    if (rt_rq->rt_nr_running) {
        WARN_ON(prio < prev_prio);

        /*
         * This may have been our highest task, and therefore
         * we may have some recomputation to do
         */
        if (prio == prev_prio) {
            struct rt_prio_array *array = &rt_rq->active;

            rt_rq->highest_prio.curr = sched_find_first_bit(array->bitmap);
        }
    } else {
        rt_rq->highest_prio.curr = MAX_RT_PRIO;
    }

    dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    if (rt_se_boosted(rt_se)) {
        rt_rq->rt_nr_boosted++;
    }

    if (rt_rq->tg) {
        start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
    }
}

static void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    if (rt_se_boosted(rt_se)) {
        rt_rq->rt_nr_boosted--;
    }

    WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    start_rt_bandwidth(&def_rt_bandwidth);
}

static inline void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
    struct rt_rq *group_rq = group_rt_rq(rt_se);

    if (group_rq) {
        return group_rq->rt_nr_running;
    } else {
        return 1;
    }
}

static inline unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
    struct rt_rq *group_rq = group_rt_rq(rt_se);
    struct task_struct *tsk;

    if (group_rq) {
        return group_rq->rr_nr_running;
    }

    tsk = rt_task_of(rt_se);

    return (tsk->policy == SCHED_RR) ? 1 : 0;
}

static inline void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    int prio = rt_se_prio(rt_se);

    WARN_ON(!rt_prio(prio));
    rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
    rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);

    inc_rt_prio(rt_rq, prio);
    inc_rt_migration(rt_se, rt_rq);
    inc_rt_group(rt_se, rt_rq);
}

static inline void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
    WARN_ON(!rt_prio(rt_se_prio(rt_se)));
    WARN_ON(!rt_rq->rt_nr_running);
    rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
    rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);

    dec_rt_prio(rt_rq, rt_se_prio(rt_se));
    dec_rt_migration(rt_se, rt_rq);
    dec_rt_group(rt_se, rt_rq);
}

/*
 * Change rt_se->run_list location unless SAVE && !MOVE
 *
 * assumes ENQUEUE/DEQUEUE flags match
 */
static inline bool move_entity(unsigned int flags)
{
    if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) {
        return false;
    }

    return true;
}

static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
    list_del_init(&rt_se->run_list);

    if (list_empty(array->queue + rt_se_prio(rt_se))) {
        __clear_bit(rt_se_prio(rt_se), array->bitmap);
    }

    rt_se->on_list = 0;
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
    struct rt_prio_array *array = &rt_rq->active;
    struct rt_rq *group_rq = group_rt_rq(rt_se);
    struct list_head *queue = array->queue + rt_se_prio(rt_se);

    /*
     * Don't enqueue the group if it is throttled, or when it is empty.
     * The latter is a consequence of the former when a child group
     * gets throttled and the current group doesn't have any other
     * active members.
     */
    if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
        if (rt_se->on_list) {
            __delist_rt_entity(rt_se, array);
        }
        return;
    }

    if (move_entity(flags)) {
        WARN_ON_ONCE(rt_se->on_list);
        if (flags & ENQUEUE_HEAD) {
            list_add(&rt_se->run_list, queue);
        } else {
            list_add_tail(&rt_se->run_list, queue);
        }

        __set_bit(rt_se_prio(rt_se), array->bitmap);
        rt_se->on_list = 1;
    }
    rt_se->on_rq = 1;

    inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
    struct rt_prio_array *array = &rt_rq->active;

    if (move_entity(flags)) {
        WARN_ON_ONCE(!rt_se->on_list);
        __delist_rt_entity(rt_se, array);
    }
    rt_se->on_rq = 0;

    dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
    struct sched_rt_entity *back = NULL;
    unsigned int rt_nr_running;

    cycle_each_sched_rt_entity(rt_se) {
        rt_se->back = back;
        back = rt_se;
    }

    rt_nr_running = rt_rq_of_se(back)->rt_nr_running;

    for (rt_se = back; rt_se; rt_se = rt_se->back) {
        if (on_rt_rq(rt_se)) {
            __dequeue_rt_entity(rt_se, flags);
        }
    }
    dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
}

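/*
 * Enqueue an entity and all of its parents: the whole stack is dequeued
 * top-down first, then re-added bottom-up so group priorities along the
 * path to the root are recomputed consistently.
 */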
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
    struct rq *rq = rq_of_rt_se(rt_se);

    dequeue_rt_stack(rt_se, flags);
    cycle_each_sched_rt_entity(rt_se) __enqueue_rt_entity(rt_se, flags);
    enqueue_top_rt_rq(&rq->rt);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
    struct rq *rq = rq_of_rt_se(rt_se);

    dequeue_rt_stack(rt_se, flags);

    cycle_each_sched_rt_entity(rt_se) {
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq && rt_rq->rt_nr_running) {
            __enqueue_rt_entity(rt_se, flags);
        }
    }
    enqueue_top_rt_rq(&rq->rt);
}

#ifdef CONFIG_SMP
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, bool sync)
{
    /*
     * If the waker is CFS, then an RT sync wakeup would preempt the waker
     * and force it to run for a likely small time after the RT wakee is
     * done. So, only honor RT sync wakeups from RT wakers.
     */
    return sync && task_has_rt_policy(rq->curr) && p->prio <= rq->rt.highest_prio.next &&
           rq->rt.rt_nr_running <= 2;
}
#else
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p, bool sync)
{
    return 0;
}
#endif

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
    struct sched_rt_entity *rt_se = &p->rt;
    bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);

    if (flags & ENQUEUE_WAKEUP) {
        rt_se->timeout = 0;
    }

    enqueue_rt_entity(rt_se, flags);
    walt_inc_cumulative_runnable_avg(rq, p);

    if (!task_current(rq, p) && p->nr_cpus_allowed > 1 && !should_honor_rt_sync(rq, p, sync)) {
        enqueue_pushable_task(rq, p);
    }
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
    struct sched_rt_entity *rt_se = &p->rt;

    update_curr_rt(rq);
    dequeue_rt_entity(rt_se, flags);
    walt_dec_cumulative_runnable_avg(rq, p);

    dequeue_pushable_task(rq, p);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
    if (on_rt_rq(rt_se)) {
        struct rt_prio_array *array = &rt_rq->active;
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        if (head) {
            list_move(&rt_se->run_list, queue);
        } else {
            list_move_tail(&rt_se->run_list, queue);
        }
    }
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
    struct sched_rt_entity *rt_se = &p->rt;
    struct rt_rq *rt_rq;

    cycle_each_sched_rt_entity(rt_se) {
        rt_rq = rt_rq_of_se(rt_se);
        requeue_rt_entity(rt_rq, rt_se, head);
    }
}

static void yield_task_rt(struct rq *rq)
{
    requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
    struct task_struct *curr;
    struct rq *rq;
    struct rq *this_cpu_rq;
    bool test;
    int target_cpu = -1;
    bool sync = !!(flags & WF_SYNC);
    int this_cpu;

    /* For anything but wake ups, just return the task_cpu */
    if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) {
        goto out;
    }

    rq = cpu_rq(cpu);

    rcu_read_lock();
    curr = READ_ONCE(rq->curr); /* unlocked access */
    this_cpu = smp_processor_id();
    this_cpu_rq = cpu_rq(this_cpu);

    /*
     * If the current task on @p's runqueue is an RT task, then
     * try to see if we can wake this RT task up on another
     * runqueue. Otherwise simply start this RT task
     * on its current runqueue.
     *
     * We want to avoid overloading runqueues. If the woken
     * task is a higher priority, then it will stay on this CPU
     * and the lower prio task should be moved to another CPU.
     * Even though this will probably make the lower prio task
     * lose its cache, we do not want to bounce a higher task
     * around just because it gave up its CPU, perhaps for a
     * lock?
     *
     * For equal prio tasks, we just let the scheduler sort it out.
     *
     * Otherwise, just let it ride on the affined RQ and the
     * post-schedule router will push the preempted task away
     *
     * This test is optimistic, if we get it wrong the load-balancer
     * will have to sort it out.
     *
     * We take into account the capacity of the CPU to ensure it fits the
     * requirement of the task - which is only important on heterogeneous
     * systems like big.LITTLE.
     */
    test = curr && unlikely(rt_task(curr)) && (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);

    /*
     * Respect the sync flag as long as the task can run on this CPU.
     */
    if (should_honor_rt_sync(this_cpu_rq, p, sync) && cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
        cpu = this_cpu;
        goto out_unlock;
    }

    if (test || !rt_task_fits_capacity(p, cpu)) {
        int target = find_lowest_rq(p);
        /*
         * Bail out if we were forcing a migration to find a better
         * fitting CPU but our search failed.
         */
        if (!test && target != -1 && !rt_task_fits_capacity(p, target)) {
            goto out_unlock;
        }

        /*
         * Don't bother moving it if the destination CPU is
         * not running a lower priority task.
         */
        if (target != -1 && p->prio < cpu_rq(target)->rt.highest_prio.curr) {
            cpu = target;
        }
    }

out_unlock:
    rcu_read_unlock();

out:
    return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
    /*
     * Current can't be migrated, useless to reschedule,
     * let's hope p can move out.
     */
    if (rq->curr->nr_cpus_allowed == 1 || !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) {
        return;
    }

    /*
     * p is migratable, so let's not schedule it and
     * see if it is pushed or pulled somewhere else.
     */
    if (p->nr_cpus_allowed != 1 && cpupri_find(&rq->rd->cpupri, p, NULL)) {
        return;
    }

    /*
     * There appear to be other CPUs that can accept
     * the current task but none can run 'p', so lets reschedule
     * to try and push the current task away:
     */
    requeue_task_rt(rq, p, 1);
    resched_curr(rq);
}

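/*
 * Balance callback used on the pick_next_task() path: if this rq is about
 * to drop to a lower priority, try to pull a runnable RT task from an
 * overloaded CPU before falling through to a lower scheduling class.
 */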
static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
    if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
        /*
         * This is OK, because current is on_cpu, which avoids it being
         * picked for load-balance and preemption/IRQs are still
         * disabled avoiding further scheduler activity on it and we've
         * not yet started the picking loop.
         */
        rq_unpin_lock(rq, rf);
        pull_rt_task(rq);
        rq_repin_lock(rq, rf);
    }

    return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
    if (p->prio < rq->curr->prio) {
        resched_curr(rq);
        return;
    }

#ifdef CONFIG_SMP
    /*
     * If:
     *
     * - the newly woken task is of equal priority to the current task
     * - the newly woken task is non-migratable while current is migratable
     * - current will be preempted on the next reschedule
     *
     * we should check to see if current can readily move to a different
     * cpu.  If so, we will reschedule to allow the push logic to try
     * to move current somewhere else, making room for our non-migratable
     * task.
     */
    if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) {
        check_preempt_equal_prio(rq, p);
    }
#endif
}

static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
    p->se.exec_start = rq_clock_task(rq);

    /* The running task is never eligible for pushing */
    dequeue_pushable_task(rq, p);

    if (!first) {
        return;
    }

    /*
     * If prev task was rt, put_prev_task() has already updated the
     * utilization. We only care of the case where we start to schedule a
     * rt task
     */
    if (rq->curr->sched_class != &rt_sched_class) {
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
    }

    rt_queue_push_tasks(rq);
}

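/*
 * Pick the first entity on the highest non-empty priority queue; the
 * MAX_RT_PRIO delimiter bit set in init_rt_rq() bounds the bitmap search.
 */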
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, struct rt_rq *rt_rq)
{
    struct rt_prio_array *array = &rt_rq->active;
    struct sched_rt_entity *next = NULL;
    struct list_head *queue;
    int idx;

    idx = sched_find_first_bit(array->bitmap);
    BUG_ON(idx >= MAX_RT_PRIO);

    queue = array->queue + idx;
    next = list_entry(queue->next, struct sched_rt_entity, run_list);

    return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
    struct sched_rt_entity *rt_se;
    struct rt_rq *rt_rq = &rq->rt;

    do {
        rt_se = pick_next_rt_entity(rq, rt_rq);
        BUG_ON(!rt_se);
        rt_rq = group_rt_rq(rt_se);
    } while (rt_rq);

    return rt_task_of(rt_se);
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
    struct task_struct *p;

    if (!sched_rt_runnable(rq)) {
        return NULL;
    }

    p = _pick_next_task_rt(rq);
    set_next_task_rt(rq, p, true);
    return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
    update_curr_rt(rq);

    update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);

    /*
     * The previous task needs to be made eligible for pushing
     * if it is still active
     */
    if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) {
        enqueue_pushable_task(rq, p);
    }
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
    if (!task_running(rq, p) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
        return 1;
    }

    return 0;
}

/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise
 */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
    struct plist_head *head = &rq->rt.pushable_tasks;
    struct task_struct *p;

    if (!has_pushable_tasks(rq)) {
        return NULL;
    }

    plist_for_each_entry(p, head, pushable_tasks)
    {
        if (pick_rt_task(rq, p, cpu)) {
            return p;
        }
    }

    return NULL;
}

#ifdef CONFIG_SCHED_RT_CAS
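/*
 * Capacity-aware selection for RT tasks: among the CPUs in @lowest_mask,
 * prefer the smallest-capacity sched_group that still fits the task's
 * uclamp-boosted utilization, then pick the least-utilized non-isolated
 * CPU within it. Returns -1 if CAS is disabled or no suitable CPU exists.
 */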
static int find_cas_cpu(struct sched_domain *sd, struct task_struct *task, struct cpumask *lowest_mask)
{
    struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
    struct sched_group *sg = NULL;
    struct sched_group *sg_target = NULL;
    struct sched_group *sg_backup = NULL;
    struct cpumask search_cpu, backup_search_cpu;
    int cpu = -1;
    int target_cpu = -1;
    unsigned long cpu_capacity;
    unsigned long boosted_tutil = uclamp_task_util(task);
    unsigned long target_capacity = ULONG_MAX;
    unsigned long util;
    unsigned long target_cpu_util = ULONG_MAX;
    int prev_cpu = task_cpu(task);
#ifdef CONFIG_SCHED_RTG
    struct cpumask *rtg_target = NULL;
#endif
    bool boosted = uclamp_boosted(task);

    if (!sysctl_sched_enable_rt_cas) {
        return -1;
    }

    rcu_read_lock();

#ifdef CONFIG_SCHED_RTG
    rtg_target = find_rtg_target(task);
#endif

    sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, 0));
    if (!sd) {
        rcu_read_unlock();
        return -1;
    }

    sg = sd->groups;
    do {
        if (!cpumask_intersects(lowest_mask, sched_group_span(sg))) {
            continue;
        }

        if (boosted) {
            if (cpumask_test_cpu(rd->max_cap_orig_cpu, sched_group_span(sg))) {
                sg_target = sg;
                break;
            }
        }

        cpu = group_first_cpu(sg);
#ifdef CONFIG_SCHED_RTG
        /* honor the rtg tasks */
        if (rtg_target) {
            if (cpumask_test_cpu(cpu, rtg_target)) {
                sg_target = sg;
                break;
            }

            /* active LB or big_task favor cpus with more capacity */
            if (task->state == TASK_RUNNING || boosted) {
                if (capacity_orig_of(cpu) > capacity_orig_of(cpumask_any(rtg_target))) {
                    sg_target = sg;
                    break;
                }

                sg_backup = sg;
                continue;
            }
        }
#endif
        /*
         * 1. add margin to support task migration
         * 2. if task_util is higher than the capacity of all cpus, make
         *    sure the sg_backup with the most powerful cpus is selected
         */
        if (!rt_task_fits_capacity(task, cpu)) {
            sg_backup = sg;
            continue;
        }

        /* support task boost */
        cpu_capacity = capacity_orig_of(cpu);
        if (boosted_tutil > cpu_capacity) {
            sg_backup = sg;
            continue;
        }

        /* sg_target: select the sg with smaller capacity */
        if (cpu_capacity < target_capacity) {
            target_capacity = cpu_capacity;
            sg_target = sg;
        }
    } while (sg = sg->next, sg != sd->groups);

    if (!sg_target) {
        sg_target = sg_backup;
    }

    if (sg_target) {
        cpumask_and(&search_cpu, lowest_mask, sched_group_span(sg_target));
        cpumask_copy(&backup_search_cpu, lowest_mask);
        cpumask_andnot(&backup_search_cpu, &backup_search_cpu, &search_cpu);
    } else {
        cpumask_copy(&search_cpu, lowest_mask);
        cpumask_clear(&backup_search_cpu);
    }

retry:
    cpu = cpumask_first(&search_cpu);
    do {
        trace_sched_find_cas_cpu_each(task, cpu, target_cpu, cpu_isolated(cpu), idle_cpu(cpu), boosted_tutil,
                                      cpu_util(cpu), capacity_orig_of(cpu));

        if (cpu_isolated(cpu)) {
            continue;
        }

        if (!cpumask_test_cpu(cpu, task->cpus_ptr)) {
            continue;
        }

        /* find best cpu with smallest max_capacity */
        if (target_cpu != -1 && capacity_orig_of(cpu) > capacity_orig_of(target_cpu)) {
            continue;
        }

        util = cpu_util(cpu);
        /* Find the least loaded CPU */
        if (util > target_cpu_util) {
            continue;
        }

        /*
         * If the previous CPU has the same load, keep it as
         * target_cpu
         */
        if (target_cpu_util == util && target_cpu == prev_cpu) {
            continue;
        }

        /*
         * If candidate CPU is the previous CPU, select it.
         * If all above conditions are same, select the least
         * cumulative window demand CPU.
         */
        target_cpu_util = util;
        target_cpu = cpu;
    } while ((cpu = cpumask_next(cpu, &search_cpu)) < nr_cpu_ids);

    if (target_cpu != -1 && cpumask_test_cpu(target_cpu, lowest_mask)) {
        goto done;
    } else if (!cpumask_empty(&backup_search_cpu)) {
        cpumask_copy(&search_cpu, &backup_search_cpu);
        cpumask_clear(&backup_search_cpu);
        goto retry;
    }

done:
    trace_sched_find_cas_cpu(task, lowest_mask, boosted_tutil, prev_cpu, target_cpu);
    rcu_read_unlock();
    return target_cpu;
}
#endif

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

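/*
 * Find a CPU whose highest runnable priority is lower than @task's, using
 * cpupri (and the CAS path when enabled). We prefer the task's previous
 * CPU, then a CPU close to it in the sched-domain topology, then anything
 * else in the lowest_mask.
 */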
static int find_lowest_rq(struct task_struct *task)
{
    struct sched_domain *sd = NULL;
    struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
    int this_cpu = smp_processor_id();
    int cpu = task_cpu(task);
    int ret;
#ifdef CONFIG_SCHED_RT_CAS
    int cas_cpu;
#endif

    /* Make sure the mask is initialized first */
    if (unlikely(!lowest_mask)) {
        return -1;
    }

    if (task->nr_cpus_allowed == 1) {
        return -1; /* No other targets possible */
    }

    /*
     * If we're on asym system ensure we consider the different capacities
     * of the CPUs when searching for the lowest_mask.
     */
    if (static_branch_unlikely(&sched_asym_cpucapacity)) {
        ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, task, lowest_mask, rt_task_fits_capacity);
    } else {
        ret = cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask);
    }

    if (!ret) {
        return -1; /* No targets found */
    }

#ifdef CONFIG_SCHED_RT_CAS
    cas_cpu = find_cas_cpu(sd, task, lowest_mask);
    if (cas_cpu != -1) {
        return cas_cpu;
    }
#endif

    /*
     * At this point we have built a mask of CPUs representing the
     * lowest priority tasks in the system.  Now we want to elect
     * the best one based on our affinity and topology.
     *
     * We prioritize the last CPU that the task executed on since
     * it is most likely cache-hot in that location.
     */
    if (cpumask_test_cpu(cpu, lowest_mask)) {
        return cpu;
    }

    /*
     * Otherwise, we consult the sched_domains span maps to figure
     * out which CPU is logically closest to our hot cache data.
     */
    if (!cpumask_test_cpu(this_cpu, lowest_mask)) {
        this_cpu = -1; /* Skip this_cpu opt if not among lowest */
    }

    rcu_read_lock();
    for_each_domain(cpu, sd)
    {
        if (sd->flags & SD_WAKE_AFFINE) {
            int best_cpu;

            /*
             * "this_cpu" is cheaper to preempt than a
             * remote processor.
             */
            if (this_cpu != -1 && cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                rcu_read_unlock();
                return this_cpu;
            }

            best_cpu = cpumask_first_and(lowest_mask, sched_domain_span(sd));
            if (best_cpu < nr_cpu_ids) {
                rcu_read_unlock();
                return best_cpu;
            }
        }
    }
    rcu_read_unlock();

    /*
     * And finally, if there were no matches within the domains
     * just give the caller *something* to work with from the compatible
     * locations.
     */
    if (this_cpu != -1) {
        return this_cpu;
    }

    cpu = cpumask_any(lowest_mask);
    if (cpu < nr_cpu_ids) {
        return cpu;
    }

    return -1;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
    struct task_struct *p;

    if (!has_pushable_tasks(rq)) {
        return NULL;
    }

    p = plist_first_entry(&rq->rt.pushable_tasks, struct task_struct, pushable_tasks);

    BUG_ON(rq->cpu != task_cpu(p));
    BUG_ON(task_current(rq, p));
    BUG_ON(p->nr_cpus_allowed <= 1);

    BUG_ON(!task_on_rq_queued(p));
    BUG_ON(!rt_task(p));

    return p;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
    struct rq *lowest_rq = NULL;
    int tries;
    int cpu;

    for (tries = 0; tries < RT_MAX_TRIES; tries++) {
        cpu = find_lowest_rq(task);
        if ((cpu == -1) || (cpu == rq->cpu)) {
            break;
        }

        lowest_rq = cpu_rq(cpu);
        if (lowest_rq->rt.highest_prio.curr <= task->prio) {
            /*
             * Target rq has tasks of equal or higher priority,
             * retrying does not release any lock and is unlikely
             * to yield a different result.
             */
            lowest_rq = NULL;
            break;
        }

        /* if the prio of this runqueue changed, try again */
        if (double_lock_balance(rq, lowest_rq)) {
            /*
             * We had to unlock the run queue. In
             * the mean time, task could have
             * migrated already or had its affinity changed.
             */
            struct task_struct *next_task = pick_next_pushable_task(rq);
            if (unlikely(next_task != task || !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr))) {
                double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
                break;
            }
        }

        /* If this rq is still suitable use it. */
        if (lowest_rq->rt.highest_prio.curr > task->prio) {
            break;
        }

        /* try again */
        double_unlock_balance(rq, lowest_rq);
        lowest_rq = NULL;
    }

    return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
    struct task_struct *next_task;
    struct rq *lowest_rq;
    int ret = 0;

    if (!rq->rt.overloaded) {
        return 0;
    }

    next_task = pick_next_pushable_task(rq);
    if (!next_task) {
        return 0;
    }

retry:
    if (WARN_ON(next_task == rq->curr)) {
        return 0;
    }

    /*
     * It's possible that next_task slipped in with a
     * higher priority than current. If that's the case,
     * just reschedule current.
     */
    if (unlikely(next_task->prio < rq->curr->prio)) {
        resched_curr(rq);
        return 0;
    }

    /* We might release rq lock */
    get_task_struct(next_task);

    /* find_lock_lowest_rq locks the rq if found */
    lowest_rq = find_lock_lowest_rq(next_task, rq);
    if (!lowest_rq) {
        struct task_struct *task;
        /*
         * find_lock_lowest_rq releases rq->lock
         * so it is possible that next_task has migrated.
         *
         * We need to make sure that the task is still on the same
         * run-queue and is also still the next task eligible for
         * pushing.
         */
        task = pick_next_pushable_task(rq);
        if (task == next_task) {
            /*
             * The task hasn't migrated, and is still the next
             * eligible task, but we failed to find a run-queue
             * to push it to.  Do not retry in this case, since
             * other CPUs will pull from us when ready.
             */
            goto out;
        }

        if (!task) {
            /* No more tasks, just exit */
            goto out;
        }

        /*
         * Something has shifted, try again.
         */
        put_task_struct(next_task);
        next_task = task;
        goto retry;
    }

    deactivate_task(rq, next_task, 0);
    set_task_cpu(next_task, lowest_rq->cpu);
    activate_task(lowest_rq, next_task, 0);
    ret = 1;

    resched_curr(lowest_rq);

    double_unlock_balance(rq, lowest_rq);

out:
    put_task_struct(next_task);

    return ret;
}

static void push_rt_tasks(struct rq *rq)
{
    /* push_rt_task() will return true if it moved an RT task */
    while (push_rt_task(rq)) {
        ;
    }
}

#ifdef HAVE_RT_PUSH_IPI

/*
 * When a high priority task schedules out from a CPU and a lower priority
 * task is scheduled in, a check is made to see if there are any RT tasks
 * on other CPUs that are waiting to run because a higher priority RT task
 * is currently running on its CPU. In this case, the CPU with multiple RT
 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
 * up that may be able to run one of its non-running queued RT tasks.
 *
 * All CPUs with overloaded RT tasks need to be notified as there is currently
 * no way to know which of these CPUs have the highest priority task waiting
 * to run. Instead of trying to take a spinlock on each of these CPUs,
 * which has been shown to cause large latency when done on machines with
 * many CPUs, an IPI is sent to those CPUs to have them push off their
 * overloaded RT tasks that are waiting to run.
 *
 * Just sending an IPI to each of the CPUs is also an issue, as on large
 * count CPU machines, this can cause an IPI storm on a CPU, especially
 * if it's the only CPU with multiple RT tasks queued and a large number
 * of CPUs are scheduling a lower priority task at the same time.
 *
 * Each root domain has its own irq work function that can iterate over
 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
 * tasks must be checked whether one or many CPUs are lowering their
 * priority, there's a single irq work iterator that will try to push
 * off the RT tasks that are waiting to run.
 *
 * When a CPU schedules a lower priority task, it will kick off the
 * irq work iterator that will jump to each CPU with overloaded RT tasks.
 * As it only takes the first CPU that schedules a lower priority task
 * to start the process, the rto_loop_start variable is claimed with an
 * atomic cmpxchg, and only the CPU that sees the 0 -> 1 transition will
 * try to take the rto_lock. This prevents high contention on the lock as
 * the process handles all CPUs scheduling lower priority tasks.
 *
 * All CPUs that are scheduling a lower priority task will increment the
 * rto_loop_next variable. This will make sure that the irq work iterator
 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
 * priority task, even if the iterator is in the middle of a scan. Incrementing
 * rto_loop_next will cause the iterator to perform another scan.
 *
 */
static int rto_next_cpu(struct root_domain *rd)
{
    int next;
    int cpu;

    /*
     * When starting the IPI RT pushing, rto_cpu is set to -1 and
     * rto_next_cpu() will simply return the first CPU found in
     * the rto_mask.
     *
     * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
     * will return the next CPU found in the rto_mask.
     *
     * If there are no more CPUs left in the rto_mask, then a check is made
     * against rto_loop and rto_loop_next. rto_loop is only updated with
     * the rto_lock held, but any CPU may increment the rto_loop_next
     * without any locking.
     */
    for (;;) {
        /* When rto_cpu is -1 this acts like cpumask_first() */
        cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);

        rd->rto_cpu = cpu;

        if (cpu < nr_cpu_ids) {
            return cpu;
        }

        rd->rto_cpu = -1;

        /*
         * ACQUIRE ensures we see the @rto_mask changes
         * made prior to the @next value observed.
         *
         * Matches WMB in rt_set_overload().
         */
        next = atomic_read_acquire(&rd->rto_loop_next);
        if (rd->rto_loop == next) {
            break;
        }

        rd->rto_loop = next;
    }

    return -1;
}
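
/*
 * Worked example of the termination logic above: suppose the iterator has
 * just run out of CPUs in rto_mask (rto_cpu is reset to -1) while
 * rto_loop == 3. If some CPU scheduled another lower priority task in the
 * meantime, it bumped rto_loop_next to 4; the acquire read then sees
 * 3 != 4, rto_loop becomes 4 and the scan restarts from the beginning of
 * rto_mask. Only when rto_loop == rto_loop_next does the iterator stop
 * and return -1.
 */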

static inline bool rto_start_trylock(atomic_t *v)
{
    return !atomic_cmpxchg_acquire(v, 0, 1);
}

static inline void rto_start_unlock(atomic_t *v)
{
    atomic_set_release(v, 0);
}

static void tell_cpu_to_push(struct rq *rq)
{
    int cpu = -1;

    /* Keep the loop going if the IPI is currently active */
    atomic_inc(&rq->rd->rto_loop_next);

    /* Only one CPU can initiate a loop at a time */
    if (!rto_start_trylock(&rq->rd->rto_loop_start)) {
        return;
    }

    raw_spin_lock(&rq->rd->rto_lock);

    /*
     * rto_cpu is updated under the lock; if it holds a valid CPU
     * then the IPI iterator is still running and will continue due to the
     * update to rto_loop_next above, and nothing needs to be done here.
     * Otherwise it is finishing up and an IPI needs to be sent.
     */
    if (rq->rd->rto_cpu < 0) {
        cpu = rto_next_cpu(rq->rd);
    }

    raw_spin_unlock(&rq->rd->rto_lock);

    rto_start_unlock(&rq->rd->rto_loop_start);

    if (cpu >= 0) {
        /* Make sure the rd does not get freed while pushing */
        sched_get_rd(rq->rd);
        irq_work_queue_on(&rq->rd->rto_push_work, cpu);
    }
}

/* Called from hardirq context */
void rto_push_irq_work_func(struct irq_work *work)
{
    struct root_domain *rd = container_of(work, struct root_domain, rto_push_work);
    struct rq *rq;
    int cpu;

    rq = this_rq();
    /*
     * We do not need to grab the lock to check for has_pushable_tasks.
     * When it gets updated, a check is made if a push is possible.
     */
    if (has_pushable_tasks(rq)) {
        raw_spin_lock(&rq->lock);
        push_rt_tasks(rq);
        raw_spin_unlock(&rq->lock);
    }

    raw_spin_lock(&rd->rto_lock);

    /* Pass the IPI to the next rt overloaded queue */
    cpu = rto_next_cpu(rd);

    raw_spin_unlock(&rd->rto_lock);

    if (cpu < 0) {
        sched_put_rd(rd);
        return;
    }

    /* Try the next RT overloaded CPU */
    irq_work_queue_on(&rd->rto_push_work, cpu);
}
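
/*
 * Rough end-to-end flow of the IPI push machinery (a sketch):
 *
 *   CPU A schedules a lower priority task
 *     -> tell_cpu_to_push(): bump rto_loop_next, maybe start the loop and
 *        queue rto_push_work on the first CPU in rto_mask
 *   CPU B (RT overloaded) runs rto_push_irq_work_func()
 *     -> pushes its pushable RT tasks, then chains the irq work to the
 *        next CPU returned by rto_next_cpu()
 *   ... until rto_next_cpu() returns -1 and the root domain reference
 *   taken in tell_cpu_to_push() is dropped via sched_put_rd().
 */
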
#endif /* HAVE_RT_PUSH_IPI */

static void pull_rt_task(struct rq *this_rq)
{
    int this_cpu = this_rq->cpu, cpu;
    bool resched = false;
    struct task_struct *p;
    struct rq *src_rq;
    int rt_overload_count = rt_overloaded(this_rq);
    if (likely(!rt_overload_count)) {
        return;
    }

    /*
     * Match the barrier in rt_set_overload(); this guarantees that if we
     * see the overload count, we must also see the rto_mask bit.
     */
    smp_rmb();

    /* If we are the only overloaded CPU do nothing */
    if (rt_overload_count == 1 && cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) {
        return;
    }

#ifdef HAVE_RT_PUSH_IPI
    if (sched_feat(RT_PUSH_IPI)) {
        tell_cpu_to_push(this_rq);
        return;
    }
#endif

    for_each_cpu(cpu, this_rq->rd->rto_mask)
    {
        if (this_cpu == cpu) {
            continue;
        }

        src_rq = cpu_rq(cpu);
        /*
         * Don't bother taking the src_rq->lock if the next highest
         * task is known to be lower-priority than our current task.
         * This may look racy, but if this value is about to go
         * logically higher, the src_rq will push this task away.
         * And if it's going logically lower, we do not care.
         */
        if (src_rq->rt.highest_prio.next >= this_rq->rt.highest_prio.curr) {
            continue;
        }

        /*
         * We can potentially drop this_rq's lock in
         * double_lock_balance, and another CPU could
         * alter this_rq
         */
        double_lock_balance(this_rq, src_rq);

        /*
         * We can only pull a task that is pushable
         * on its rq, and no others.
         */
        p = pick_highest_pushable_task(src_rq, this_cpu);
        /*
         * Do we have an RT task that preempts
         * the to-be-scheduled task?
         */
        if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
            WARN_ON(p == src_rq->curr);
            WARN_ON(!task_on_rq_queued(p));

            /*
             * There's a chance that p is higher in priority
             * than what's currently running on its CPU.
             * This is just because p is waking up and hasn't
             * had a chance to schedule yet. We only pull
             * p if it is lower in priority than the
             * current task on its run queue.
             */
            if (p->prio < src_rq->curr->prio) {
                goto skip;
            }

            resched = true;

            deactivate_task(src_rq, p, 0);
            set_task_cpu(p, this_cpu);
            activate_task(this_rq, p, 0);
            /*
             * We continue with the search, just in
             * case there's an even higher prio task
             * in another runqueue. (low likelihood
             * but possible)
             */
        }
    skip:
        double_unlock_balance(this_rq, src_rq);
    }

    if (resched) {
        resched_curr(this_rq);
    }
}
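
/*
 * Numerical example for the pull decision above (a lower value means a
 * higher priority): if this_rq is about to run a task at prio 80 and
 * src_rq has a queued pushable task p at prio 40 while src_rq->curr runs
 * at prio 20, then p is not about to preempt anything on src_rq
 * (40 >= 20) and 40 < 80, so p is pulled over. Had p been at prio 10, it
 * would preempt src_rq->curr as soon as it gets to schedule, so it is
 * skipped.
 */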

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
    bool need_to_push = !task_running(rq, p) && !test_tsk_need_resched(rq->curr) && p->nr_cpus_allowed > 1 &&
                        (dl_task(rq->curr) || rt_task(rq->curr)) &&
                        (rq->curr->nr_cpus_allowed < 2 || rq->curr->prio <= p->prio);
    if (need_to_push) {
        push_rt_tasks(rq);
    }
}
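
/*
 * Example of when the push above fires: an RT task p at prio 60 wakes on
 * a CPU whose current task is an RT task at prio 40 (so p will not
 * preempt it), p may run elsewhere (nr_cpus_allowed > 1) and no resched
 * is pending. Rather than leaving p queued behind curr, we try to push
 * it to a CPU running something of lower priority.
 */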

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
    if (rq->rt.overloaded) {
        rt_set_overload(rq);
    }

    __enable_runtime(rq);

    cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
    if (rq->rt.overloaded) {
        rt_clear_overload(rq);
    }

    __disable_runtime(rq);

    cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching away from the RT class, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
    /*
     * If there are other RT tasks then we will reschedule
     * and the scheduling of the other RT tasks will handle
     * the balancing. But if we are the last RT task
     * we may need to handle the pulling of RT tasks
     * now.
     */
    if (!task_on_rq_queued(p) || rq->rt.rt_nr_running || cpu_isolated(cpu_of(rq))) {
        return;
    }

    rt_queue_pull_task(rq);
}

void __init init_sched_rt_class(void)
{
    unsigned int i;

    for_each_possible_cpu(i)
    {
        zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), GFP_KERNEL, cpu_to_node(i));
    }
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
    /*
     * If we are running, update the avg_rt tracking, as the running time
     * will from now on be accounted to it.
     */
    if (task_current(rq, p)) {
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
        return;
    }

    /*
     * If we are not running we may need to preempt the current
     * running task. If that current running task is also an RT task
     * then see if we can move to another run queue.
     */
    if (task_on_rq_queued(p)) {
#ifdef CONFIG_SMP
        if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) {
            rt_queue_push_tasks(rq);
        }
#endif /* CONFIG_SMP */
        if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) {
            resched_curr(rq);
        }
    }
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
    if (!task_on_rq_queued(p)) {
        return;
    }

    if (rq->curr == p) {
#ifdef CONFIG_SMP
        /*
         * If our priority decreases while running, we
         * may need to pull tasks to this runqueue.
         */
        if (oldprio < p->prio) {
            rt_queue_pull_task(rq);
        }

        /*
         * If there's a higher priority task waiting to run
         * then reschedule.
         */
        if (p->prio > rq->rt.highest_prio.curr) {
            resched_curr(rq);
        }
#else
        /* For UP simply resched on drop of prio */
        if (oldprio < p->prio) {
            resched_curr(rq);
        }
#endif /* CONFIG_SMP */
    } else {
        /*
         * This task is not running, but if its priority
         * is higher than that of the current running task,
         * then reschedule.
         */
        if (p->prio < rq->curr->prio) {
            resched_curr(rq);
        }
    }
}
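
/*
 * Reminder for the comparisons above: p->prio is the kernel-internal
 * value where a lower number means a higher priority, so
 * "oldprio < p->prio" means the priority dropped, and
 * "p->prio < rq->curr->prio" means p outranks the running task.
 */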

#ifdef CONFIG_POSIX_TIMERS
static void watchdog(struct rq *rq, struct task_struct *p)
{
    unsigned long soft, hard;

    /* max may change after cur was read, this will be fixed next tick */
    soft = task_rlimit(p, RLIMIT_RTTIME);
    hard = task_rlimit_max(p, RLIMIT_RTTIME);

    if (soft != RLIM_INFINITY) {
        unsigned long next;

        if (p->rt.watchdog_stamp != jiffies) {
            p->rt.timeout++;
            p->rt.watchdog_stamp = jiffies;
        }

        next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC / HZ);
        if (p->rt.timeout > next) {
            posix_cputimers_rt_watchdog(&p->posix_cputimers, p->se.sum_exec_runtime);
        }
    }
}
#else
static inline void watchdog(struct rq *rq, struct task_struct *p)
{
}
#endif
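
/*
 * Worked example of the RLIMIT_RTTIME arithmetic in watchdog(), assuming
 * HZ == 250 (a 4000us tick): with a soft limit of 100000us,
 * next = DIV_ROUND_UP(100000, 1000000 / 250) = 25 ticks, so once the task
 * has accumulated more than 25 watchdog ticks of RT runtime the posix
 * CPU-timer code is armed to deliver the RLIMIT_RTTIME signal.
 */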

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
    struct sched_rt_entity *rt_se = &p->rt;

    update_curr_rt(rq);
    update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);

    watchdog(rq, p);

    /*
     * RR tasks need a special form of timeslice management.
     * FIFO tasks have no timeslices.
     */
    if (p->policy != SCHED_RR) {
        return;
    }

    if (--p->rt.time_slice) {
        return;
    }

    p->rt.time_slice = sched_rr_timeslice;

    /*
     * Requeue to the end of queue if we (and all of our ancestors) are not
     * the only element on the queue
     */
    cycle_each_sched_rt_entity(rt_se) {
        if (rt_se->run_list.prev != rt_se->run_list.next) {
            requeue_task_rt(rq, p, 0);
            resched_curr(rq);
            return;
        }
    }
}
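
/*
 * Example of the SCHED_RR handling above: with the default timeslice
 * (RR_TIMESLICE, nominally 100ms worth of jiffies), p->rt.time_slice is
 * decremented on every tick; when it hits zero the slice is refilled and,
 * if the task shares a priority queue with another entity at any level of
 * the hierarchy, it is requeued to the tail and the CPU reschedules. A
 * task alone on its queue simply keeps running.
 */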

#ifdef CONFIG_SCHED_RT_ACTIVE_LB
static int rt_active_load_balance_cpu_stop(void *data)
{
    struct rq *busiest_rq = data;
    struct task_struct *next_task = busiest_rq->rt_push_task;
    struct rq *lowest_rq = NULL;
    unsigned long flags;

    raw_spin_lock_irqsave(&busiest_rq->lock, flags);
    busiest_rq->rt_active_balance = 0;

    /* find_lock_lowest_rq locks the rq if found */
    lowest_rq = find_lock_lowest_rq(next_task, busiest_rq);
    if (!lowest_rq) {
        goto out;
    }

    if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task))) {
        goto unlock;
    }

    deactivate_task(busiest_rq, next_task, 0);
    set_task_cpu(next_task, lowest_rq->cpu);
    activate_task(lowest_rq, next_task, 0);

    resched_curr(lowest_rq);
unlock:
    double_unlock_balance(busiest_rq, lowest_rq);
out:
    put_task_struct(next_task);
    raw_spin_unlock_irqrestore(&busiest_rq->lock, flags);

    return 0;
}

static void check_for_migration_rt(struct rq *rq, struct task_struct *p)
{
    bool need_active_lb = false;
    bool misfit_task = false;
    int cpu = task_cpu(p);
    unsigned long cpu_orig_cap;
#ifdef CONFIG_SCHED_RTG
    struct cpumask *rtg_target = NULL;
#endif

    if (!sysctl_sched_enable_rt_active_lb) {
        return;
    }

    if (p->nr_cpus_allowed == 1) {
        return;
    }

    cpu_orig_cap = capacity_orig_of(cpu);
    /* cpu has max capacity, no need to do balance */
    if (cpu_orig_cap == rq->rd->max_cpu_capacity) {
        return;
    }

#ifdef CONFIG_SCHED_RTG
    rtg_target = find_rtg_target(p);
    if (rtg_target) {
        misfit_task = capacity_orig_of(cpumask_first(rtg_target)) > cpu_orig_cap;
    } else {
        misfit_task = !rt_task_fits_capacity(p, cpu);
    }
#else
    misfit_task = !rt_task_fits_capacity(p, cpu);
#endif
    if (misfit_task) {
        raw_spin_lock(&rq->lock);
        if (!rq->active_balance && !rq->rt_active_balance) {
            rq->rt_active_balance = 1;
            rq->rt_push_task = p;
            get_task_struct(p);
            need_active_lb = true;
        }
        raw_spin_unlock(&rq->lock);

        if (need_active_lb) {
            stop_one_cpu_nowait(task_cpu(p), rt_active_load_balance_cpu_stop, rq, &rq->rt_active_balance_work);
        }
    }
}
#endif

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
    /*
     * Time slice is 0 for SCHED_FIFO tasks
     */
    if (task->policy == SCHED_RR) {
        return sched_rr_timeslice;
    } else {
        return 0;
    }
}

const struct sched_class rt_sched_class __section("__rt_sched_class") = {
    .enqueue_task = enqueue_task_rt,
    .dequeue_task = dequeue_task_rt,
    .yield_task = yield_task_rt,

    .check_preempt_curr = check_preempt_curr_rt,

    .pick_next_task = pick_next_task_rt,
    .put_prev_task = put_prev_task_rt,
    .set_next_task = set_next_task_rt,

#ifdef CONFIG_SMP
    .balance = balance_rt,
    .select_task_rq = select_task_rq_rt,
    .set_cpus_allowed = set_cpus_allowed_common,
    .rq_online = rq_online_rt,
    .rq_offline = rq_offline_rt,
    .task_woken = task_woken_rt,
    .switched_from = switched_from_rt,
#endif

    .task_tick = task_tick_rt,

    .get_rr_interval = get_rr_interval_rt,

    .prio_changed = prio_changed_rt,
    .switched_to = switched_to_rt,

    .update_curr = update_curr_rt,

#ifdef CONFIG_UCLAMP_TASK
    .uclamp_enabled = 1,
#endif
#ifdef CONFIG_SCHED_WALT
    .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
#ifdef CONFIG_SCHED_RT_ACTIVE_LB
    .check_for_migration = check_for_migration_rt,
#endif
};

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

static inline int tg_has_rt_tasks(struct task_group *tg)
{
    struct task_struct *task;
    struct css_task_iter it;
    int ret = 0;

    /*
     * Autogroups do not have RT tasks; see autogroup_create().
     */
    if (task_group_is_autogroup(tg)) {
        return 0;
    }

    css_task_iter_start(&tg->css, 0, &it);
    while (!ret && (task = css_task_iter_next(&it))) {
        ret |= rt_task(task);
    }
    css_task_iter_end(&it);

    return ret;
}

struct rt_schedulable_data {
    struct task_group *tg;
    u64 rt_period;
    u64 rt_runtime;
};

static int tg_rt_schedulable(struct task_group *tg, void *data)
{
    struct rt_schedulable_data *d = data;
    struct task_group *child;
    unsigned long total, sum = 0;
    u64 period, runtime;

    period = ktime_to_ns(tg->rt_bandwidth.rt_period);
    runtime = tg->rt_bandwidth.rt_runtime;

    if (tg == d->tg) {
        period = d->rt_period;
        runtime = d->rt_runtime;
    }

    /*
     * Cannot have more runtime than the period.
     */
    if (runtime > period && runtime != RUNTIME_INF) {
        return -EINVAL;
    }

    /*
     * Ensure we don't starve existing RT tasks if runtime turns zero.
     */
    if (rt_bandwidth_enabled() && !runtime && tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg)) {
        return -EBUSY;
    }

    total = to_ratio(period, runtime);
    /*
     * Nobody can have more than the global setting allows.
     */
    if (total > to_ratio(global_rt_period(), global_rt_runtime())) {
        return -EINVAL;
    }

    /*
     * The sum of our children's runtime should not exceed our own.
     */
    list_for_each_entry_rcu(child, &tg->children, siblings)
    {
        period = ktime_to_ns(child->rt_bandwidth.rt_period);
        runtime = child->rt_bandwidth.rt_runtime;

        if (child == d->tg) {
            period = d->rt_period;
            runtime = d->rt_runtime;
        }

        sum += to_ratio(period, runtime);
    }

    if (sum > total) {
        return -EINVAL;
    }

    return 0;
}
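
/*
 * Numerical sketch of the checks above, assuming to_ratio() returns
 * runtime/period as a fixed-point fraction: with the usual global default
 * of 950000us runtime per 1000000us period, a group asking for 500000us
 * per 1000000us period passes the global check (0.50 <= 0.95), but the
 * sum of its children's ratios must then also stay within 0.50.
 */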

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
    int ret;

    struct rt_schedulable_data data = {
        .tg = tg,
        .rt_period = period,
        .rt_runtime = runtime,
    };

    rcu_read_lock();
    ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
    rcu_read_unlock();

    return ret;
}

static int tg_set_rt_bandwidth(struct task_group *tg, u64 rt_period, u64 rt_runtime)
{
    int i, err = 0;

    /*
     * Disallowing the root group RT runtime is BAD, as it would prevent the
     * kernel from creating (and/or operating) RT threads.
     */
    if (tg == &root_task_group && rt_runtime == 0) {
        return -EINVAL;
    }

    /* No period doesn't make any sense. */
    if (rt_period == 0) {
        return -EINVAL;
    }

    /*
     * Bound quota to defend quota against overflow during bandwidth shift.
     */
    if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime) {
        return -EINVAL;
    }

    mutex_lock(&rt_constraints_mutex);
    err = __rt_schedulable(tg, rt_period, rt_runtime);
    if (err) {
        goto unlock;
    }

    raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
    tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
    tg->rt_bandwidth.rt_runtime = rt_runtime;

    for_each_possible_cpu(i)
    {
        struct rt_rq *rt_rq = tg->rt_rq[i];

        raw_spin_lock(&rt_rq->rt_runtime_lock);
        rt_rq->rt_runtime = rt_runtime;
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
    }
    raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
    mutex_unlock(&rt_constraints_mutex);

    return err;
}

int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
    u64 rt_runtime, rt_period;

    rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
    rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
    if (rt_runtime_us < 0) {
        rt_runtime = RUNTIME_INF;
    } else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC) {
        return -EINVAL;
    }

    return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
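
/*
 * Example for the conversion above: writing 950000 (us) for a group gives
 * rt_runtime = 950000 * NSEC_PER_USEC = 950000000ns, while any negative
 * value maps to RUNTIME_INF (no group-local limit).
 */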

long sched_group_rt_runtime(struct task_group *tg)
{
    u64 rt_runtime_us;

    if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) {
        return -1;
    }

    rt_runtime_us = tg->rt_bandwidth.rt_runtime;
    do_div(rt_runtime_us, NSEC_PER_USEC);
    return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
    u64 rt_runtime, rt_period;

    if (rt_period_us > U64_MAX / NSEC_PER_USEC) {
        return -EINVAL;
    }

    rt_period = rt_period_us * NSEC_PER_USEC;
    rt_runtime = tg->rt_bandwidth.rt_runtime;

    return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
    u64 rt_period_us;

    rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
    do_div(rt_period_us, NSEC_PER_USEC);
    return rt_period_us;
}

static int sched_rt_global_constraints(void)
{
    int ret = 0;

    mutex_lock(&rt_constraints_mutex);
    ret = __rt_schedulable(NULL, 0, 0);
    mutex_unlock(&rt_constraints_mutex);

    return ret;
}

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
    /* Don't accept realtime tasks when there is no way for them to run */
    if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) {
        return 0;
    }

    return 1;
}

#else  /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
    unsigned long flags;
    int i;

    raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
    for_each_possible_cpu(i)
    {
        struct rt_rq *rt_rq = &cpu_rq(i)->rt;

        raw_spin_lock(&rt_rq->rt_runtime_lock);
        rt_rq->rt_runtime = global_rt_runtime();
        raw_spin_unlock(&rt_rq->rt_runtime_lock);
    }
    raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

    return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */

static int sched_rt_global_validate(void)
{
    if (sysctl_sched_rt_period <= 0) {
        return -EINVAL;
    }

    if ((sysctl_sched_rt_runtime != RUNTIME_INF) && ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
                                                     ((u64)sysctl_sched_rt_runtime * NSEC_PER_USEC > max_rt_runtime))) {
        return -EINVAL;
    }

    return 0;
}
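
/*
 * With the usual defaults (sysctl_sched_rt_period = 1000000us,
 * sysctl_sched_rt_runtime = 950000us) the checks above pass: the period
 * is positive, the runtime does not exceed the period, and 950000us in
 * nanoseconds stays well below max_rt_runtime.
 */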

static void sched_rt_do_global(void)
{
    raw_spin_lock(&def_rt_bandwidth.rt_runtime_lock);
    def_rt_bandwidth.rt_runtime = global_rt_runtime();
    def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
    raw_spin_unlock(&def_rt_bandwidth.rt_runtime_lock);
}

int sched_rt_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos)
{
    int old_period, old_runtime;
    static DEFINE_MUTEX(mutex);
    int ret;

    mutex_lock(&mutex);
    old_period = sysctl_sched_rt_period;
    old_runtime = sysctl_sched_rt_runtime;

    ret = proc_dointvec(table, write, buffer, lenp, ppos);
    if (!ret && write) {
        ret = sched_rt_global_validate();
        if (ret) {
            goto undo;
        }

        ret = sched_dl_global_validate();
        if (ret) {
            goto undo;
        }

        ret = sched_rt_global_constraints();
        if (ret) {
            goto undo;
        }

        sched_rt_do_global();
        sched_dl_do_global();
    }
    if (0) {
    undo:
        sysctl_sched_rt_period = old_period;
        sysctl_sched_rt_runtime = old_runtime;
    }
    mutex_unlock(&mutex);

    return ret;
}

int sched_rr_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos)
{
    int ret;
    static DEFINE_MUTEX(mutex);

    mutex_lock(&mutex);
    ret = proc_dointvec(table, write, buffer, lenp, ppos);
    /*
     * Make sure that internally we keep jiffies.
     * Also, writing zero resets the timeslice to default:
     */
    if (!ret && write) {
        sched_rr_timeslice =
            sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE : msecs_to_jiffies(sysctl_sched_rr_timeslice);
    }
    mutex_unlock(&mutex);

    return ret;
}
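
/*
 * Usage example for the handler above (typically exposed as
 * /proc/sys/kernel/sched_rr_timeslice_ms): writing 100 sets
 * sched_rr_timeslice to msecs_to_jiffies(100), while writing 0 or a
 * negative value restores the RR_TIMESLICE default.
 */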

#ifdef CONFIG_SCHED_DEBUG
void print_rt_stats(struct seq_file *m, int cpu)
{
    rt_rq_iter_t iter;
    struct rt_rq *rt_rq;

    rcu_read_lock();
    cycle_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) {
        print_rt_rq(m, cpu, rt_rq);
    }
    rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */
