#include "internal.h"

static inline bool sched_fair_runnable(struct rq *rq)
{
	return rq->cfs.nr_queued > 0;
}

/*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
 *
 * Balancing parameters are set up in init_sched_domains.
 */
static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
{
	/*
	 * NOTE(review): the body is empty in this chunk — presumably a stub
	 * awaiting the per-domain balancing walk, or the implementation was
	 * elided from this excerpt. Confirm against the full file before
	 * assuming any balancing actually happens here.
	 */
}

/*
 * This softirq handler is triggered via SCHED_SOFTIRQ from two places:
 *
 * - directly from the local sched_tick() for periodic load balancing
 *
 * - indirectly from a remote sched_tick() for NOHZ idle balancing
 *   through the SMP cross-call nohz_csd_func()
 */
static __latent_entropy void sched_balance_softirq(void)
{
    struct rq *_this_rq = this_rq();
    enum cpu_idle_type idle;

    sched_balance_domains(_this_rq, idle);
}

/*
 * sched_balance_newidle is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 *
 * Returns:
 *   < 0 - we released the lock and there are !fair tasks present
 *     0 - failed, no new tasks
 *   > 0 - success, new (fair) tasks present
 */
static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
{
	/*
	 * No pull logic is present in this excerpt: report "failed, no new
	 * tasks" unconditionally (see the return-value contract above).
	 */
	return 0;
}

/*
 * Balance callback for the fair class: if fair tasks are already queued
 * there is nothing to do; otherwise try to pull work via the newidle path.
 * Returns non-zero when fair tasks are (or become) runnable on this rq.
 */
static int balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	if (!sched_fair_runnable(rq))
		return sched_balance_newidle(rq, rf) != 0;

	return 1;
}
