#include <linux/cpuset.h>

#ifdef CONFIG_SMP
/*
 * Without an arch-specific override, any task may run on any possible CPU.
 * Note: cpu_possible_mask is a pointer expression, not a function -- the
 * expansion must not append "()", or every use site fails to compile.
 */
#define task_cpu_possible_mask(p)	cpu_possible_mask

/*
 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
 *
 * A few notes on cpu_active vs cpu_online:
 *
 *  - cpu_active must be a subset of cpu_online
 *
 *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
 *    see __set_cpus_allowed_ptr(). At this point the newly online
 *    CPU isn't yet part of the sched domains, and balancing will not
 *    see it.
 *
 *  - on CPU-down we clear cpu_active() to mask the sched domains and
 *    prevent the load balancer from placing new tasks on the to be
 *    removed CPU. Existing tasks will remain running there and will be
 *    taken off.
 *
 * This means that fallback selection must not select !active CPUs.
 * And can assume that any active CPU must be online. Conversely
 * select_task_rq() below may allow selection of !active CPUs in order
 * to satisfy the above rules.
 */
/*
 * select_fallback_rq - pick a usable CPU for @p when @cpu is not viable.
 *
 * Preference order:
 *  1. an allowed, online CPU on the same NUMA node as @cpu,
 *  2. any allowed, online CPU in p->cpus_ptr,
 *  3. widen the affinity mask via the cpuset layer and retry 2,
 *  4. widen to task_cpu_possible_mask() and retry 2.
 *
 * Reaching @fail a second time means even the possible mask held no
 * usable CPU, which cannot happen -- hence BUG().
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
    int nid = cpu_to_node(cpu);
    const struct cpumask *nodemask = NULL;
    enum
    {
        cpuset,
        possible,
        fail
    } state = cpuset;
    int dest_cpu;

    /*
     * If the node that the CPU is on has been offlined, cpu_to_node()
     * returns -1. There is no CPU on that node, so fall through and
     * select a CPU on another node.
     */
    if (nid != -1)
    {
        nodemask = cpumask_of_node(nid);

        /* Look for an allowed, online CPU on the same node. */
        for_each_cpu(dest_cpu, nodemask)
        {
            if (is_cpu_allowed(p, dest_cpu))
                return dest_cpu;
        }
    }

    for (;;)
    {
        /* Any allowed, online CPU? */
        for_each_cpu(dest_cpu, p->cpus_ptr)
        {
            if (!is_cpu_allowed(p, dest_cpu))
                continue;

            goto out;
        }

        /* No more Mr. Nice Guy. */
        switch (state)
        {
        case cpuset:
            /* Let the cpuset layer widen ->cpus_ptr, then retry. */
            if (cpuset_cpus_allowed_fallback(p))
            {
                state = possible;
                break;
            }
            fallthrough;
        case possible:
            /*
             * XXX When called from select_task_rq() we only
             * hold p->pi_lock and again violate locking order.
             *
             * More yuck to audit.
             */
            do_set_cpus_allowed(p, task_cpu_possible_mask(p));
            state = fail;
            break;
        case fail:
            BUG();
            break;
        }
    }

out:

    return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
 */
static int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
{
    lockdep_assert_held(&p->pi_lock);

    if (p->nr_cpus_allowed <= 1 || is_migration_disabled(p))
    {
        /* Effectively pinned: no class-specific placement to do. */
        cpu = cpumask_any(p->cpus_ptr);
    }
    else
    {
        cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
        *wake_flags |= WF_RQ_SELECTED;
    }

    /*
     * set_task_cpu() must not be called on a blocking task, so we rely
     * on ttwu() to place the task on a CPU that is valid per
     * ->cpus_ptr.
     *
     * This constraint is common to every placement strategy, which is
     * why it lives here.
     *
     * [ it also lets ->select_task_rq() simply return task_cpu(p)
     *   without worrying about this generic requirement ]
     */
    if (unlikely(!is_cpu_allowed(p, cpu)))
        cpu = select_fallback_rq(task_cpu(p), p);

    return cpu;
}
#endif
