
/*
 * Stub: treat every CPU as capable of running any task.
 * NOTE(review): upstream kernels let architectures override this for
 * asymmetric systems (e.g. 32-bit tasks on arm64) — confirm this port
 * never needs that distinction.
 */
#define task_cpu_possible(cpu, p)	true

#ifdef CONFIG_SMP
/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 *
 * Installs @ctx->new_mask as @p's affinity mask and caches its weight so
 * hot paths can test p->nr_cpus_allowed without re-walking the mask.
 */
void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
{
    cpumask_copy(&p->cpus_mask, ctx->new_mask);
    p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);

    /*
     * TODO(review): upstream additionally short-circuits for
     * SCA_MIGRATE_ENABLE/DISABLE (repointing cpus_ptr) and swaps the
     * user-requested mask into @ctx under SCA_USER — confirm whether
     * this port needs the same handling here.
     */
}

/*
 * Apply a new affinity to @p via its scheduling class.
 *
 * If @p is currently enqueued (or running), it is dequeued/stopped first so
 * the class callback sees a consistent task, then restored afterwards.
 * DEQUEUE_SAVE/ENQUEUE_RESTORE preserve the task's queued state flags and
 * *_NOCLOCK skips a redundant rq clock update across the pair.
 */
static void __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
{
    struct rq *rq = task_rq(p);
    bool queued, running;

    /*
     * This here violates the locking rules for affinity, since we're only
     * supposed to change these variables while holding both rq->lock and
     * p->pi_lock.
     *
     * HOWEVER, it magically works, because ttwu() is the only code that
     * accesses these variables under p->pi_lock and only does so after
     * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
     * before finish_task().
     *
     * XXX do further audits, this smells like something putrid.
     */
    /*
     * TODO(review): upstream asserts the expected lock here depending on
     * ctx->flags (pi_lock for normal changes, on_cpu for migrate-disable
     * transitions) — confirm which assertion this port should carry.
     */

    queued = task_on_rq_queued(p);
    running = task_current_donor(rq, p);

    if (queued)
    {
        /*
         * Because __kthread_bind() calls this on blocked tasks without
         * holding rq->lock.
         */
        lockdep_assert_rq_held(rq);
        dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
    }
    if (running)
        put_prev_task(rq, p);

    /* Let the scheduling class install the new mask (see set_cpus_allowed_common()). */
    p->sched_class->set_cpus_allowed(p, ctx);

    if (queued)
        enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
    if (running)
        set_next_task(rq, p);
}

/*
 * do_set_cpus_allowed() - forcibly set @p's affinity to @new_mask.
 * Consider using set_cpus_allowed_ptr() instead.
 */
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
    /* SCA_USER drops any previously user-requested mask. */
    struct affinity_context ac = {
        .new_mask = new_mask,
        .user_mask = NULL,
        .flags = SCA_USER,
    };

    __do_set_cpus_allowed(p, &ac);
}

/*
 * Cheap eligibility test: @cpu must be in @p's affinity mask, and a user
 * thread must additionally be runnable on @cpu at all.
 */
bool task_allowed_on_cpu(struct task_struct *p, int cpu)
{
    if (!cpumask_test_cpu(cpu, p->cpus_ptr))
        return false;

    /* Kernel threads pass; user threads also need task_cpu_possible(). */
    return (p->flags & PF_KTHREAD) || task_cpu_possible(cpu, p);
}

/* Whether @p's kthread state marks it as bound to a single CPU. */
bool kthread_is_per_cpu(struct task_struct *p)
{
    return p->kthread.is_percpu != 0;
}

/*
 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
 * __set_cpus_allowed_ptr() and select_fallback_rq().
 */
static bool is_cpu_allowed(struct task_struct *p, int cpu)
{
    /* Fails fast when @cpu isn't in the task's mask at all. */
    if (!task_allowed_on_cpu(p, cpu))
        return false;

    /* A migration-disabled task must be allowed to finish where it is. */
    if (is_migration_disabled(p))
        return cpu_online(cpu);

    /* User threads require a fully active CPU. */
    if (!(p->flags & PF_KTHREAD))
        return cpu_active(cpu);

    /* Per-CPU kthreads may keep running on merely-online CPUs. */
    if (kthread_is_per_cpu(p))
        return cpu_online(cpu);

    /* Ordinary kthreads: allowed while online, but not once going down. */
    return !cpu_dying(cpu) && cpu_online(cpu);
}
#else
/*
 * UP (!CONFIG_SMP) stub: with a single CPU there is no affinity to
 * change, so this is a no-op.
 */
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
#endif
