#include <linux/sched/hotplug.h>

#ifdef CONFIG_SMP
/*
 * No-op stub for SMT-presence accounting.
 *
 * NOTE(review): in mainline this helper (under CONFIG_SCHED_SMT) bumps the
 * count of cores with SMT siblings online; here it does nothing. It is also
 * unusual for the stub to live inside the CONFIG_SMP branch — confirm this
 * is intentional for this configuration.
 */
static inline void sched_smt_present_inc(int cpu)
{
}

/*
 * No-op stub invoked after a CPU comes online.
 *
 * NOTE(review): presumably this would trigger a cpuset-driven rebuild of the
 * sched domains (as the comment in sched_cpu_activate() implies it can fail
 * to do); here it is empty — verify against the full configuration.
 */
static inline void cpuset_cpu_active(void)
{
}

/* No-op stub: NUMA topology update on CPU online/offline is compiled out here. */
static inline void sched_update_numa(int cpu, bool online) {}

/* No-op stub: per-node CPU mask maintenance is compiled out here. */
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }

static inline void sched_set_rq_online(struct rq *rq, int cpu)
{
    struct rq_flags rf;

    rq_lock_irqsave(rq, &rf);
    if (rq->rd)
    {
        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
        set_rq_online(rq);
    }
    rq_unlock_irqrestore(rq, &rf);
}

/*
 * CPU hotplug callback: bring @cpu into active scheduling service.
 *
 * The statement order below is load-bearing: balance_push must be cleared
 * before the CPU is marked active, and the NUMA/topology masks must be
 * updated before cpuset_cpu_active() can rebuild the sched domains.
 *
 * Returns 0 (this step cannot fail).
 */
int sched_cpu_activate(unsigned int cpu)
{
    struct rq *rq = cpu_rq(cpu);

    /*
     * Clear the balance_push callback and prepare to schedule
     * regular tasks.
     */
    balance_push_set(cpu, false);

    /*
     * When going up, increment the number of cores with SMT present.
     */
    sched_smt_present_inc(cpu);
    set_cpu_active(cpu, true);

    /* Topology bookkeeping only applies once the scheduler is fully up;
     * during early boot the domains are built later in one pass. */
    if (sched_smp_initialized)
    {
        sched_update_numa(cpu, true);
        sched_domains_numa_masks_set(cpu);
        cpuset_cpu_active();
    }

    /* Notify the sched_ext (BPF scheduler) layer that this rq is usable. */
    scx_rq_activate(rq);

    /*
     * Put the rq online, if not already. This happens:
     *
     * 1) In the early boot process, because we build the real domains
     *    after all CPUs have been brought up.
     *
     * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
     *    domains.
     */
    sched_set_rq_online(rq, cpu);

    return 0;
}

/*
 * CPU hotplug callback: take @cpu out of active scheduling service.
 *
 * NOTE(review): this body performs none of the inverse operations of
 * sched_cpu_activate() — no set_cpu_active(cpu, false), no balance_push
 * re-arming, no scx/rq-offline teardown. It looks like an incomplete
 * placeholder; confirm whether the deactivation path is implemented
 * elsewhere or still pending.
 *
 * Returns 0 (success) unconditionally.
 */
int sched_cpu_deactivate(unsigned int cpu)
{
    return 0;
}
#else

#endif
