#include <linux/smpboot.h>

/*
 * Boot-time setup: initialize the per-cpu bringup/teardown completions
 * for every possible CPU so waiters can block on them later.
 */
static __init void cpuhp_init_state(void)
{
    int cpu;

    for_each_possible_cpu(cpu)
    {
        struct cpuhp_cpu_state *st = per_cpuhp_cpu_state(cpu);

        init_completion(&st->done_up);
        init_completion(&st->done_down);
    }
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static int cpuhp_should_run(unsigned int cpu)
{
    struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

    return st->should_run;
}

/*
 * The former STARTING/DYING states, which run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
    return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/*
 * Tell lockdep the hotplug lock is held on this (AP) context even though
 * the BP actually took it; see the call site in cpuhp_thread_fun().
 * Empty body here — presumably the !CONFIG_LOCKDEP stub; TODO confirm
 * against the configuration this file is built with.
 */
static void lockdep_acquire_cpus_lock(void)
{

}

/*
 * Counterpart of lockdep_acquire_cpus_lock(): drop the lockdep-only
 * annotation before leaving cpuhp_thread_fun(). Empty body — presumably
 * the !CONFIG_LOCKDEP stub; TODO confirm build configuration.
 */
static void lockdep_release_cpus_lock(void)
{

}

/*
 * Wake the waiter blocked on this CPU's hotplug operation: fire the
 * bringup completion on the way up, the teardown completion on the way down.
 */
static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
    if (bringup)
        complete(&st->done_up);
    else
        complete(&st->done_down);
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
    struct cpuhp_cpu_state *st = per_cpuhp_cpu_state(cpu);
    bool bringup = st->bringup;
    enum cpuhp_state state;

    /* smpboot only calls us after cpuhp_should_run() said yes. */
    if (WARN_ON_ONCE(!st->should_run))
        return;

    /*
     * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
     * that if we see ->should_run we also see the rest of the state.
     */
    smp_mb();

    /*
     * The BP holds the hotplug lock, but we're now running on the AP,
     * ensure that anybody asserting the lock is held, will actually find
     * it so.
     */
    lockdep_acquire_cpus_lock();
    cpuhp_lock_acquire(bringup);

    if (st->single)
    {
        /* Single-state mode: run exactly st->cb_state, then stop. */
        state = st->cb_state;
        st->should_run = false;
    }
    else
    {
        /* Multi-step mode: advance one state towards st->target per call. */
        st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
        if (!st->should_run)
            goto end;
    }

    WARN_ON_ONCE(!cpuhp_is_ap_state(state));

    if (cpuhp_is_atomic_state(state))
    {
        /* Former STARTING/DYING states run with IRQs disabled. */
        local_irq_disable();
        st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
        local_irq_enable();

        /*
         * STARTING/DYING must not fail!
         */
        WARN_ON_ONCE(st->result);
    }
    else
    {
        st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
    }

    if (st->result)
    {
        /*
         * If we fail on a rollback, we're up a creek without a
         * paddle, no way forward, no way back. We lose, thanks for
         * playing.
         */
        WARN_ON_ONCE(st->rollback);
        /* Stop stepping; the BP sees ->result after the completion fires. */
        st->should_run = false;
    }

end:
    /* Drop the lock annotations before signalling the waiter. */
    cpuhp_lock_release(bringup);
    lockdep_release_cpus_lock();

    /* Done (or failed): wake whoever is waiting for this operation. */
    if (!st->should_run)
        complete_ap_thread(st, bringup);
}

/* Descriptor registering the per-cpu "cpuhp/%u" threads with smpboot. */
static struct smp_hotplug_thread cpuhp_threads = {
    .store = &cpuhp_state.thread,          /* per-cpu slot for the thread's task_struct */
    .thread_should_run = cpuhp_should_run, /* polls st->should_run */
    .thread_fn = cpuhp_thread_fun,         /* runs one AP state callback per invocation */
    .thread_comm = "cpuhp/%u",             /* thread name, %u = cpu number */
    .selfparking = true,                   /* presumably parked/unparked by the hotplug core, not smpboot — verify */
};
