
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 *
 * Return: %0 on success or a negative errno code
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
                                 bool bringup, struct hlist_node *node,
                                 struct hlist_node **lastp)
{
    struct cpuhp_cpu_state *st = per_cpuhp_cpu_state(cpu);
    struct cpuhp_step *step = cpuhp_get_step(state);
    int (*cbm)(unsigned int cpu, struct hlist_node *node);
    int (*cb)(unsigned int cpu);
    int ret, cnt;

    /* Fault injection: fail exactly once when this state was marked. */
    if (st->fail == state)
    {
        st->fail = CPUHP_INVALID;
        return -EAGAIN;
    }

    /* A state with no callback in this direction should never be invoked. */
    if (cpuhp_step_empty(bringup, step))
    {
        WARN_ON_ONCE(1);
        return 0;
    }

    if (!step->multi_instance)
    {
        /* Single-instance states never do partial rollback bookkeeping. */
        WARN_ON_ONCE(lastp && *lastp);
        cb = bringup ? step->startup.single : step->teardown.single;

        trace_cpuhp_enter(cpu, st->target, state, cb);
        ret = cb(cpu);
        trace_cpuhp_exit(cpu, st->state, state, ret);
        return ret;
    }
    cbm = bringup ? step->startup.multi : step->teardown.multi;

    /* Single invocation for instance add/remove */
    if (node)
    {
        WARN_ON_ONCE(lastp && *lastp);
        trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
        ret = cbm(cpu, node);
        trace_cpuhp_exit(cpu, st->state, state, ret);
        return ret;
    }

    /* State transition. Invoke on all instances */
    cnt = 0;
    hlist_for_each(node, &step->list)
    {
        /* Resuming a rollback: stop at the instance that failed last time. */
        if (lastp && node == *lastp)
            break;

        trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
        ret = cbm(cpu, node);
        trace_cpuhp_exit(cpu, st->state, state, ret);
        if (ret)
        {
            /* No rollback bookkeeping requested: undo locally right away. */
            if (!lastp)
                goto err;

            /* Record how far we got so the caller can roll back later. */
            *lastp = node;
            return ret;
        }
        cnt++;
    }
    if (lastp)
        *lastp = NULL;
    return 0;
err:
    /*
     * Rollback the instances if one failed: invoke the opposite-direction
     * callback on the @cnt instances which already succeeded.
     */
    cbm = !bringup ? step->startup.multi : step->teardown.multi;
    if (!cbm)
        return ret;

    hlist_for_each(node, &step->list)
    {
        if (!cnt--)
            break;

        trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
        ret = cbm(cpu, node);
        trace_cpuhp_exit(cpu, st->state, state, ret);
        /*
         * Rollback must not fail.
         */
        WARN_ON_ONCE(ret);
    }
    return ret;
}

/*
 * Block until the AP hotplug thread completes the current operation.
 * Bringup and teardown use separate completions so the two directions
 * cannot consume each other's wakeups.
 */
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
    struct completion *done;

    if (bringup)
        done = &st->done_up;
    else
        done = &st->done_down;

    wait_for_completion(done);
}

/*
 * Hand the pending hotplug work to the per-cpu AP thread and wait for it
 * to finish. The request (st->bringup, st->cb_state, ...) must already be
 * filled in by the caller before this is called.
 */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
    /* Nothing to do for a full-range request that is already at target. */
    if (!st->single && st->state == st->target)
        return;

    st->result = 0;
    /*
     * Make sure the above stores are visible before should_run becomes
     * true. Paired with the mb() in cpuhp_thread_fun()
     */
    smp_mb();
    st->should_run = true;
    wake_up_process(st->thread);
    wait_for_ap_thread(st, st->bringup);
}

/*
 * Invoke a single state callback on @cpu, preferably via that cpu's
 * hotplug thread. Rolls back a partial multi-instance invocation on
 * failure. Returns 0 if the cpu is offline or on success, negative
 * errno otherwise.
 */
static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
                                    struct hlist_node *node)
{
    struct cpuhp_cpu_state *st = per_cpuhp_cpu_state(cpu);
    int ret;

    if (!cpu_online(cpu))
        return 0;

    /* Lockdep-only annotations: record that both directions are taken. */
    cpuhp_lock_acquire(false);
    cpuhp_lock_release(false);

    cpuhp_lock_acquire(true);
    cpuhp_lock_release(true);

    /*
     * If we are up and running, use the hotplug thread. For early calls
     * we invoke the thread function directly.
     */
    if (!st->thread)
        return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

    st->rollback = false;
    st->last = NULL;

    /* Describe the single-state request for the AP thread. */
    st->node = node;
    st->bringup = bringup;
    st->cb_state = state;
    st->single = true;

    __cpuhp_kick_ap(st);

    /*
     * If we failed and did a partial, do a rollback.
     */
    if ((ret = st->result) && st->last)
    {
        st->rollback = true;
        st->bringup = !bringup;

        __cpuhp_kick_ap(st);
    }

    /*
     * Clean up the leftovers so the next hotplug operation wont use stale
     * data.
     */
    st->node = st->last = NULL;

    return ret;
}

/*
 * Walk the hotplug states from the current one towards @target, invoking
 * the callback for each. With @nofail, failures are only reported via
 * pr_warn and the walk continues (returning -1 if any state failed);
 * otherwise the walk stops at the first failure and its errno is returned.
 */
static int __cpuhp_invoke_callback_range(bool bringup,
                                         unsigned int cpu,
                                         struct cpuhp_cpu_state *st,
                                         enum cpuhp_state target,
                                         bool nofail)
{
    enum cpuhp_state state;
    int ret = 0;

    while (cpuhp_next_state(bringup, &state, st, target))
    {
        int err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);

        if (!err)
            continue;

        if (!nofail)
        {
            /* Stop at the first failing state and propagate its errno. */
            ret = err;
            break;
        }

        /* Best-effort mode: complain, remember the failure, keep going. */
        pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
                cpu, bringup ? "UP" : "DOWN",
                cpuhp_get_step(st->state)->name,
                st->state, err);
        ret = -1;
    }

    return ret;
}

/*
 * Convenience wrapper: invoke the callback range in fail-fast mode,
 * i.e. stop and return an errno at the first failing state.
 */
static inline int cpuhp_invoke_callback_range(bool bringup,
                                              unsigned int cpu,
                                              struct cpuhp_cpu_state *st,
                                              enum cpuhp_state target)
{
    const bool nofail = false;

    return __cpuhp_invoke_callback_range(bringup, cpu, st, target, nofail);
}
