
/*
 * Stub out the sparse-irq descriptor lock for now.  Per the comment in
 * bringup_cpu(), the real lock prevents irq alloc/free across CPU
 * bringup; these no-op until that locking is implemented.
 */
#define irq_lock_sparse() // todo
#define irq_unlock_sparse()

static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
                         enum cpuhp_state target)
{
    enum cpuhp_state prev_state;
    int ret;

    prev_state = cpuhp_set_state(cpu, st, target);
    __cpuhp_kick_ap(st);
    if ((ret = st->result))
    {
        cpuhp_reset_state(cpu, st, prev_state);
        __cpuhp_kick_ap(st);
    }

    return ret;
}

/*
 * Block until the freshly booted AP reaches CPUHP_AP_ONLINE_IDLE, then
 * unpark its hotplug thread.  Returns 0 on success, -ECANCELED if the
 * CPU never came online or is not allowed to boot.
 */
static int bringup_wait_for_ap_online(unsigned int cpu)
{
    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

    /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
    wait_for_ap_thread(st, true);
    if (WARN_ON_ONCE(!cpu_online(cpu)))
        return -ECANCELED;

    /* Unpark the hotplug thread of the target cpu */
    kthread_unpark(st->thread);

    /*
     * SMT soft disabling on X86 requires to bring the CPU out of the
     * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
     * CPU marked itself as booted_once in notify_cpu_starting() so the
     * cpu_bootable() check will now return false if this is not the
     * primary sibling.
     */
    return cpu_bootable(cpu) ? 0 : -ECANCELED;
}

/*
 * Boot-processor side of bringing @cpu online: start the AP on its idle
 * thread, wait for it to come up, and hand off to the AP hotplug thread
 * if the target state lies beyond CPUHP_AP_ONLINE_IDLE.
 *
 * Returns 0 on success, -EAGAIN if the AP cannot be booted right now,
 * or the error from the failing bringup step.
 */
static int bringup_cpu(unsigned int cpu)
{
    struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
    struct task_struct *idle = idle_thread_get(cpu);
    int ret;

    if (!cpuhp_can_boot_ap(cpu))
        return -EAGAIN;

    /*
     * Some architectures have to walk the irq descriptors to
     * setup the vector space for the cpu which comes online.
     *
     * Prevent irq alloc/free across the bringup by acquiring the
     * sparse irq lock. Hold it until the upcoming CPU completes the
     * startup in cpuhp_online_idle() which allows to avoid
     * intermediate synchronization points in the architecture code.
     */
    irq_lock_sparse();

    ret = __cpu_up(cpu, idle);
    if (!ret)
        ret = cpuhp_bp_sync_alive(cpu);
    if (!ret)
        ret = bringup_wait_for_ap_online(cpu);

    irq_unlock_sparse();

    if (ret)
        return ret;

    /* Nothing more to do on the BP if the target stops at online-idle. */
    if (st->target <= CPUHP_AP_ONLINE_IDLE)
        return 0;

    /* Let the AP hotplug thread carry the CPU up to the final target. */
    return cpuhp_kick_ap(cpu, st, st->target);
}

/*
 * Final bringup step; nothing to do in this implementation, so report
 * success unconditionally.
 */
static int finish_cpu(unsigned int cpu)
{
    (void)cpu; /* unused */
    return 0;
}

/************************************************************************/
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              enum cpuhp_state target)
{
    enum cpuhp_state prev_state = st->state;
    int ret = 0;

    ret = cpuhp_invoke_callback_range(true, cpu, st, target);
    if (ret)
    {

        cpuhp_reset_state(cpu, st, prev_state);
    }

    return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
    struct cpuhp_cpu_state *st = per_cpuhp_cpu_state(cpu);
    int ret = 0;

    /*
     * The caller of cpu_up() might have raced with another
     * caller. Nothing to do.
     */
    if (st->state >= target)
        goto out;

    cpuhp_set_state(cpu, st, target);

    /*
     * Try to reach the target state. We max out on the BP at
     * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
     * responsible for bringing it up to the target state.
     */
    target = min(target, CPUHP_BRINGUP_CPU);
    ret = cpuhp_up_callbacks(cpu, st, target);

out:
    return ret;
}

/*
 * Bring @cpu up to @target.  Thin wrapper around _cpu_up() for the
 * non-frozen-tasks path (tasks_frozen == 0).
 */
static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
    return _cpu_up(cpu, 0, target);
}

/*
 * Bring up to @ncpus CPUs from @mask to state @target.
 *
 * Fix: the @ncpus limit was previously accepted but never applied, so
 * every CPU in @mask was brought up regardless.  Now at most @ncpus
 * CPUs are started; errors from cpu_up() are (as before) ignored.
 */
static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
                                      enum cpuhp_state target)
{
    unsigned int cpu;

    for_each_cpu(cpu, mask)
    {
        if (!ncpus)
            break;
        cpu_up(cpu, target);
        ncpus--;
    }
}
