/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt

#include <seminix/cache.h>
#include <seminix/smp.h>
#include <seminix/mm_types.h>
#include <seminix/init.h>
#include <seminix/cpumask.h>
#include <seminix/percpu.h>
#include <seminix/spinlock.h>
#include <seminix/slab.h>
#include <seminix/sched/idle.h>
#include <asm/system_misc.h>

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = CONFIG_NR_CPUS;

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
/*
 * Derive nr_cpu_ids from the highest set bit in cpu_possible_mask.
 * An arch that already shrank nr_cpu_ids makes this call redundant.
 */
void __init setup_nr_cpu_ids(void)
{
    unsigned int highest;

    highest = find_last_bit(cpumask_bits(cpu_possible_mask), CONFIG_NR_CPUS);
    nr_cpu_ids = highest + 1;
}

extern struct task_struct *idle_task_init_once(int cpu);

/*
 * Create (once) the idle task for @cpu and hand off to the arch code
 * to actually start it. Panics if the idle task cannot be created.
 * Returns the result of __cpu_up().
 */
static int __init bringup_cpu(int cpu)
{
    struct task_struct *idle_task;

    idle_task = idle_task_init_once(cpu);
    if (!idle_task)
        panic("Idle/%d tasks cannot be created.\n", cpu);

    /* Arch-specific enabling code. */
    return __cpu_up(cpu, idle_task);
}

/* Called by boot processor to activate the rest. */
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
    int cpu, num_cpus;

    /*
     * Keep the boot CPU stable while the secondaries come up; the
     * matching preempt_enable() is below. (The original disabled
     * preemption here and never re-enabled it, leaving the boot
     * CPU's preempt count permanently elevated.)
     */
    preempt_disable();

    pr_info("Bringing up secondary CPUs ...\n");

    /* FIXME: This should be done in userspace --RR */
    for_each_possible_cpu(cpu) {
        if (!cpu_online(cpu))
            bringup_cpu(cpu);
    }

    num_cpus = num_online_cpus();
    pr_info("Brought up %d CPU%s\n",
        num_cpus, (num_cpus > 1 ? "s" : ""));

    preempt_enable();

    /* Any cleanup work */
    smp_cpus_done();
}

/* Flag bits kept in call_single_data_t::flags. */
enum {
    CSD_FLAG_LOCK		= 0x01,	/* csd is in use; released via csd_unlock() */
    CSD_FLAG_SYNCHRONOUS	= 0x02,	/* sender waits; unlock only after func() ran */
};

/* Per-sender state used by smp_call_function_many(). */
struct call_function_data {
    call_single_data_t	__percpu *csd;	/* per-target-CPU call descriptors */
    cpumask_t		cpumask;	/* targets of the current call */
    cpumask_t		cpumask_ipi;	/* subset that actually needs an IPI */
};

/* Per-CPU sender-side state for smp_call_function_many(). */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
/* Per-CPU lock-less queue of pending csd entries, drained in IPI context. */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
/* Shared per-CPU csd array; every cfd_data.csd points here (see smpcfd_prepare_cpu). */
static DEFINE_PER_CPU(call_single_data_t, csdata);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

/*
 * Initialize @cpu's sender-side call_function_data. Always returns 0.
 *
 * NOTE(review): every CPU's cfd->csd is pointed at the same per-cpu
 * csdata array, so two CPUs sending to the same target share one csd
 * and get serialized by csd_lock() — confirm this loss of sender
 * parallelism (upstream allocates a csd array per sender) is intended.
 */
static int smpcfd_prepare_cpu(unsigned int cpu)
{
    struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

    cpumask_clear(&cfd->cpumask);
    cpumask_clear(&cfd->cpumask_ipi);

    cfd->csd = &csdata;

    return 0;
}

/*
 * Boot-time setup of the smp-call-function machinery: give every
 * possible CPU an empty incoming queue and initialized sender state.
 */
void __init call_function_init(void)
{
    int cpu;

    for_each_possible_cpu(cpu) {
        smpcfd_prepare_cpu(cpu);
        init_llist_head(&per_cpu(call_single_queue, cpu));
    }
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls its even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
/* Spin until @csd is unlocked; the acquire pairs with csd_unlock()'s release. */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
    smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

/* Wait for any previous owner to finish, then claim @csd for this sender. */
static __always_inline void csd_lock(call_single_data_t *csd)
{
    csd_lock_wait(csd);
    csd->flags |= CSD_FLAG_LOCK;

    /*
     * prevent CPU from reordering the above assignment
     * to ->flags with any subsequent assignments to other
     * fields of the specified call_single_data_t structure:
     */
    smp_wmb();
}

/* Release @csd; the store-release pairs with csd_lock_wait()'s acquire. */
static __always_inline void csd_unlock(call_single_data_t *csd)
{
    WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

    /*
     * ensure we're all done before releasing data:
     */
    smp_store_release(&csd->flags, 0);
}

/* Per-CPU csd used by smp_call_function_single() for !wait (async) calls. */
static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 *
 * Returns 0 on success, -ENXIO if @cpu is out of range or offline.
 * On the local CPU the function runs immediately with irqs disabled.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd,
                   smp_call_func_t func, void *info)
{
    if (cpu == smp_processor_id()) {
        unsigned long flags;

        /*
         * We can unlock early even for the synchronous on-stack case,
         * since we're doing this from the same CPU..
         */
        csd_unlock(csd);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        return 0;
    }

    /*
     * Cast to unsigned so a negative @cpu also fails the range check
     * instead of reaching cpu_online() and indexing the online mask
     * out of bounds (the original (int) cast let negatives through).
     */
    if ((unsigned int)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
        csd_unlock(csd);
        return -ENXIO;
    }

    csd->func = func;
    csd->info = info;

    /*
     * The list addition should be visible before sending the IPI
     * handler locks the list to pull the entry off it because of
     * normal cache coherency rules implied by spinlocks.
     *
     * If IPIs can go out of order to the cache coherency protocol
     * in an architecture, sufficient synchronisation should be added
     * to arch code to make it appear to obey cache coherency WRT
     * locking and barrier primitives. Generic code isn't really
     * equipped to do the right thing...
     */
    if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
        arch_send_call_function_single_ipi(cpu);

    return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
    /* Drain this CPU's queue, warning if we are somehow offline. */
    flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
    struct llist_head *head;
    struct llist_node *entry;
    call_single_data_t *csd, *csd_next;
    static bool warned;   /* rate-limit the offline warning to once ever */

    /* Atomically grab everything queued so far, oldest-first. */
    head = this_cpu_ptr(&call_single_queue);
    entry = llist_del_all(head);
    entry = llist_reverse_order(entry);

    /* There shouldn't be any pending callbacks on an offline CPU. */
    /*
     * NOTE(review): head was just drained above, so !llist_empty(head)
     * here only fires if new entries raced in after llist_del_all() —
     * confirm that is the intended trigger for this warning.
     */
    if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
             !warned && !llist_empty(head))) {
        warned = true;
        WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

        /*
         * We don't have to use the _safe() variant here
         * because we are not invoking the IPI handlers yet.
         */
        llist_for_each_entry(csd, entry, llist)
            pr_warn("IPI callback %pS sent to offline CPU\n",
                csd->func);
    }

    /* _safe(): csd may be reused by its owner as soon as we unlock it. */
    llist_for_each_entry_safe(csd, csd_next, entry, llist) {
        smp_call_func_t func = csd->func;
        void *info = csd->info;

        /* Do we wait until *after* callback? */
        if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
            /* Sender is spinning in csd_lock_wait(): run first, then release. */
            func(info);
            csd_unlock(csd);
        } else {
            /* Async: release the csd before running so the owner can reuse it. */
            csd_unlock(csd);
            func(info);
        }
    }
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                 int wait)
{
    call_single_data_t *csd;
    /* On-stack csd, pre-marked locked+synchronous for the wait case. */
    call_single_data_t csd_stack = {
        .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
    };
    int this_cpu;
    int err;

    /*
     * prevent preemption and reschedule on another processor,
     * as well as CPU removal
     */
    this_cpu = get_cpu();

    /*
     * Can deadlock when called with interrupts disabled.
     * We allow cpu's that are not yet online though, as no one else can
     * send smp call function interrupt to this cpu and as such deadlocks
     * can't happen.
     */
    WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled());

    /* Waiters use the stack csd; async callers borrow this CPU's csd_data. */
    csd = &csd_stack;
    if (!wait) {
        csd = this_cpu_ptr(&csd_data);
        csd_lock(csd);
    }

    err = generic_exec_single(cpu, csd, func, info);

    /* Synchronous: spin until the target's handler unlocks our stack csd. */
    if (wait)
        csd_lock_wait(csd);

    put_cpu();

    return err;
}

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 * 			         specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchonous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes his own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
    int err = 0;

    preempt_disable();

    /* We could deadlock if we have to wait here with interrupts disabled! */
    if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
        csd_lock_wait(csd);

    /* Plain LOCK, no SYNCHRONOUS: the target unlocks before running func. */
    csd->flags = CSD_FLAG_LOCK;
    smp_wmb();   /* order ->flags before generic_exec_single()'s field writes */

    /* func/info were pre-set by the caller in the csd itself. */
    err = generic_exec_single(cpu, csd, csd->func, csd->info);
    preempt_enable();

    return err;
}

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
              smp_call_func_t func, void *info, int wait)
{
    int cpu;
    const struct cpumask *nodemask;
    int ret;

    /* Try for same CPU (cheapest) */
    cpu = get_cpu();
    if (cpumask_test_cpu(cpu, mask))
        goto call;

    /* Try for same node. */
    /*
     * NOTE(review): "nodemask" is just cpu_online_mask here (no NUMA
     * topology in sight), so this step and the fallback below pick the
     * same CPU; the cpu_online() re-check is then redundant but harmless.
     */
    nodemask = cpu_online_mask;
    for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
         cpu = cpumask_next_and(cpu, nodemask, mask)) {
        if (cpu_online(cpu))
            goto call;
    }

    /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
    cpu = cpumask_any_and(mask, cpu_online_mask);
call:
    ret = smp_call_function_single(cpu, func, info, wait);
    put_cpu();
    return ret;
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                smp_call_func_t func, void *info, bool wait)
{
    struct call_function_data *cfd;
    int cpu, next_cpu, this_cpu = smp_processor_id();

    /*
     * Can deadlock when called with interrupts disabled.
     * We allow cpu's that are not yet online though, as no one else can
     * send smp call function interrupt to this cpu and as such deadlocks
     * can't happen.
     */
    WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled());

    /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
    cpu = cpumask_first_and(mask, cpu_online_mask);
    if (cpu == this_cpu)
        cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

    /* No online cpus?  We're done. */
    if (cpu >= nr_cpu_ids)
        return;

    /* Do we have another CPU which isn't us? */
    next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
    if (next_cpu == this_cpu)
        next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

    /* Fastpath: do that cpu by itself. */
    if (next_cpu >= nr_cpu_ids) {
        smp_call_function_single(cpu, func, info, wait);
        return;
    }

    /* Slow path: stage per-target csds from this sender's cfd. */
    cfd = this_cpu_ptr(&cfd_data);

    cpumask_and(&cfd->cpumask, mask, cpu_online_mask);
    __cpumask_clear_cpu(this_cpu, &cfd->cpumask);

    /* Some callers race with other cpus changing the passed mask */
    if (unlikely(cpumask_empty(&cfd->cpumask)))   /* cheaper than cpumask_weight() */
        return;

    cpumask_clear(&cfd->cpumask_ipi);
    for_each_cpu(cpu, &cfd->cpumask) {
        call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

        csd_lock(csd);
        if (wait)
            csd->flags |= CSD_FLAG_SYNCHRONOUS;
        csd->func = func;
        csd->info = info;
        /* Only IPI CPUs whose queue went non-empty; others will drain anyway. */
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
            __cpumask_set_cpu(cpu, &cfd->cpumask_ipi);
    }

    /* Send a message to all CPUs in the map */
    arch_send_call_function_ipi_mask(&cfd->cpumask_ipi);

    if (wait) {
        /* Each target unlocks its csd only after running func(). */
        for_each_cpu(cpu, &cfd->cpumask) {
            call_single_data_t *csd;

            csd = per_cpu_ptr(cfd->csd, cpu);
            csd_lock_wait(csd);
        }
    }
}

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
    /*
     * smp_call_function_many() requires preemption to be disabled so
     * the online mask and our own CPU id stay stable for the call.
     */
    preempt_disable();
    smp_call_function_many(cpu_online_mask, func, info, wait);
    preempt_enable();

    return 0;
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
    unsigned long flags;
    int ret = 0;

    preempt_disable();
    ret = smp_call_function(func, info, wait);
    local_irq_save(flags);
    func(info);
    local_irq_restore(flags);
    preempt_enable();
    return ret;
}

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
            void *info, bool wait)
{
    int this_cpu = get_cpu();

    /* Kick the remote CPUs in @mask first ... */
    smp_call_function_many(mask, func, info, wait);

    /* ... then run locally, with irqs off, if we are in @mask too. */
    if (cpumask_test_cpu(this_cpu, mask)) {
        unsigned long irq_flags;

        local_irq_save(irq_flags);
        func(info);
        local_irq_restore(irq_flags);
    }
    put_cpu();
}

/* No-op IPI callback; used by kick_all_cpus_sync() below. */
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
    /* Make sure the change is visible before we kick the cpus */
    smp_mb();
    /* wait=1: returns only after every CPU has run the dummy callback. */
    smp_call_function(do_nothing, NULL, 1);
}

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 * wake_up_all_idle_cpus try to break all cpus which is in idle state even
 * including idle polling cpus, for non-idle cpus, we will do nothing
 * for them.
 */
/*
 * Poke every other online CPU that is sitting in idle (including idle
 * polling); CPUs that are busy are left alone. The local CPU is skipped.
 */
void wake_up_all_idle_cpus(void)
{
    int cpu;

    preempt_disable();
    for_each_online_cpu(cpu) {
        if (cpu != smp_processor_id())
            wake_up_if_idle(cpu);
    }
    preempt_enable();
}
