/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <seminix/start_kernel.h>
#include <seminix/linkage.h>
#include <seminix/smp.h>
#include <seminix/cpu.h>
#include <seminix/mmap.h>
#include <seminix/init.h>
#include <seminix/percpu.h>
#include <seminix/of.h>
#include <seminix/cpu.h>
#include <seminix/tcb.h>
#include <seminix/delay.h>
#include <seminix/interrupt.h>
#include <seminix/irq/irq_regs.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/mmu_context.h>
#include <asm/system_misc.h>

/* Each CPU's own logical id, written into its per-cpu area by smp_prepare_cpus(). */
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/* IPI message types, dispatched by handle_IPI(). */
enum ipi_msg_type {
    IPI_RESCHEDULE,     /* run scheduler_ipi() on the target CPU */
    IPI_CALL_FUNC,      /* run pending smp_call_function() callbacks */
    IPI_CPU_STOP,       /* park the target CPU (see smp_send_stop()) */
    NR_IPI
};

/*
 * Stub: no mechanism for forcibly killing a secondary CPU is wired up
 * yet, so always report "not implemented" to the caller (__cpu_up()).
 */
static inline int op_cpu_kill(unsigned int cpu)
{
    return -ENOSYS;
}

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
/*
 * Start CPU @cpu through its cpu_ops backend and hand it @idle as its
 * idle task (which also provides the initial stack).
 *
 * Returns the backend's result, or -EOPNOTSUPP if this CPU has no
 * cpu_boot method at all.
 */
static int boot_secondary(unsigned int cpu, struct tcb *idle)
{
    if (!cpu_ops[cpu]->cpu_boot)
        return -EOPNOTSUPP;

    return cpu_ops[cpu]->cpu_boot(cpu);
}

/* Per-CPU "reached secondary_start_kernel()" flags, polled by __cpu_up(). */
static __initdata int cpu_running[CONFIG_NR_CPUS] = { 0 };

/*
 * Bring CPU @cpu online with @idle as its idle task.
 *
 * Publishes the idle task and its stack in secondary_data (cleaned to
 * the point of coherency so the secondary can read it with the MMU
 * off), starts the core via boot_secondary() and then waits up to one
 * second for it to flag itself as running.
 *
 * Returns 0 on success, -EIO if the core started but never came
 * online, or the boot_secondary() error code if it could not be
 * started at all. On failure the early-boot status word is decoded to
 * report why the CPU got stuck.
 */
int __cpu_up(int cpu, struct tcb *idle)
{
    int ret;
    int count;
    long status;

    /*
     * We need to tell the secondary core where to find its stack and the
     * page tables.
     */
    secondary_data.task = idle;
    secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
    update_cpu_boot_status(CPU_MMU_OFF);
    __flush_dcache_area(&secondary_data, sizeof(secondary_data));

    /*
     * Now bring the CPU into our world.
     */
    ret = boot_secondary(cpu, idle);
    if (ret) {
        pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        return ret;
    }

    /*
     * CPU was successfully started; wait for it to come online or
     * time out (one second, polled in 1us steps).
     */
    count = USEC_PER_SEC;
    while (count--) {
        if (cpu_running[cpu])
            break;
        ndelay(1000);
    }

    if (!cpu_online(cpu)) {
        pr_crit("CPU%u: failed to come online\n", cpu);
        ret = -EIO;
    }

    secondary_data.task = NULL;
    secondary_data.stack = NULL;
    status = READ_ONCE(secondary_data.status);
    if (ret && status) {

        /*
         * The CPU may have died before it got far enough to update
         * secondary_data.status; fall back to the pre-MMU status word.
         */
        if (status == CPU_MMU_OFF)
            status = READ_ONCE(__early_cpu_boot_status);

        switch (status & CPU_BOOT_STATUS_MASK) {
        default:
            pr_err("CPU%u: failed in unknown state : 0x%lx\n",
                    cpu, status);
            break;
        case CPU_KILL_ME:
            if (!op_cpu_kill(cpu)) {
                pr_crit("CPU%u: died during early boot\n", cpu);
                break;
            }
            pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
            /* Fall through */
        case CPU_STUCK_IN_KERNEL:
            pr_crit("CPU%u: is stuck in kernel\n", cpu);
            if (status & CPU_STUCK_REASON_52_BIT_VA)
                pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
            if (status & CPU_STUCK_REASON_NO_GRAN)
                pr_crit("CPU%u: does not support %luK granule\n", cpu, UTILS_PAGE_SIZE / SZ_1K);
            break;
        case CPU_PANIC_KERNEL:
            panic("CPU%u detected unsupported configuration\n", cpu);
        }
    }

    return ret;
}

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	unsigned int cpu;

	/* Our logical id was chosen by the boot CPU before it started us. */
	cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));

	/* NOTE(review): assumes mmget() pins current->mm for this CPU — confirm. */
    mmget(current->mm);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

    preempt_disable();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	check_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
					 cpu, (unsigned long)mpidr,
					 read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	/* Release __cpu_up(), which is polling this flag on the boot CPU. */
    cpu_running[cpu] = 1;

    system_tick_init();

	/* Early-boot DAIF masking is over; restore normal interrupt state. */
    local_daif_restore(DAIF_PROCCTX);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry();
}

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online. Never returns.
 */
void cpu_die_early(void)
{
	int this_cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", this_cpu);

	/* Drop out of the online mask so the boot CPU gives up on us. */
	set_cpu_online(this_cpu, false);

	/* Tell __cpu_up() why we never made it. */
	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}

/*
 * Report which exception level the CPUs entered the kernel at, and
 * warn if they disagree (a mix of EL2 and EL1 entries).
 *
 * Fix: the "inconsistent modes" warning was missing its trailing
 * newline, unlike the sibling messages.
 */
static void __init hyp_mode_check(void)
{
	if (is_boot_el2())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		pr_warn("CPU: CPUs started in inconsistent modes\n");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

/*
 * Final step of SMP bring-up: report the number of online CPUs, then
 * run setup_cpu_features(), check EL2/EL1 entry consistency and
 * write-protect the linear alias of kernel text.
 */
void __init smp_cpus_done(void)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	mark_linear_text_alias_ro();
}

/*
 * Early boot-CPU setup: point the per-cpu offset at the boot CPU's
 * per-cpu area and record its cpuinfo.
 */
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();
}

/*
 * Read the MPIDR hardware id of the cpu node @dn from its "reg"
 * property. Returns INVALID_HWID when the property is missing or any
 * non-affinity bit is set.
 */
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
    const __be32 *reg;
    u64 hwid;

    /* A cpu node without "reg" cannot get a cpu_logical_map entry. */
    reg = of_get_property(dn, "reg", NULL);
    if (!reg) {
        pr_err("%pOF: missing reg property\n", dn);
        return INVALID_HWID;
    }

    hwid = of_read_number(reg, of_n_addr_cells(dn));

    /* The DT must leave all non-affinity MPIDR bits clear. */
    if (hwid & ~MPIDR_HWID_BITMASK) {
        pr_err("%pOF: invalid reg property\n", dn);
        return INVALID_HWID;
    }

    return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
/*
 * Check whether @hwid already appears in cpu_logical_map below logical
 * id @cpu. Slot 0 (the boot CPU) is handled separately by the caller,
 * so scanning starts at 1.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
    unsigned int idx;

    for (idx = 1; idx < cpu && idx < CONFIG_NR_CPUS; idx++) {
        if (cpu_logical_map(idx) == hwid)
            return true;
    }

    return false;
}

/* Set once a DT cpu node matching the boot CPU's MPIDR has been seen. */
static bool bootcpu_valid __initdata;
/* Next logical cpu id to assign; slot 0 is reserved for the boot CPU. */
static int cpu_count = 1;

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
    struct device_node *dn;

    for_each_of_cpu_node(dn) {
        u64 hwid = of_get_cpu_mpidr(dn);

        /* Rejected nodes still advance cpu_count via the 'next' label,
         * presumably so smp_init_cpus() can warn about the true node
         * count — TODO confirm this accounting is intentional. */
        if (hwid == INVALID_HWID)
            goto next;

        if (is_mpidr_duplicate(cpu_count, hwid)) {
            pr_err("%pOF: duplicate cpu reg properties in the DT\n",
                dn);
            goto next;
        }

        /*
         * The numbering scheme requires that the boot CPU
         * must be assigned logical id 0. Record it so that
         * the logical map built from DT is validated and can
         * be used.
         */
        if (hwid == cpu_logical_map(0)) {
            if (bootcpu_valid) {
                pr_err("%pOF: duplicate boot cpu reg property in DT\n",
                    dn);
                goto next;
            }

            bootcpu_valid = true;

            /*
             * cpu_logical_map has already been
             * initialized and the boot cpu doesn't need
             * the enable-method so continue without
             * incrementing cpu.
             */
            continue;
        }

        /* Nodes beyond the configured maximum are counted but not mapped. */
        if (cpu_count >= CONFIG_NR_CPUS)
            goto next;

        pr_debug("cpu logical map 0x%llx\n", hwid);
        cpu_logical_map(cpu_count) = hwid;

next:
        cpu_count++;
    }
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
/*
 * Initialize cpu operations for logical cpu @cpu and, on success, mark
 * it possible. Returns 0 on success, -ENODEV if either the ops lookup
 * or the backend's cpu_init hook fails.
 */
static int __init smp_cpu_setup(int cpu)
{
    int err = cpu_read_ops(cpu);

    if (!err)
        err = cpu_ops[cpu]->cpu_init(cpu);

    if (err)
        return -ENODEV;

    set_cpu_possible(cpu, true);
    return 0;
}

/*
 * Build the set of possible secondary CPUs from the device tree.
 *
 * Parses the DT cpu nodes into cpu_logical_map, warns if more cores
 * were found than the kernel supports, and refuses to enable any
 * secondaries when the boot CPU itself was not found in the DT. Each
 * mapped entry is then initialised; entries whose set-up fails are
 * invalidated so they are never booted.
 */
void __init smp_init_cpus(void)
{
    int cpu;

    of_parse_and_init_cpus();

    if (cpu_count > nr_cpu_ids)
        pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
            cpu_count, nr_cpu_ids);

    if (!bootcpu_valid) {
        pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
        return;
    }

    /*
     * cpu_logical_map entries must be in place before the cpus are
     * enabled, so processor descriptions can be found by hwid while
     * each cpu initializes. Entries that fail set-up are invalidated.
     */
    for (cpu = 1; cpu < nr_cpu_ids; cpu++) {
        if (cpu_logical_map(cpu) == INVALID_HWID)
            continue;

        if (smp_cpu_setup(cpu))
            cpu_logical_map(cpu) = INVALID_HWID;
    }
}

/*
 * Prepare every possible secondary CPU for boot.
 *
 * Records each CPU's logical id in its per-cpu area and invokes the
 * cpu_ops cpu_prepare hook for each possible secondary. A CPU whose
 * preparation fails is removed from the possible mask.
 */
void __init smp_prepare_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(cpu_number, cpu) = cpu;

		/* The boot CPU and CPUs without ops need no preparation. */
		if (cpu == smp_processor_id() || !cpu_ops[cpu])
			continue;

		if (cpu_ops[cpu]->cpu_prepare(cpu))
			set_cpu_possible(cpu, false);
	}
}

/* Backend hook that actually raises IPIs; installed by set_smp_cross_call(). */
void (*__smp_cross_call)(const struct cpumask *, int);

/* Register the hook smp_cross_call() uses to raise IPIs. */
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, int))
{
    __smp_cross_call = fn;
}

/* Raise IPI @ipinr on every CPU in @target via the registered backend. */
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
    __smp_cross_call(target, ipinr);
}

/* Kick every CPU in @mask to run pending smp_call_function() work. */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
    smp_cross_call(mask, IPI_CALL_FUNC);
}

/* Kick a single CPU to run pending smp_call_function() work. */
void arch_send_call_function_single_ipi(int cpu)
{
    smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
/*
 * ipi_cpu_stop - handle IPI from smp_send_stop(): take this CPU
 * offline, mask all DAIF exceptions and spin forever.
 */
static void ipi_cpu_stop(unsigned int cpu)
{
    set_cpu_online(cpu, false);

    local_daif_mask();

    for (;;)
        cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
/*
 * Main handler for inter-processor interrupts.
 *
 * Dispatches on the IPI number raised via smp_cross_call(); the
 * previous irq_regs pointer is restored before returning.
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
    struct pt_regs *old_regs = set_irq_regs(regs);
    int cpu = smp_processor_id();

    switch (ipinr) {
    case IPI_RESCHEDULE:
        /* Handled without the irq_enter()/irq_exit() bracket. */
        scheduler_ipi();
        break;

    case IPI_CALL_FUNC:
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
        break;

    case IPI_CPU_STOP:
        irq_enter();
        ipi_cpu_stop(cpu);
        irq_exit();
        break;

    default:
        pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
        break;
    }

    set_irq_regs(old_regs);
}

/* Send a reschedule IPI to @cpu (serviced by scheduler_ipi() in handle_IPI()). */
void smp_send_reschedule(int cpu)
{
    smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

/*
 * Stop all other CPUs: send IPI_CPU_STOP to every online CPU except
 * the caller, then wait up to one second for them to go offline,
 * complaining if any remain.
 */
void smp_send_stop(void)
{
    unsigned long remaining;

    if (num_online_cpus() > 1) {
        cpumask_t targets;

        /* Everyone online but ourselves. */
        cpumask_copy(&targets, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &targets);

        if (system_state <= SYSTEM_RUNNING)
            pr_crit("SMP: stopping secondary CPUs\n");
        smp_cross_call(&targets, IPI_CPU_STOP);
    }

    /* Wait up to one second for other CPUs to stop */
    for (remaining = USEC_PER_SEC; remaining && num_online_cpus() > 1; remaining--)
        udelay(1);

    if (num_online_cpus() > 1)
        pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
               cpumask_pr_args(cpu_online_mask));
}
