/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024.
 * Description: support rtos mach-hisi
 * Author: yangzhuohao
 * Create: 2024-04-10
 */
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/delay.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/mach/map.h>

#include "core.h"
#include <mach/io_map.h>
#include <linux/of_fdt.h>
#include <mach/platsmp.h>

#if defined(CONFIG_OF)

/*
 * Kind of SCU detected from the flattened device tree; defaults to a
 * generic (non-A9) interconnect until hisi_dt_find_scu() sees an
 * A5/A9-style SCU node.
 */
static enum {
	GENERIC_SCU,
	CORTEX_A9_SCU,
} hisi_dt_scu __initdata = GENERIC_SCU;

/*
 * Static mapping for the Cortex-A9 SCU registers.
 * NOTE(review): .length is SZ_128 here, but hisi_dt_find_scu() also
 * ioremap()s SZ_256 of the same region - confirm which size is intended.
 */
static struct map_desc hisi_dt_cortex_a9_scu_map __initdata = {
	.virtual        = V2T_PERIPH,
	/* .pfn set in hisi_dt_find_scu() */
	.length         = SZ_128,
	.type           = MT_DEVICE,
};

/* Virtual base of the SCU, set by hisi_dt_find_scu(); NULL until then. */
static void *hisi_dt_cortex_a9_scu_base __initdata;

/* FDT "compatible" strings identifying an A5/A9-style SCU node. */
static const char *hisi_dt_cortex_a9_match[] __initconst = {
	"arm,cortex-a5-scu",
	"arm,cortex-a9-scu",
	NULL
};

/*
 * of_scan_flat_dt() callback: locate a Cortex-A5/A9 SCU node in the
 * flattened device tree, record its presence and map its registers.
 *
 * Returns 0 to let the scan continue (including after a match), or a
 * negative errno to abort it; hisi_dt_smp_map_io() WARNs on non-zero.
 */
static int __init hisi_dt_find_scu(unsigned long node,
				const char *uname, int depth, void *data)
{
	if (of_flat_dt_match(node, hisi_dt_cortex_a9_match)) {
		phys_addr_t phys_addr;
		const __be32 *reg = of_get_flat_dt_prop(node, "reg", NULL);

		if (WARN_ON(!reg))
			return -EINVAL;

		/* Only the first "reg" cell is read.
		 * NOTE(review): assumes #address-cells == 1 for this node -
		 * confirm against the platform device trees. */
		phys_addr = be32_to_cpup(reg);
		hisi_dt_scu = CORTEX_A9_SCU;

		/* Static (iotable) mapping at the fixed V2T_PERIPH address. */
		hisi_dt_cortex_a9_scu_map.pfn = __phys_to_pfn(phys_addr);
		iotable_init(&hisi_dt_cortex_a9_scu_map, 1);

		/* Additional dynamic mapping used for SCU register access.
		 * NOTE(review): SZ_256 here vs SZ_128 in the map_desc above -
		 * verify both mappings and sizes are intentional. */
		hisi_dt_cortex_a9_scu_base = ioremap(phys_addr, SZ_256);
		if (WARN_ON(!hisi_dt_cortex_a9_scu_base))
			return -EFAULT;
	}

	return 0;
}

/*
 * Scan the flattened device tree for an A5/A9 SCU and map it.
 * Does nothing when the kernel was booted without a device tree blob.
 */
void __init hisi_dt_smp_map_io(void)
{
	if (!initial_boot_params)
		return;

	WARN_ON(of_scan_flat_dt(hisi_dt_find_scu, NULL));
}

/*
 * of_scan_flat_dt() callback that counts "cpu" nodes under /cpus.
 *
 * of_scan_flat_dt() keeps invoking the callback until it returns
 * non-zero, so this function carries state across calls in statics:
 * counting is armed when the "cpus" container node is first seen, and
 * the accumulated count is returned (terminating the scan) as soon as
 * the walk ascends back out of the /cpus subtree (depth decreases)
 * with at least one CPU counted.
 */
static int __init hisi_dt_cpus_num(unsigned long node, const char *uname,
				int depth, void *data)
{
	static int prev_depth = -1;	/* depth of the previous visited node */
	static int nr_cpus = -1;	/* -1 = not yet inside /cpus */

	/* Left the /cpus subtree: report the final count, ending the scan. */
	if (prev_depth > depth && nr_cpus > 0)
		return nr_cpus;

	/* First sight of the "cpus" container node: start counting. */
	if (nr_cpus < 0 && strcmp(uname, "cpus") == 0)
		nr_cpus = 0;

	if (nr_cpus >= 0) {
		const char *device_type = of_get_flat_dt_prop(node, "device_type", NULL);

		/* Each node with device_type = "cpu" is one core. */
		if (device_type && strcmp(device_type, "cpu") == 0)
			nr_cpus++;
	}

	prev_depth = depth;

	return 0;
}

/*
 * Determine the number of cores - from the SCU when a Cortex-A9 SCU was
 * found, otherwise by counting DT cpu nodes - and mark each core as
 * possible and present.
 *
 * With fewer than two cores nothing is done (the boot CPU is already
 * marked); counts above nr_cpu_ids are clipped with a warning.
 */
static void __init hisi_dt_smp_init_cpus(void)
{
	int ncores = 0, i;

#ifndef CONFIG_RTOS_HAL_BUGFIX
	switch (hisi_dt_scu) {
	case GENERIC_SCU:
		ncores = of_scan_flat_dt(hisi_dt_cpus_num, NULL);
		break;
	case CORTEX_A9_SCU:
		ncores = scu_get_core_count(hisi_dt_cortex_a9_scu_base);
		break;
	default:
		WARN_ON(1);
		break;
	}
#else
	ncores = of_scan_flat_dt(hisi_dt_cpus_num, NULL);
#endif

	if (ncores < 2)
		return;

	if (ncores > nr_cpu_ids) {
		/* %d for the signed ncores (the old %u mismatched its type). */
		pr_warn("SMP: %d cores greater than maximum (%u), clipping\n",
				ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; ++i) {
		set_cpu_possible(i, true);
		set_cpu_present(i, true);
	}
}

/*
 * Pre-SMP setup for Cortex-A9 class parts: enable the SCU when one was
 * found in the device tree, otherwise just mark the first max_cpus
 * CPUs present.
 */
static void __init hisi_a9_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;	/* unsigned to match max_cpus (was int: sign-compare) */

	switch (hisi_dt_scu) {
	case GENERIC_SCU:
		for (i = 0; i < max_cpus; i++)
			set_cpu_present(i, true);
		break;
	case CORTEX_A9_SCU:
		scu_enable(hisi_dt_cortex_a9_scu_base);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

volatile void *fabric_base;

/*
 * Pre-SMP setup for Cortex-A15 class parts: clear bit 5 of the L2
 * auxiliary control register (cp15 c15/c0/0, op1=1) if it is set, map
 * the "arm,fabric" interconnect registers, and mark the first max_cpus
 * CPUs present.
 */
static void __init hisi_a15_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;		/* unsigned to match max_cpus */
	unsigned int l2_aux_ctrl = 0;
	struct device_node *node;

	asm volatile ("mrc p15, 1, %0, c15, c0, 0\n" : "=r" (l2_aux_ctrl));

	if (l2_aux_ctrl & (1 << 5)) {
		l2_aux_ctrl &= ~(1 << 5);
		asm volatile ("mcr p15, 1, %0, c15, c0, 0\n"
				"isb\n" : : "r" (l2_aux_ctrl));
		flush_cache_all();
	}

	node = of_find_compatible_node(NULL, NULL, "arm,fabric");
	if (!node) {
		fabric_base = NULL;
	} else {
		fabric_base = (void *)of_iomap(node, 0);
		if (!fabric_base)
			pr_err("ioremap fabric addr error.\n");
		/* of_find_compatible_node() returns the node with an
		 * elevated refcount; drop it (was leaked before). */
		of_node_put(node);
	}
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}

#else

/* Non-DT (!CONFIG_OF) build: CPU enumeration is unsupported; warn loudly. */
static void __init hisi_dt_smp_init_cpus(void)
{
	WARN_ON(1);
}

/* Non-DT (!CONFIG_OF) build stub.
 * NOTE(review): the smp_ops in this file reference
 * hisi_a9_smp_prepare_cpus / hisi_a15_smp_prepare_cpus, which are only
 * defined under CONFIG_OF; this stub's name matches neither - confirm
 * that !CONFIG_OF configurations actually link. */
void __init hisi_dt_smp_prepare_cpus(unsigned int max_cpus)
{
	WARN_ON(1);
}

#endif

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init hisi_smp_init_cpus(void)
{
	/* Delegates to the DT-based (or !CONFIG_OF stub) implementation. */
	hisi_dt_smp_init_cpus();
}

/*
 * temporary add a cpu_hotplug function to support cpu offline
 * will implement real operation later.
 */
#ifdef CONFIG_HOTPLUG_CPU
int hisi_cpu_disable(unsigned int cpu)
{
	(void)cpu;
#ifdef CONFIG_RTOS_HAL_BUGFIX
	return 0;
#else
	pr_err("Error: BSP NOT support CPU offline now.\n");
	return -1;
#endif
}

#ifdef CONFIG_RTOS_HAL_BUGFIX
/*
 * We need a dummy function, so that platform_can_cpu_hotplug() knows
 * we support CPU hotplug.
 */
/*
 * Dummy .cpu_kill hook so platform_can_cpu_hotplug() reports hotplug
 * support; logs and always claims success (returns 1).
 */
int hisi_cpu_kill(unsigned int cpu)
{
	/* %u: cpu is unsigned int (the previous %d mismatched the type). */
	pr_err("CPU%u: BSP NOT support CPU kill now.\n", cpu);
	return 1;
}

/*
 * .cpu_die hook: park the dying CPU.
 *
 * Flushes caches, then via cp15 drops the core out of SMP coherency
 * (ACTLR.SMP) and disables its D-cache (SCTLR.C), flushes again and
 * enters idle.
 *
 * NOTE(review): there is no loop around cpu_do_idle(); if the core is
 * woken spuriously this function returns to the caller - confirm the
 * generic hotplug path tolerates that.
 */
void hisi_cpu_die(unsigned int cpu)
{
	unsigned int v;

	flush_cache_all();

	asm volatile(
	" mrc	p15, 0, %0, c1, c0, 1\n"
	" bic	%0, %0, #(1 << 6)\n" /* disable SMP */
	" mcr	p15, 0, %0, c1, c0, 1\n"
	" mrc	p15, 0, %0, c1, c0, 0\n"
	" bic	%0, %0, #(1 << 2)\n" /* disable D-Cache */
	" mcr	p15, 0, %0, c1, c0, 0\n"
	" isb\n"
	" dsb\n"
	: "=&r" (v)
	: "r" (0) /* input operand appears unused; kept as-is */
	: "cc");

	flush_cache_all();
	cpu_do_idle();
}
#endif
#endif

/* SMP operations for Cortex-A9 class SoCs (SCU-based bring-up).
 * NOTE(review): upstream style would be
 * "const struct smp_operations ... __initconst" - confirm before changing,
 * as the extern declarations live elsewhere. */
struct smp_operations __initdata hisi_a9_smp_ops = {
	.smp_init_cpus          = hisi_smp_init_cpus,
	.smp_prepare_cpus       = hisi_a9_smp_prepare_cpus,
	.smp_secondary_init     = hisi_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable            = hisi_cpu_disable,
#ifdef CONFIG_RTOS_HAL_BUGFIX
	.cpu_kill               = hisi_cpu_kill,
	.cpu_die                = hisi_cpu_die,
#endif
#endif
	.smp_boot_secondary     = hisi_boot_secondary,
};

/* SMP operations for Cortex-A15 class SoCs (fabric-based bring-up). */
struct smp_operations __initdata hisi_a15_smp_ops = {
	.smp_init_cpus          = hisi_smp_init_cpus,
	.smp_prepare_cpus       = hisi_a15_smp_prepare_cpus,
	.smp_secondary_init     = hisi_secondary_init,
	.smp_boot_secondary     = hisi_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable            = hisi_cpu_disable,
#ifdef CONFIG_RTOS_HAL_BUGFIX
	.cpu_kill               = hisi_cpu_kill,
	.cpu_die                = hisi_cpu_die,
#endif
#endif
};
