/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2020-2021.
 * Description: cpu core reset file
 * Author: yanbo <joey.yanbo@huawei.com>
 * Create: 2020-11-19
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/cputype.h>
#ifdef CONFIG_RTOS_ONT_SMP_WITH_THUMB
#include <asm/fncpy.h>
#endif

#include <mach/platform.h>
#include <mach/platsmp.h>
#include <mach/irqs.h>
#include <mach/slave_core.h>

#ifdef CONFIG_RTOS_CPU_MODE
#include <linux/hal/hisilicon.h>
#endif

#include <linux/of.h>
#include <linux/of_address.h>

#ifdef CONFIG_RTOS_HAL_CORE_RESET_LOG
#include <linux/early_kbox.h>
#include "core.h"
#endif

#ifdef CONFIG_RTOS_HAL_SLAVECORE_BOOT_RETRY
#define RETRY_MAX_TIME 5
#endif

/*
 * Boot-handshake mailbox between the boot CPU and a waking secondary.
 * The boot CPU stores the target CPU's MPIDR here (write_pen_release());
 * the released secondary writes -1 back once it is out of the pen.
 * volatile because it is polled across cores with explicit cache
 * maintenance rather than normal locking.
 */
volatile int pen_release;

/* Bounds of the slave-core reset stub (assembly) copied into the boot area. */
void hisi_reset_start(void);
void hisi_reset_start_end(void);

/* Kernel-virtual mapping of slave_boot_addr (direct map or ioremap). */
static void *jump_address_reset;
/* Platform callback that actually resets/boots a slave core. */
static void (*core_reset_handle)(unsigned int cpu);

#ifdef CONFIG_RTOS_HAL_CORE_RESET_LOG
/* Set when "core_reset_log" appears on the kernel command line. */
static int core_reset_log_enabled;

/*
 * Enable slave-core reset logging.
 *
 * early_param() handlers must return 0 on success: a non-zero return
 * value makes do_early_param() emit a spurious "Malformed early option"
 * warning for this parameter (the old "return 1" convention belongs to
 * __setup() handlers, not early_param()).
 */
static int __init parse_core_reset_log(char *p)
{
	core_reset_log_enabled = 1;
	return 0;
}
early_param("core_reset_log", parse_core_reset_log);
#endif
/*
 * Install the platform-specific callback used to reset/boot a slave core.
 * Only a single handler may be registered for the lifetime of the system.
 *
 * Returns 0 on success, -EPERM if a handler is already installed.
 */
int register_core_reset_handle(void (*handle)(unsigned int))
{
	if (!core_reset_handle) {
		core_reset_handle = handle;
		return 0;
	}

	printk(KERN_ERR"Handle already existed,register fail.\n");
	return -EPERM;
}

/*
 * Translate a linear physical CPU id into an MPIDR value.
 *
 * When affinity levels are configured (aff_level_set()), the cluster and
 * in-cluster CPU numbers are placed at cluster_aff_level/cpu_aff_level.
 * Otherwise the default layout is used: aff0 = CPU id, aff1 = cluster id.
 */
static unsigned int phyid_to_mpidr(unsigned int phyid)
{
	unsigned int cluster_id = phyid / cpu_per_cluster;
	unsigned int cpu_id = phyid % cpu_per_cluster;

	if (aff_level_set())
		return (cluster_id << (MPIDR_LEVEL_BITS * cluster_aff_level)) |
		       (cpu_id << (MPIDR_LEVEL_BITS * cpu_aff_level));

	return (cluster_id << MPIDR_LEVEL_BITS) | cpu_id;
}

unsigned int mpidr_to_phyid(unsigned int mpidr)
{
	unsigned int phyid, cluster;

	if (aff_level_set()) {
		phyid = MPIDR_AFFINITY_LEVEL(mpidr, cpu_aff_level);
		cluster = MPIDR_AFFINITY_LEVEL(mpidr, cluster_aff_level);
	} else {
		/* default level for hisilicon soc */
		phyid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	}

	return cluster * cpu_per_cluster + phyid;
}

/*
 * Store the L2-invalidate control flag in the slave boot area and push it
 * out to memory so a not-yet-coherent slave core can read it.
 *
 * @enable: L2INVAL_* value written at HISI_BOOT_L2INVALID_STORE.
 */
static void set_l2cache_invalid_flag(unsigned int enable)
{
	writel(enable, jump_address_reset + HISI_BOOT_L2INVALID_STORE);

	__cpuc_flush_dcache_area(jump_address_reset, 0x1000);
	/*
	 * outer_clean_range() takes a [start, end) physical range (see the
	 * pen_release clean below), not a (start, size) pair; passing the
	 * size as the second argument cleaned the wrong range.
	 */
	outer_clean_range(slave_boot_addr, slave_boot_addr + 0x1000);
	smp_wmb();
}

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 *
 * val is the MPIDR of the CPU being released, or -1 to signal "no CPU
 * pending" (written back by the secondary in hisi_secondary_init()).
 */
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_RTOS_HAL_BUGFIX)
static void
#else
static void __init
#endif
write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	/* Clean L1 then outer (L2) cache so a non-coherent observer sees it. */
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}

/* Serializes the boot handshake between the boot CPU and a waking secondary. */
static DEFINE_SPINLOCK(boot_lock);

/*
 * Runs on the freshly-booted secondary CPU: acknowledge the release and
 * synchronise with the boot CPU before entering the scheduler.
 * Non-__init when hotplug is enabled, since it runs again on every replug.
 */
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_RTOS_HAL_BUGFIX)
void
#else
void __init
#endif
hisi_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
#ifdef CONFIG_RTOS_HAL_CORE_RESET_LOG
	ekbox_record_point_fmt("[BOOT_INFO] slave core%d write pen:-1\n", cpu);
#endif
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

#ifdef CONFIG_RTOS_CPU_MODE
/*
 * Publish the parameters a slave core needs when it boots in non-secure
 * mode: GIC distributor base, max IRQ count and the physical RAM offset,
 * each at its fixed slot inside the boot flag area.
 */
static void prepare_slavecore_nonsec(void)
{
	writel(gic_dist_base_addr, jump_address_reset + HISI_NONSEC_GICADDR_STORE);
	writel(max_nr_irqs, jump_address_reset + HISI_NONSEC_MAXIRQ_STORE);
	writel(PHYS_OFFSET, jump_address_reset + HISI_SYS_PHYS_ADDR_STORE);
}
#endif

/*
 * Fill in the boot flag area read by the slave-core reset stub: secondary
 * entry point + magic at both flag slots, the double-reset marker, the
 * optional non-secure parameters and the reset-log address.
 */
static void init_boot_flags(void)
{
	unsigned int entry = virt_to_phys(&hisi_secondary_startup);

	/* publish the secondary entry point at both flag slots */
	writel(entry, jump_address_reset + HISI_BOOT_ADDR_OFFSET1);
	writel(HISI_BOOT_MAGIC_NUM, jump_address_reset + HISI_BOOT_MAGIC_OFFSET1);
	writel(entry, jump_address_reset + HISI_BOOT_ADDR_OFFSET2);
	writel(HISI_BOOT_MAGIC_NUM, jump_address_reset + HISI_BOOT_MAGIC_OFFSET2);

	/* boards that need a double reset advertise it via the magic value */
	writel((slave_boot_times == 2) ? HISI_BOOT_MAGIC_NUM : 0,
	       jump_address_reset + HISI_BOOT_TIMES_STORE);

#ifdef CONFIG_RTOS_CPU_MODE
	prepare_slavecore_nonsec();
#endif

#ifdef CONFIG_RTOS_HAL_CORE_RESET_LOG
	/* if core_reset_log_enabled, we use last half part of fiq EKBOX_RESERVE_SIZE for log */
	if (core_reset_log_enabled && core_reset_log_addr)
		writel(core_reset_log_addr + EKBOX_RESERVE_SIZE / 2, jump_address_reset + HISI_BOOT_LOG_ADDR_OFFSET);
	else
		writel(0, jump_address_reset + HISI_BOOT_LOG_ADDR_OFFSET);
#endif

	/* disable the action to invalidate l2cache as default */
	set_l2cache_invalid_flag(L2INVAL_DISABLE);
}

/*
 * Copy the slave-core reset stub into the dedicated boot area and publish
 * the boot flags, so that a soft-interrupted secondary CPU reads the
 * flags and branches to hisi_secondary_startup.
 */
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_RTOS_HAL_BUGFIX)
void
#else
void __init
#endif
rtos_deploy_bootcode(void)
{
	void *core1_startup;
	unsigned int code_len;

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	core1_startup = &hisi_reset_start;
	/*
	 * Subtracting function pointers is not valid standard C; compute the
	 * stub size on integer addresses instead.
	 */
	code_len = (unsigned long)&hisi_reset_start_end -
		   (unsigned long)&hisi_reset_start;
	if (code_len > SLAVE_CORE_BOOT_AREA_SIZE) {
		printk(KERN_ERR"Code for slave reboot is too large, do not boot up slave cores.\n");
		return;
	}
	/*
	 * Map the boot area: system RAM gets a direct mapping, anything else
	 * is ioremap()ed (and unmapped again in hisi_boot_secondary()).
	 */
	jump_address_reset = phys_to_virt(slave_boot_addr);
	if (!memblock_is_memory(slave_boot_addr))
		jump_address_reset = ioremap(slave_boot_addr, 0x1000);
	if (!jump_address_reset) {
		pr_err("Map slave_boot_addr error! slave_boot_addr=0x%pa\n", &slave_boot_addr);
		return;
	}
#ifdef CONFIG_RTOS_ONT_SMP_WITH_THUMB
	/* fncpy preserves the Thumb bit of the stub's entry point */
	fncpy((void *)jump_address_reset, core1_startup, code_len);
#else
	memcpy((void *)jump_address_reset, core1_startup, code_len);
#endif

	init_boot_flags();

	__cpuc_flush_dcache_area(jump_address_reset, SLAVE_CORE_BOOT_AREA_SIZE);
	/*
	 * outer_clean_range() expects a [start, end) physical range; the
	 * second argument is the end address, not the area size.
	 */
	outer_clean_range(slave_boot_addr,
			  slave_boot_addr + SLAVE_CORE_BOOT_AREA_SIZE);
	smp_wmb();
}

/*
 * Boot (or re-boot) secondary CPU @cpu: publish its MPIDR in pen_release,
 * deploy the reset stub, kick the core through the platform reset handle
 * and wait up to 1s for it to write -1 back into the pen.
 *
 * Returns 0 when the core came out of the pen, -ENOSYS otherwise.
 */
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_RTOS_HAL_BUGFIX)
int
#else
int __init
#endif
hisi_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	struct device_node *np;
	void __iomem *release_addr = NULL;

#ifdef CONFIG_RTOS_HAL_SLAVECORE_BOOT_RETRY
	unsigned int hard_retry_times = RETRY_MAX_TIME;
#endif
	unsigned int phy_cpu_id, mpidr;

	/*
	 *	if affinity level set, the cpu regs in dts will be considered as mpidr .
	 */
	if (aff_level_set()) {
		mpidr = cpu_logical_map(cpu);
		phy_cpu_id = mpidr_to_phyid(mpidr);
	} else {
		phy_cpu_id = cpu_logical_map(cpu);
		mpidr = phyid_to_mpidr(phy_cpu_id);
	}

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them.  However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
#ifdef CONFIG_RTOS_HAL_CORE_RESET_LOG
	ekbox_record_point_fmt("[BOOT_INFO] main core write pen:%d(cpuid = %d)\n",
							mpidr, phy_cpu_id);
	if (!core_reset_log_enabled)
		core_reset_log_addr = 0;
	smp_wmb();
	__cpuc_flush_dcache_area((void *)&core_reset_log_addr, sizeof(core_reset_log_addr));
	outer_clean_range(__pa(&core_reset_log_addr), __pa(&core_reset_log_addr + 1));
#endif
	write_pen_release(mpidr);

	rtos_deploy_bootcode();

	np = of_find_compatible_node(NULL, NULL, "cpu-release-addr");
	if (np) {
		release_addr = of_iomap(np, 0);
		if (!release_addr)
			pr_err("fail to map cpu-release-addr\n");
		/*
		 * of_find_compatible_node() returns the node with an elevated
		 * refcount; drop it now that the mapping is done.
		 */
		of_node_put(np);
	}

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	if (core_reset_handle) {
		(*core_reset_handle)(phy_cpu_id);

		if (slave_boot_times == 2) {
			/* as the watchdog init very early, we needn't to feed dog at here */
			mdelay(1000);   /* waiting for slave core ready */
			writel(0, jump_address_reset + HISI_BOOT_TIMES_STORE);
			printk("The board need reset twice, reboot core1 twice  delay...........\n");
			__cpuc_flush_dcache_area(jump_address_reset + HISI_BOOT_TIMES_STORE, 0x4);
			/*
			 * outer_clean_range() expects a [start, end) physical
			 * range, not a (start, size) pair.
			 */
			outer_clean_range(slave_boot_addr + HISI_BOOT_TIMES_STORE,
					  slave_boot_addr + HISI_BOOT_TIMES_STORE + 0x4);

			(*core_reset_handle)(phy_cpu_id);
		}
	}

	if (release_addr)
		writel_relaxed(slave_boot_addr, release_addr);

	/* poll up to 1s for the secondary to acknowledge via pen_release */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

#ifdef CONFIG_RTOS_HAL_SLAVECORE_BOOT_RETRY
	while (pen_release != -1 && hard_retry_times != 0) {
		hard_retry_times--;
		pr_info("SLAVE core%d not boot, try hard reset %d time...\n",
				cpu, RETRY_MAX_TIME - hard_retry_times);

		if (core_reset_handle)
			(*core_reset_handle)(phy_cpu_id);

		timeout = jiffies + (1 * HZ);
		while (time_before(jiffies, timeout)) {
			/* smp_rmb to make sure core0 read actual value for pen_release */
			smp_rmb();
			if (pen_release == -1)
				break;

			udelay(10);
		}
	}
#endif
	spin_unlock(&boot_lock);

	if (!memblock_is_memory(slave_boot_addr) && jump_address_reset) {
		iounmap(jump_address_reset);
		/* don't keep a dangling pointer to the unmapped boot area */
		jump_address_reset = NULL;
	}
	if (release_addr)
		iounmap(release_addr);
	return pen_release != -1 ? -ENOSYS : 0;
}
