/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2014-2019.
 * Description: support core reset
 * Author: xiekunxun <xiekunxun@huawei.com>
 * Create: 2014-02-18
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/hal/common.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

#include <mach/platform.h>
#include <mach/platsmp.h>
#include <linux/io.h>
#include <mach/irqs.h>
#include <mach/slave_core.h>
#include <linux/slab.h>
#include <mach/kexport.h>
#include <linux/of.h>

#ifdef CONFIG_RTOS_HAL_DOUBLE_CLUSTER
#include <linux/hal32/double-cluster.h>
#endif

#include <linux/of_address.h>
#include <linux/hal/drol.h>

#ifdef CONFIG_RTOS_HAL_CORE_RESET_LOG
#include <linux/early_kbox.h>
#endif

/* REG_SC_CTRL bit that clears the boot remap (see common_a9_core_reset()) */
#define	SC_SCTL_REMAP_CLR       (0x00000100)
/* sp810 sysctl register base; mapped by common_a9_core_reset_prepare() */
static void __iomem *a9_sysctl;

/*
 * Map the sp810 system controller registers used by the legacy
 * (non-dts-described) A9 core reset path.
 *
 * On failure a9_sysctl stays NULL and common_a9_core_reset() refuses
 * to run.
 */
static void common_a9_core_reset_prepare(void)
{
	struct device_node *np = NULL;

	np = of_find_compatible_node(NULL, NULL, "arm,sp810");
	if (!np) {
		pr_err("ioremap sysctl failed\n");
		return;
	}

	a9_sysctl = of_iomap(np, 0);
	/* drop the reference taken by of_find_compatible_node() */
	of_node_put(np);
	if (!a9_sysctl)
		pr_err("ioremap sysctl failed\n");
}

/*
 * Legacy A9 reset path: reset one secondary core through the sp810
 * system controller.
 *
 * Sequence: clear the boot remap, then pulse the core's bit in
 * REG_SCTL_RESET (set, wait, clear).
 *
 * NOTE(review): cpu appears to be the index within its cluster when
 * CONFIG_RTOS_HAL_DOUBLE_CLUSTER is set — confirm against callers.
 */
static void common_a9_core_reset(unsigned int cpu)
{
	unsigned int reg_value;
	unsigned int base_mask;

	/* prepare step failed; nothing can be done */
	if (!a9_sysctl) {
		pr_err("ioremap sysctl failed\n");
		return;
	}
	/* clear remap so the core fetches from normal memory after reset */
	reg_value = readl(a9_sysctl + REG_SC_CTRL);
	barrier();
	reg_value |= SC_SCTL_REMAP_CLR;
	barrier();
	writel(reg_value, a9_sysctl + REG_SC_CTRL);

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	udelay(50);
#ifdef CONFIG_RTOS_HAL_DOUBLE_CLUSTER
#define NUM_CORES_IN_CLUSTER 4
	/* index of cores in cluster1 start from 4 */
	if (get_cluster_id() == 1)
		base_mask = 0x1 << (cpu + NUM_CORES_IN_CLUSTER);
	else
#endif
	/* reset cpu (NB: this statement is the else-branch when the
	 * double-cluster config is enabled) */
	base_mask = 0x1 << cpu;

	/* assert the core's reset bit */
	reg_value = readl(a9_sysctl + REG_SCTL_RESET);
	reg_value |= base_mask;
	writel(reg_value, a9_sysctl + REG_SCTL_RESET);

	udelay(50);

	/* release the core from reset */
	reg_value = readl(a9_sysctl + REG_SCTL_RESET);
	reg_value &= ~base_mask;
	writel(reg_value, a9_sysctl + REG_SCTL_RESET);
}

/*
 * Return true when the device tree carries a generic "rtos_core_reset"
 * description, i.e. the drol-based reset path should be used instead of
 * the legacy sp810 one.
 */
static bool has_general_reset_info(void)
{
	struct device_node *np = NULL;

	np = of_find_compatible_node(NULL, NULL, "rtos_core_reset");
	if (!np)
		return false;

	/* drop the reference taken by of_find_compatible_node() */
	of_node_put(np);
	return true;
}

#define MAX_NODE_NAME_LEN 20
/* scratch buffer holding the dts node name currently being looked up */
static char node_name[MAX_NODE_NAME_LEN];
/* maximum number of reset operations described per core */
#define OP_MAX 16
/* reset_type value: one shared dts node, shifted per cluster/core */
#define HAL_SHIFT_BY_CORE 1
#define CORES_PER_CLUSTER 4
/* scratch op list for the core currently being prepared or reset */
static struct drol_op reset_op[OP_MAX];
/* mapped register address per (physical cpu, op); filled at prepare time */
static void *core_reset_op_addr[NR_CPUS][OP_MAX];
/* largest per-cluster address stride seen in dts; sizes core_reset_ioremap() */
static int max_addr_shift_per_cluster;
/* per-op layout parsed from the dts (see get_drol_op_property()) */
struct drol_op_property {
	unsigned int addr_shift_per_cluster;       /* address stride between clusters */
	unsigned int op_shift_per_cluster;         /* bit shift between clusters */
	unsigned int op_shift_per_core_in_cluster; /* bit shift between cores in a cluster */
	unsigned int set_every_core;               /* bits OR-ed into .set for every core */
};
static struct drol_op_property reset_op_property[OP_MAX];

/*
 * Translate a logical cpu number into the physical core id used to
 * index the reset tables and to build the "rtos_core_%d" node names.
 */
static int get_phy_cpu_id(int cpu)
{
	if (!aff_level_set())
		return cpu_logical_map(cpu);

	return mpidr_to_phyid(cpu_logical_map(cpu));
}

/*
 * Parse the per-operation layout properties from the children of the
 * @name dts node into @p (at most @size entries). Missing properties
 * fall back to defaults (shift 0, per-core shift 1, no always-set bits).
 *
 * Side effect: raises max_addr_shift_per_cluster to the largest
 * addr_shift_per_cluster seen, which sizes core_reset_ioremap().
 *
 * Returns @size on success, -EINVAL on bad arguments, -ENODEV when the
 * node is absent or has fewer children than @size, -ENOMEM when it has
 * more children than @size.
 */
static int get_drol_op_property(struct drol_op_property *p, int size, const char *name)
{
	int i = 0;
	struct device_node *np = NULL;
	struct device_node *node = NULL;

	if (!p || !name || size <= 0)
		return -EINVAL;

	node = of_find_compatible_node(NULL, NULL, name);
	if (!node)
		return -ENODEV;

	for_each_child_of_node(node, np) {
		if (i >= size) {
			/* breaking out of the iterator: release both refs */
			of_node_put(np);
			of_node_put(node);
			return -ENOMEM;
		}

		if (of_property_read_u32(np, "addr_shift_per_cluster", &(p[i].addr_shift_per_cluster)))
			p[i].addr_shift_per_cluster = 0;

		max_addr_shift_per_cluster = max_addr_shift_per_cluster > p[i].addr_shift_per_cluster ?
									max_addr_shift_per_cluster : p[i].addr_shift_per_cluster;

		if (of_property_read_u32(np, "op_shift_per_cluster", &(p[i].op_shift_per_cluster)))
			p[i].op_shift_per_cluster = 0;

		if (of_property_read_u32(np, "op_shift_per_core_in_cluster", &(p[i].op_shift_per_core_in_cluster)))
			p[i].op_shift_per_core_in_cluster = 1;

		if (of_property_read_u32(np, "set_every_core", &(p[i].set_every_core)))
			p[i].set_every_core = 0;

		i++;
	}

	of_node_put(node);

	return (i == size) ? i : -ENODEV;
}

/*
 * Specialise the shared op list @op for @cpu: relocate each register to
 * the cpu's cluster bank and shift the set/clear/xor masks to the cpu's
 * bit position, per the dts-described layout in @p.
 *
 * Returns 0 on success, -ENOMEM if the cpu's registers were never mapped.
 */
static int get_reset_op(struct drol_op *op, struct drol_op_property *p, int size, int cpu)
{
	int idx;
	unsigned int shift;
	int cluster = cpu / CORES_PER_CLUSTER;
	int core = cpu % CORES_PER_CLUSTER;

	for (idx = 0; idx < size; idx++) {
		if (!core_reset_op_addr[cpu][idx])
			return -ENOMEM;

		/* move to this cluster's copy of the register bank */
		op[idx].reg += cluster * p[idx].addr_shift_per_cluster;
		op[idx].addr = core_reset_op_addr[cpu][idx] +
			       cluster * p[idx].addr_shift_per_cluster;

		/* position the per-core bits inside the register */
		shift = cluster * p[idx].op_shift_per_cluster +
			core * p[idx].op_shift_per_core_in_cluster;
		op[idx].set <<= shift;
		op[idx].clear <<= shift;
		op[idx].xor <<= shift;

		/* bits that must be set no matter which core is reset */
		op[idx].set |= p[idx].set_every_core;
	}

	return 0;
}

/*
 * drol mapping callback: map enough of the reset register bank to cover
 * every cluster's copy.
 */
static void *core_reset_ioremap(phys_addr_t phys)
{
	/*
	 * some board will set reset_type=1 but only use one cluster,
	 * in this case max_addr_shift_per_cluster will be 0,
	 * need give a default REG_SIZE for ioremap.
	 */
	if (!max_addr_shift_per_cluster)
		max_addr_shift_per_cluster = REG_SIZE;

	return ioremap(phys,
		       NR_CPUS / CORES_PER_CLUSTER * max_addr_shift_per_cluster);
}

/* drol unmapping callback paired with core_reset_ioremap(). */
static void core_reset_iounmap(void *virt)
{
	iounmap(virt);
}

/*
 * Prepare the "shift by core" layout (reset_type == 1): one shared
 * "rtos_core_reset" dts node describes a register bank whose address
 * and bit positions are specialised per cluster/core later, at reset
 * time, by get_reset_op().
 */
static void general_core_reset_prepare_shift_by_core(void)
{
	int nr, ret, i, cpu;

	snprintf(node_name, MAX_NODE_NAME_LEN, "rtos_core_reset");

	nr = drol_get_op(reset_op, OP_MAX, node_name, NULL);
	if (nr < 0) {
		printk("get reset info for failed %d, check dts\n", nr);
		return;
	}

	ret = get_drol_op_property(reset_op_property, nr, node_name);
	if (ret < 0) {
		printk("get op property for failed %d, check dts\n", ret);
		return;
	}

	/* map once; per-cluster offsets are added later in get_reset_op() */
	ret = drol_map_op(reset_op, nr, core_reset_ioremap, core_reset_iounmap);
	if (ret) {
		printk("map reset reg for failed\n");
		return;
	}

	/*
	 * Every core records the same mapped base address.
	 * NOTE(review): the table is filled under get_phy_cpu_id(cpu) but
	 * general_core_reset() indexes it with its raw cpu argument —
	 * confirm callers pass physical ids when logical != physical.
	 */
	for_each_present_cpu(cpu) {
		for (i = 0; i < nr; i++)
			core_reset_op_addr[get_phy_cpu_id(cpu)][i] = reset_op[i].addr;
	}
}

/*
 * Prepare the per-core layout (reset_type == 0): each secondary core
 * has its own "rtos_core_<N>" dts node; map its registers now and
 * remember the addresses for reset time.
 *
 * Failures are logged per core and skipped so one bad node does not
 * block the others.
 */
static void general_core_reset_prepare_each_core(void)
{
	int cpu, ret, nr, i, phy_cpu;

	for_each_present_cpu(cpu) {
		phy_cpu = get_phy_cpu_id(cpu);
		/* main core do not have rtos_core_0 msg. */
		if (phy_cpu == 0)
			continue;

		snprintf(node_name, MAX_NODE_NAME_LEN, "rtos_core_%d", phy_cpu);
		nr = drol_get_op(reset_op, OP_MAX, node_name, NULL);
		if (nr < 0) {
			printk("get reset info for cpu %d failed %d, check dts\n", phy_cpu, nr);
			continue;
		}
		ret = drol_map_op(reset_op, nr, DROL_IOREMAP, DROL_IOUNMAP);
		if (ret) {
			printk("map reset reg for cpu %d failed\n", phy_cpu);
			continue;
		}
		/* remember this core's mapped register addresses */
		for (i = 0; i < nr; i++)
			core_reset_op_addr[phy_cpu][i] = reset_op[i].addr;
	}
}

/*
 * Dispatch to the prepare routine matching the dts "reset_type"
 * property of the "rtos_core_reset" node (absent property -> 0,
 * i.e. one node per core).
 */
static void general_core_reset_prepare(void)
{
	int reset_type;
	struct device_node *np = NULL;

	np = of_find_compatible_node(NULL, NULL, "rtos_core_reset");
	if (of_property_read_u32(np, "reset_type", &reset_type))
		reset_type = 0;
	/* of_node_put(NULL) is a no-op; balances of_find_compatible_node() */
	of_node_put(np);

	if (reset_type == HAL_SHIFT_BY_CORE)
		general_core_reset_prepare_shift_by_core();
	else
		general_core_reset_prepare_each_core();
}

/*
 * Generic reset handler registered with the hal layer.
 *
 * Re-reads the op list from the device tree on every invocation
 * (reset_op is refreshed by drol_get_op()), patches in the register
 * addresses mapped at prepare time, then executes the operations.
 *
 * NOTE(review): @cpu indexes core_reset_op_addr[] directly, while the
 * prepare path stores entries under get_phy_cpu_id() — confirm callers
 * pass physical core ids.
 */
static void general_core_reset(unsigned int cpu)
{
	int nr, ret, reset_type, i;
	struct device_node *np = NULL;

	np = of_find_compatible_node(NULL, NULL, "rtos_core_reset");
	if (of_property_read_u32(np, "reset_type", &reset_type))
		reset_type = 0;
	/* balance the reference taken by of_find_compatible_node() */
	of_node_put(np);

	if (reset_type == HAL_SHIFT_BY_CORE)
		snprintf(node_name, MAX_NODE_NAME_LEN, "rtos_core_reset");
	else
		snprintf(node_name, MAX_NODE_NAME_LEN, "rtos_core_%d", cpu);

	nr = drol_get_op(reset_op, OP_MAX, node_name, NULL);
	if (nr < 0) {
		printk("get reset info for cpu %d failed %d, check dts\n", cpu, nr);
		return;
	}

	if (reset_type == HAL_SHIFT_BY_CORE) {
		ret = get_drol_op_property(reset_op_property, nr, node_name);
		if (ret < 0) {
			printk("get op property for cpu %d failed %d, check dts\n", cpu, ret);
			return;
		}

		/* specialise the shared op list for this core */
		ret = get_reset_op(reset_op, reset_op_property, nr, cpu);
		if (ret) {
			printk("map reset reg for cpu %d failed\n", cpu);
			return;
		}
	} else {
		/* per-core layout: reuse the addresses mapped at prepare time */
		for (i = 0; i < nr; i++) {
			if (!core_reset_op_addr[cpu][i]) {
				printk("map reset reg for cpu %d failed\n", cpu);
				return;
			}
			reset_op[i].addr = core_reset_op_addr[cpu][i];
		}
	}

	drol_do_op(reset_op, nr);
}

/*
 * Boot-time selection of the reset implementation: use the generic,
 * device-tree-described path when an "rtos_core_reset" node exists,
 * otherwise fall back to the legacy sp810-based A9 path; then register
 * the chosen handler with the hal layer.
 */
static int __init init_platform_handle(void)
{
	int ret;

	if (has_general_reset_info()) {
		general_core_reset_prepare();
		ret = register_core_reset_handle(general_core_reset);
	} else {
		common_a9_core_reset_prepare();
		ret = register_core_reset_handle(common_a9_core_reset);
	}
	if (ret) {
		printk("Register core reset handle error:%d\n", ret);
		return ret;
	}

#ifdef CONFIG_RTOS_HAL_CORE_RESET_LOG
	/* blank the early kbox log area: spaces plus a trailing newline */
	if (ekbox_pcontent) {
		memset(ekbox_pcontent, 0x20, EKBOX_RESERVE_SIZE - 1);
		memset(ekbox_pcontent + EKBOX_RESERVE_SIZE - 1, '\n', 1);
	}
#endif

	return 0;
}

early_initcall(init_platform_handle);
