/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2019.
 * Description: dying int support for powerpc
 * Author: wangmin <wmin.wang@huawei.com>
 * Create: 2018-10-30
 */

#include <linux/init.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/reboot.h>
#include <linux/sched/clock.h>
#include <asm/rtos_dying_int.h>
#include <asm/ipic.h>
#include <linux/of_address.h>
#include <linux/hal/watchdog.h>
#ifdef CONFIG_RTOS_EARLY_KBOX
#include <linux/early_kbox.h>
#endif

#include <internal/rtos_dying.h>

/* Serializes registration/unregistration on the dying callback list. */
static DEFINE_MUTEX(dying_register_lock);

/* First core to leave the critical exception triggers DDR self refresh. */
atomic_t first_exit_critical = ATOMIC_INIT(0);
/* First core to reach the reboot path performs the emergency restart. */
static atomic_t restart_control = ATOMIC_INIT(0);
/* Non-zero once the system has entered dying-interrupt mode. */
atomic_t dying_int_mode = ATOMIC_INIT(0);
/* Cores still expected to enter the dying path (counted down in dying_int_proc). */
atomic_t cpu_total = ATOMIC_INIT(NR_CPUS);

/* Optional hook run just before DDR self refresh (RCU-protected pointer). */
static void (*before_self_refresh_fun)(void);

/* List head for registered dying callbacks (walked under RCU). */
static struct dying_callback dying_callback_head = {0};
/* Runtime parameters, mostly parsed from the device tree. */
static struct dying_int_params g_dying_params    = {0};

/* sched_clock() timestamp taken on the first set_dying_mode() call. */
static unsigned long long g_dying_int_begin;

static int set_pic_dying_irq(int enable);

static void do_dying_callback(struct pt_regs *regs);

int request_fiq(void (*dying_callback_func)(unsigned int), unsigned int fiq_num)
{
	struct dying_callback *dying_callback_node = NULL;
	struct dying_callback *new_node              = NULL;
	struct list_head *t                           = NULL;

	if (dying_callback_func == NULL) {
		printk(KERN_ERR "dying callback register func is null!\n");
		return -EINVAL;
	}

	new_node = kzalloc(sizeof(struct dying_callback), GFP_KERNEL);
	if (new_node == NULL) {
		printk(KERN_ERR "Kmalloc for dying callback node failed!\n");
		return -EINVAL;
	}

	mutex_lock(&dying_register_lock);
	list_for_each(t, &(dying_callback_head.entry)) {
		dying_callback_node = list_entry(t, struct dying_callback, entry);

		if (dying_callback_node->notify_func == dying_callback_func) {
			mutex_unlock(&dying_register_lock);
			kfree(new_node);
			printk(KERN_ERR "dying info: the func(%pS) already registered!\n", dying_callback_func);
			return -EINVAL;
		}
	}

	new_node->notify_func = dying_callback_func;
	list_add_tail_rcu(&new_node->entry, &(dying_callback_head.entry));
	mutex_unlock(&dying_register_lock);

	printk(KERN_ERR "dying info: register success %pS\n", dying_callback_func);
	return 0;
}
EXPORT_SYMBOL(request_fiq);

int free_fiq(void (*dying_callback_func)(unsigned int), unsigned int fiq_num)
{
	struct dying_callback *dying_callback_node   = NULL;
	struct list_head *t                          = NULL;
	struct list_head *n                          = NULL;

	if (dying_callback_func == NULL) {
		printk(KERN_ERR "dying callback unregister func is null!\n");
		return -EINVAL;
	}

	mutex_lock(&dying_register_lock);
	list_for_each_safe(t, n, &(dying_callback_head.entry)) {
		dying_callback_node = list_entry(t, struct dying_callback, entry);

		if (dying_callback_node->notify_func == dying_callback_func) {
			list_del_rcu(&dying_callback_node->entry);
			kfree_rcu(dying_callback_node, rcu);
			mutex_unlock(&dying_register_lock);
			printk(KERN_ERR "dying info: unregister success %pS\n", dying_callback_func);
			return 0;
		}
	}
	mutex_unlock(&dying_register_lock);
	printk(KERN_ERR "Cannot find the callback %pS\n", dying_callback_func);
	return -EINVAL;
}
EXPORT_SYMBOL(free_fiq);

int free_fiq_all(unsigned int fiq_num)
{
	struct dying_callback *dying_callback_node   = NULL;
	struct list_head *t                          = NULL;
	struct list_head *n                          = NULL;

	mutex_lock(&dying_register_lock);

	list_for_each_safe(t, n, &(dying_callback_head.entry)) {
		dying_callback_node = list_entry(t, struct dying_callback, entry);
		list_del_rcu(&dying_callback_node->entry);
		kfree_rcu(dying_callback_node, rcu);
	}

	mutex_unlock(&dying_register_lock);

	return 0;
}
EXPORT_SYMBOL(free_fiq_all);

static DEFINE_PER_CPU(struct pt_regs, per_fiq_args);

/* Return this CPU's saved exception register snapshot (filled by do_dying_callback). */
struct pt_regs *fiq_get_regs(void)
{
	return &per_cpu(per_fiq_args, smp_processor_id());
}
EXPORT_SYMBOL(fiq_get_regs);

/*
 * Install @before_self_refresh as the hook run just before the DDR is put
 * into self-refresh (see before_ddr_self_refresh()). Passing NULL clears
 * the hook. Always returns 0.
 */
int register_ddr_self_refresh_fun(void(*before_self_refresh)(void))
{
	rcu_assign_pointer(before_self_refresh_fun, before_self_refresh);
	return 0;
}
EXPORT_SYMBOL(register_ddr_self_refresh_fun);

unsigned long long get_dying_time(void)
{
	unsigned long long dying_int_end  = 0;
	unsigned long long dying_int_time  = 0;

	dying_int_end = sched_clock();
	if (dying_int_end > g_dying_int_begin)
		dying_int_time = dying_int_end - g_dying_int_begin;
	return dying_int_time;
}
EXPORT_SYMBOL(get_dying_time);

/*
 * Common dying-interrupt handling: run the registered callbacks, then
 * spin until every core has passed through the dying path or the
 * configured delay budget (minus SELF_REFRESH_TIME) has elapsed.
 */
static void dying_int_proc(struct pt_regs *regs, const char exception_type)
{
	unsigned long long begin               = 0;
	unsigned long long now                 = 0;
	unsigned long long time_tmp            = 0;
	unsigned long long run_time_limit_nsec = 0;
#ifdef CONFIG_RTOS_EARLY_KBOX
	int cpu = raw_smp_processor_id();
#endif

	begin = g_dying_int_begin;
	/*
	 * We ensure there is 20ms left to execute bsp_nmi_func().
	 * NOTE(review): assumes delay_time >= SELF_REFRESH_TIME; delay_time
	 * is validated >= MIN_DELAY_TIME at DT parse time -- TODO confirm
	 * MIN_DELAY_TIME >= SELF_REFRESH_TIME.
	 */
	time_tmp = g_dying_params.dts_params.delay_time - SELF_REFRESH_TIME;
	run_time_limit_nsec = time_tmp * 1000000;

	do_dying_callback(regs);

	now = sched_clock();

	atomic_sub_return(1, &cpu_total);
	/*
	 * NO udelay or msleep here, IRQ already disabled.
	 *
	 * Fix: the old condition computed
	 *   run_time_limit_nsec - (long long)(now - begin) > 0
	 * in unsigned long long arithmetic, which is true for every value
	 * except exact equality -- once the deadline passed, the difference
	 * wrapped to a huge positive number and the loop never timed out.
	 * Compare the elapsed time directly instead.
	 */
	while ((atomic_read(&cpu_total) > 0)
			&& ((now - begin) < run_time_limit_nsec))
		now = sched_clock();
#ifdef CONFIG_RTOS_EARLY_KBOX
	/* record per-cpu exception marker in the early-kbox buffer */
	if (ekbox_pcontent)
		*(ekbox_pcontent + cpu) = exception_type;
#endif
}

/*
 * Mark the system as being in dying-interrupt mode and record the time
 * the dying sequence started (first caller wins).
 *
 * NOTE(review): the read-then-write of g_dying_int_begin is not atomic;
 * presumably only one core reaches here first or callers run with IRQs
 * disabled -- confirm.
 */
void set_dying_mode(void)
{
	atomic_set(&dying_int_mode, 1);

	if (g_dying_int_begin == 0)
		g_dying_int_begin = sched_clock();
}
/* True once set_dying_mode() has been called. */
bool get_dying_mode(void)
{
	return atomic_read(&dying_int_mode) > 0;
}

/* Invoke the registered pre-self-refresh hook, if any, under RCU protection. */
void before_ddr_self_refresh(void)
{
	void (*hook)(void);

	rcu_read_lock();
	hook = rcu_dereference(before_self_refresh_fun);
	if (hook)
		hook();
	rcu_read_unlock();
}

/*
 * Put the DDR controller into self-refresh mode by OR-ing the
 * self-refresh bit into its config register. Bails out (with an error
 * log) when self refresh is disabled in the DTS or the controller
 * registers were never mapped.
 */
void set_ddr_self_refresh(void)
{
	if ((g_dying_params.dts_params.ddr_self_refresh == 0)
			|| (g_dying_params.ddr_ctrl_addr == 0)) {
		printk(KERN_ERR "[DYING]: set_ddr_self_refresh failed = %d\n",
				g_dying_params.dts_params.ddr_self_refresh);
		return;
	}

	/* order all prior stores before touching the controller register */
	smp_mb();

	*(volatile unsigned int *)(g_dying_params.ddr_ctrl_addr +
			g_dying_params.dts_params.ddr_cfg_offset) |=
		DDR_CTRL_SET_SELF_REFRESH_MODE;

	smp_mb();
}

/* True when the dying interrupt has been enabled (status != DYING_DISABLE). */
bool get_dying_status(void)
{
	return g_dying_params.status != DYING_DISABLE;
}

/*
 * Run the pre-self-refresh hook and enter DDR self refresh exactly once:
 * only the first core to leave the critical exception does the work.
 */
void do_ddr_self_refresh(void)
{
	if (atomic_add_return(1, &first_exit_critical) != 1)
		return;

	before_ddr_self_refresh();
	set_ddr_self_refresh();
}

/*
 * External-watchdog timeout path: run the dying callbacks, put DDR into
 * self refresh, then park this core forever.
 */
void __noreturn dying_watchdog_proc(struct pt_regs *regs, const char exception_type)
{
	dying_int_proc(regs, exception_type);
	do_ddr_self_refresh();
	smp_mb();
	for (;;)
		;
}

/*
 * Run the dying callbacks and then reboot: only the first core to get
 * here issues the emergency restart; the rest park forever.
 */
void __noreturn dying_and_reboot_process(struct pt_regs *regs, const char exception_type)
{
	dying_int_proc(regs, exception_type);

	if (atomic_add_return(1, &restart_control) == 1)
		emergency_restart();
	for (;;)
		;
}

/*
 * This function must be called in interrupt context, because we must
 * get pt_regs by get_irq_regs().
 */
void dying_from_soft_watchdog(void)
{
	struct pt_regs *regs = NULL;

	/*
	 * dying_and_reboot_process will reboot the system and never return,
	 * so local_irq_enable isn't necessary after dying_and_reboot_process.
	 */
	local_irq_disable();
	set_dying_mode();
	regs = get_irq_regs();
	/*
	 * Executed in normal irq context: pass ' ' as the marker; the ekbox
	 * entry will be replaced in machine check/critical exception process.
	 */
	dying_and_reboot_process(regs, ' ');
}

/* Reset @dying_params to zero and apply the built-in defaults. */
static void dying_params_init(struct dying_int_params *dying_params)
{
	if (dying_params == NULL) {
		printk(KERN_ERR"[DYING]:dyingt_int_params_init failed!\n");
		return;
	}

	memset(dying_params, 0, sizeof(*dying_params));

	dying_params->dts_params.irq_type         = DYING_IRQ_MCP;
	dying_params->dts_params.delay_time       = MIN_DELAY_TIME;
	dying_params->dts_params.ddr_self_refresh = DYING_ENABLE;
	dying_params->status                      = DYING_DISABLE;
}

/* Log the device-tree parameters parsed into g_dying_params. */
static void show_dying_dts_params_info(void)
{
	printk(KERN_INFO "[DYING]: irq_type = %d, irq_cfg_offset = 0x%lx\n",
			g_dying_params.dts_params.irq_type,
			g_dying_params.dts_params.irq_cfg_offset);

	printk(KERN_INFO "[DYING]: delay_time = %d, ddr_self_refresh = %d, ddr_cfg_offset = 0x%lx\n",
			g_dying_params.dts_params.delay_time,
			g_dying_params.dts_params.ddr_self_refresh,
			g_dying_params.dts_params.ddr_cfg_offset);
}

/*
 * Parse the dying-interrupt parameters from device node @np into
 * g_dying_params.dts_params:
 *   nmi_irq_type         (required) - one of enum dying_irq_type
 *   nmi_irq_cfg_offset   (optional) - PIC register offset
 *   nmi_delay_time       (optional) - validated against [MIN,MAX]_DELAY_TIME
 *   nmi_ddr_self_refresh (optional) - normalized to 0/1
 *   nmi_ddr_cfg_offset   (optional) - DDR controller register offset
 *
 * Returns 0 on success, -EINVAL on a missing node, missing required
 * property, or out-of-range value.
 *
 * NOTE(review): properties are dereferenced directly rather than via
 * of_property_read_u32(); DT data is big-endian, which matches powerpc
 * here, but this would break on LE -- confirm intentional.
 */
static int get_dying_int_params(struct device_node *np)
{
	void *node_property      = NULL;
	unsigned int tmp_value    = 0;

	/* dying device node */
	if (np == NULL) {
		printk(KERN_ERR"[DYING]: dying device node point null\n");
		return -EINVAL;
	}

	/* irq_type */
	node_property = (void *)of_get_property(np, "nmi_irq_type", NULL);
	if (node_property == NULL) {
		printk(KERN_ERR"[DYING]:device tree nmi_irq_type describle error.\n");
		return -EINVAL;
	}

	tmp_value = *(unsigned int *)node_property;
	if (tmp_value > DYING_IRQ_MAX) {
		printk(KERN_ERR"[DYING]:dying irq type [%d] invalid\n", tmp_value);
		return -EINVAL;
	}

	g_dying_params.dts_params.irq_type = (enum dying_irq_type)tmp_value;

	/* pic irq cfg offeset */
	node_property = (void *)of_get_property(np, "nmi_irq_cfg_offset", NULL);
	if (node_property != NULL)
		g_dying_params.dts_params.irq_cfg_offset = *(unsigned long *)node_property;

	/* delay time (ms), bounded so the dying spin loop cannot run unbounded */
	node_property = (void *)of_get_property(np, "nmi_delay_time", NULL);
	if (node_property != NULL) {
		tmp_value = *(unsigned int *)node_property;

		if ((tmp_value < MIN_DELAY_TIME) || (tmp_value > MAX_DELAY_TIME)) {
			printk(KERN_ERR"[DYING]:dying delay time[%d] must in [%d,%d] ms.\n",
					tmp_value, MIN_DELAY_TIME, MAX_DELAY_TIME);
			return -EINVAL;
		}

		g_dying_params.dts_params.delay_time = tmp_value;
	}

	/* ddr self refresh: any non-zero DT value means enabled */
	node_property = (void *)of_get_property(np, "nmi_ddr_self_refresh", NULL);
	if (node_property != NULL) {
		tmp_value = *(unsigned int *)node_property;
		g_dying_params.dts_params.ddr_self_refresh = (enum dying_bool)!!tmp_value;
	}

	/* pic ddr self refresh cfg offeset */
	node_property = (void *)of_get_property(np, "nmi_ddr_cfg_offset", NULL);
	if (node_property != NULL)
		g_dying_params.dts_params.ddr_cfg_offset = *(unsigned long *)node_property;

	show_dying_dts_params_info();

	printk(KERN_INFO "[DYING]: device tree parse success\n");
	return 0;
}

/*
 * Read the "nmi_interrupt" specifier from @dying_np, sized by the PIC's
 * "#interrupt-cells", and store it in g_dying_params.dts_params.irq.
 *
 * Returns 0 on success, -EINVAL on a missing node, oversized cell count,
 * or failed property read.
 *
 * NOTE(review): irq_cells[1..3] are assigned unconditionally; when
 * irq_cells_nm < 4 (or "#interrupt-cells" is absent, leaving it 0) the
 * unread cells are the zero-initialized defaults -- confirm the bindings
 * always provide 4 cells.
 */
static int get_pic_irq_params(struct device_node *dying_np, struct device_node *pic_np)
{
	int ret                     = 0;
	void *node_property        = NULL;
	unsigned int irq_cells_nm  = 0;
	unsigned int irq_cells[DYING_IRQ_CELLS_MAX] = {0};

	/* dying device node */
	if ((pic_np == NULL) || (dying_np == NULL)) {
		printk(KERN_ERR"[DYING]: pic device node error\n");
		return -EINVAL;
	}

	/* #interrupt-cells */
	node_property = (void *)of_get_property(pic_np, "#interrupt-cells", NULL);
	if (node_property != NULL) {
		irq_cells_nm = *(unsigned int *)node_property;
		if (irq_cells_nm > DYING_IRQ_CELLS_MAX) {
			printk(KERN_ERR "[DYING]: irq_cells_nm = %d error\n", irq_cells_nm);
			return -EINVAL;
		}
	}
	printk(KERN_INFO "[DYING]: irq_cells_nm = %d\n", irq_cells_nm);

	/* irq */
	ret = of_property_read_u32_array(dying_np, "nmi_interrupt", irq_cells, irq_cells_nm);
	if (ret) {
		printk(KERN_ERR"[DYING]:nmi_interrupt get failed\n");
		return -EINVAL;
	}
	g_dying_params.dts_params.irq.num = irq_cells[0];
	g_dying_params.dts_params.irq.sense = irq_cells[1];
	g_dying_params.dts_params.irq.type = irq_cells[2];
	g_dying_params.dts_params.irq.information = irq_cells[3];

	printk(KERN_INFO "[DYING]: number = %d, sensor = %d, type = %d, information = %d\n",
			g_dying_params.dts_params.irq.num,
			g_dying_params.dts_params.irq.sense,
			g_dying_params.dts_params.irq.type,
			g_dying_params.dts_params.irq.information);

	return 0;
}

/*
 * Return the physical base address of the PIC from its first "reg"
 * resource, or 0 on failure.
 *
 * NOTE(review): of_node_put() is called only on the resource-lookup
 * failure path; on success the reference is left for the caller --
 * confirm ownership is balanced at the call sites.
 */
static phys_addr_t get_pic_phys_base_addr(struct device_node *pic_np)
{
	phys_addr_t paddr      = 0;
	struct resource r      = {0};

	if (pic_np == NULL) {
		printk(KERN_ERR "Could not find pic node\n");
		return 0;
	}

	if (of_address_to_resource(pic_np, 0, &r)) {
		printk(KERN_ERR "Failed to map mpic register space\n");
		of_node_put(pic_np);
		return 0;
	}

	paddr = r.start;
	return paddr;
}

/*
 * Map the PIC's interrupt-source configuration registers (base address
 * plus the DT-supplied irq_cfg_offset) and store the virtual address in
 * g_dying_params.pic_irq_base_addr.
 *
 * Returns 0 on success, -EINVAL on a missing node or mapping failure.
 */
static int get_pic_base_addr(struct device_node *pic_np)
{
	phys_addr_t pic_phys_addr = 0;

	if (pic_np == NULL) {
		printk(KERN_ERR"[DYING]:dying device node get failed\n");
		/*
		 * Fix: this used to return 0 (success) with a NULL mapping,
		 * letting init proceed as if the PIC had been set up.
		 */
		return -EINVAL;
	}

	pic_phys_addr = get_pic_phys_base_addr(pic_np);
	if (pic_phys_addr == 0) {
		printk(KERN_ERR "[DYING]:get pic interrupt source config address failed!\n");
		return -EINVAL;
	}

	g_dying_params.pic_irq_base_addr = (char *)ioremap((pic_phys_addr +
				g_dying_params.dts_params.irq_cfg_offset), MPIC_IOREMAP_SIZE);

	if (g_dying_params.pic_irq_base_addr == NULL) {
		printk(KERN_ERR "[DYING]:pic irq base addr ioremap failed!\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Return the physical base address of the DDR controller from its first
 * "reg" resource, or 0 on failure.
 *
 * NOTE(review): like get_pic_phys_base_addr(), of_node_put() happens
 * only on the failure path -- confirm refcount balance with the caller.
 */
static phys_addr_t get_ddr_phys_ctrl_addr(struct device_node *ddr_np)
{
	struct resource r      = {0};
	phys_addr_t paddr      = 0;

	if (ddr_np == NULL) {
		printk(KERN_ERR "Could not find ddr controller\n");
		return 0;
	}

	if (of_address_to_resource(ddr_np, 0, &r)) {
		printk(KERN_ERR "Failed to map mpic register space\n");
		of_node_put(ddr_np);
		return 0;
	}

	paddr = r.start;
	return paddr;
}

/*
 * Resolve the "nmi_ddr_controller" phandle in @dying_np, map the DDR
 * controller registers, and store the virtual address in
 * g_dying_params.ddr_ctrl_addr.
 *
 * Returns 0 on success, -EINVAL on any lookup or mapping failure.
 *
 * NOTE(review): the ddr_np reference from of_find_node_by_phandle() is
 * dropped only inside get_ddr_phys_ctrl_addr()'s failure path; other
 * paths keep it -- confirm whether a put is needed here.
 */
static int get_ddr_ctrl_addr(struct device_node *dying_np)
{
	struct device_node *ddr_np = NULL;
	const __be32 *parp          = NULL;
	phys_addr_t phys_ddr_ctrl_addr = 0;

	if (dying_np == NULL) {
		printk(KERN_ERR"[DYING]:dying device node get failed\n");
		/*
		 * Fix: this used to return 0 (success) with ddr_ctrl_addr
		 * still unmapped; report the error to the caller instead.
		 */
		return -EINVAL;
	}

	parp = of_get_property(dying_np, "nmi_ddr_controller", NULL);
	if (parp == NULL) {
		printk(KERN_ERR"[DYING]:nmi_ddr_controller get failed\n");
		return -EINVAL;
	}

	ddr_np = of_find_node_by_phandle(be32_to_cpup(parp));
	if (ddr_np == NULL) {
		printk(KERN_ERR"[DYING]:ddr device node get failed\n");
		return -EINVAL;
	}

	phys_ddr_ctrl_addr = get_ddr_phys_ctrl_addr(ddr_np);
	if (phys_ddr_ctrl_addr == 0) {
		printk(KERN_ERR"[DYING]:get ddr control address failed!\n");
		return -EINVAL;
	}

	g_dying_params.ddr_ctrl_addr = (unsigned long)ioremap(phys_ddr_ctrl_addr, DDR_CTRL_IOREMAP_SIZE);

	if (g_dying_params.ddr_ctrl_addr == 0) {
		printk(KERN_ERR"[DYING]:ddr control ioremap failed!\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Configure the MPIC external-interrupt registers for the e500mc MCP
 * dying interrupt: polarity/sense in EIVPR (masked/unmasked via
 * MPIC_MSK_BIT), MCP routing in EILR, and destination cores in EIDR.
 *
 * Returns 0 on success, -EINVAL for an unsupported irq.sense value.
 */
static int set_pic_e500mc(int enable)
{
	volatile unsigned int eivpr = 0;
	volatile unsigned int eilr  = 0;
	volatile unsigned int eidr  = 0;
	char *pic_irq_addr          = NULL;

	/* per-IRQ register bank for the configured external interrupt */
	pic_irq_addr = g_dying_params.pic_irq_base_addr +
		MPIC_PER_IRQ_OFFSET * g_dying_params.dts_params.irq.num;

	eivpr = *(volatile unsigned int *)(pic_irq_addr + MPIC_EIVPR_OFFSET);
	eilr = *(volatile unsigned int *)(pic_irq_addr + MPIC_EILR_OFFSET);
	eidr = *(volatile unsigned int *)(pic_irq_addr + MPIC_EIDR_OFFSET);
	printk(KERN_INFO"old:EIVPR: %08x EILR: %08x EIDR: %08x\n", eivpr, eilr, eidr);
	/* polarity bit: high-active vs low-active level trigger */
	if (g_dying_params.dts_params.irq.sense == IRQ_TYPE_LEVEL_HIGH) {
		eivpr |= (1 << MPIC_POLARITY_BIT);
	} else if (g_dying_params.dts_params.irq.sense == IRQ_TYPE_LEVEL_LOW) {
		eivpr &= ~(1 << MPIC_POLARITY_BIT);
	} else {
		printk(KERN_ERR "[DYING]:irq.sense = %d is error\n",
			g_dying_params.dts_params.irq.sense);
		return -EINVAL;
	}
	eivpr |= (1 << MPIC_SENSOR_BIT);

	if (enable == DYING_ENABLE) {
		/* unmask the interrupt and route it as MCP */
		eivpr &= ~(1 << MPIC_MSK_BIT);
		*(volatile unsigned int *)(pic_irq_addr + MPIC_EIVPR_OFFSET) = eivpr;

		eilr = MPIC_E500MC_MCP_SET;
		*(volatile unsigned int *)(pic_irq_addr + MPIC_EILR_OFFSET) = eilr;

		eidr = (1 << NR_CPUS) - 1;
		/* to every core */
		*(volatile unsigned int *)(pic_irq_addr + MPIC_EIDR_OFFSET) = eidr;
	} else {
		/* mask the interrupt and clear its routing */
		eivpr |= (1 << MPIC_MSK_BIT);
		*(volatile unsigned int *)(pic_irq_addr + MPIC_EIVPR_OFFSET) = eivpr;

		eilr = 0x0;
		*(volatile unsigned int *)(pic_irq_addr + MPIC_EILR_OFFSET) = eilr;
	}
	/* read back and log the final register state */
	eivpr = *(volatile unsigned int *)(pic_irq_addr + MPIC_EIVPR_OFFSET);
	eilr = *(volatile unsigned int *)(pic_irq_addr + MPIC_EILR_OFFSET);
	eidr = *(volatile unsigned int *)(pic_irq_addr + MPIC_EIDR_OFFSET);
	printk(KERN_INFO"new:EIVPR: %08x EILR: %08x EIDR: %08x\n", eivpr, eilr, eidr);
	return 0;
}

/*
 * Configure the IPIC (e300) for the dying interrupt on IRQ0: edge/level
 * selection via SECNR's EDI0 bit, and MCP routing/enable via SEMSR.
 *
 * Returns 0 on success, -EINVAL for an unsupported irq.sense value.
 */
static int set_pic_e300(int enable)
{
	volatile unsigned int secnr = 0;
	volatile unsigned int semsr = 0;

	secnr = *(volatile unsigned int *)(g_dying_params.pic_irq_base_addr + IPIC_SECNR);
	semsr = *(volatile unsigned int *)(g_dying_params.pic_irq_base_addr + IPIC_SEMSR);
	printk(KERN_INFO "old:IPIC_SECNR: %08x, IPIC_SEMSR: %08x\n", secnr, semsr);

	if (g_dying_params.dts_params.irq.sense == IRQ_TYPE_EDGE_FALLING) {
		secnr |= (1 << IPIC_EDI0_BIT);
	} else if (g_dying_params.dts_params.irq.sense == IRQ_TYPE_LEVEL_LOW) {
		secnr &= ~(1 << IPIC_EDI0_BIT);
	} else {
		printk(KERN_ERR "[DYING]:irq.sense = %d is error\n", g_dying_params.dts_params.irq.sense);
		return -EINVAL;
	}
	/* setting IRQ0 edge sensitive */
	*(volatile unsigned int *)(g_dying_params.pic_irq_base_addr + IPIC_SECNR) = secnr;

	if (enable == DYING_ENABLE) {
		/* setting IRQ0 as external MCP request */
		semsr |= IPIC_MCP_SIRQ0;
		*(volatile unsigned int *)(g_dying_params.pic_irq_base_addr + IPIC_SEMSR) = semsr;
	} else {
		/* setting IRQ0 as external interrupt request and disable */
		semsr &= ~(IPIC_MCP_SIRQ0 | IPIC_IRQ0_ENABLE);
		*(volatile unsigned int *)(g_dying_params.pic_irq_base_addr + IPIC_SEMSR) = semsr;
	}
	/* read back and log the final register state */
	secnr = *(volatile unsigned int *)(g_dying_params.pic_irq_base_addr + IPIC_SECNR);
	semsr = *(volatile unsigned int *)(g_dying_params.pic_irq_base_addr + IPIC_SEMSR);
	printk(KERN_INFO "new:IPIC_SECNR: %08x, IPIC_SEMSR: %08x\n", secnr, semsr);
	return 0;
}

/*
 * Configure the MPIC for an e500 critical-interrupt (CINT) dying
 * interrupt: polarity/sense and mask bit in EIVPR, destination in EIDR.
 * Per the comment below, only a single core is supported.
 *
 * Returns 0 on success, -EINVAL for an unsupported irq.sense value.
 */
static int set_pic_cint_e500(int enable)
{
	volatile unsigned int eivpr = 0;
	volatile unsigned int eidr  = 0;
	char *pic_irq_addr         = NULL;

	/* per-IRQ register bank for the configured external interrupt */
	pic_irq_addr = g_dying_params.pic_irq_base_addr +
		MPIC_PER_IRQ_OFFSET * g_dying_params.dts_params.irq.num;

	eivpr = *(volatile unsigned int *)(pic_irq_addr + MPIC_EIVPR_OFFSET);
	eidr = *(volatile unsigned int *)(pic_irq_addr + MPIC_EIDR_OFFSET);
	printk(KERN_INFO "old:EIVPR: %08x EIDR: %08x\n", eivpr, eidr);

	/* polarity bit: high-active vs low-active level trigger */
	if (g_dying_params.dts_params.irq.sense == IRQ_TYPE_LEVEL_HIGH) {
		eivpr |= (1 << MPIC_POLARITY_BIT);
	} else if (g_dying_params.dts_params.irq.sense == IRQ_TYPE_LEVEL_LOW) {
		eivpr &= ~(1 << MPIC_POLARITY_BIT);
	} else {
		printk(KERN_ERR "[DYING]:irq.sense = %d is error\n",
			g_dying_params.dts_params.irq.sense);
		return -EINVAL;
	}
	eivpr |= (1 << MPIC_SENSOR_BIT);

	if (enable == DYING_ENABLE) {
		/* unmask the interrupt */
		eivpr &= ~(1 << MPIC_MSK_BIT);
		*(volatile unsigned int *)(pic_irq_addr + MPIC_EIVPR_OFFSET) = eivpr;

		/* only support single core now */
		eidr = MPIC_E500_CINT_SET;
		*(volatile unsigned int *)(pic_irq_addr + MPIC_EIDR_OFFSET) = eidr;
	} else {
		/* mask the interrupt */
		eivpr |= (1 << MPIC_MSK_BIT);
		*(volatile unsigned int *)(pic_irq_addr + MPIC_EIVPR_OFFSET) = eivpr;
	}
	/* read back and log the final register state */
	eivpr = *(volatile unsigned int *)(pic_irq_addr + MPIC_EIVPR_OFFSET);
	eidr = *(volatile unsigned int *)(pic_irq_addr + MPIC_EIDR_OFFSET);
	printk(KERN_INFO "new:EIVPR: %08x EIDR: %08x\n", eivpr, eidr);
	return 0;
}

/*
 * Enable or disable the dying interrupt at the PIC according to the
 * configured irq_type, then record the new state in
 * g_dying_params.status. No-op when the state already matches.
 *
 * Returns 0 on success or when already in the requested state,
 * -EINVAL on a missing PIC mapping or a PIC-programming failure.
 */
static int set_pic_dying_irq(int enable)
{
	int ret = -EINVAL;

	if (g_dying_params.status == enable) {
		printk(KERN_WARNING "[DYING]: dying %s alreadly\n", enable ? "enable" : "disable");
		return 0;
	}

	/* every irq type except direct MCP needs the mapped PIC registers */
	if ((g_dying_params.dts_params.irq_type != DYING_IRQ_MCP)
			&& (g_dying_params.pic_irq_base_addr == NULL)) {
		printk(KERN_ERR "[DYING]: pic_addr is NULL!\n");
		return -EINVAL;
	}

	switch (g_dying_params.dts_params.irq_type) {
	case DYING_IRQ_MCP_E500MC:
		ret = set_pic_e500mc(enable);
		break;
	case DYING_IRQ_MCP_E300:
		ret = set_pic_e300(enable);
		break;
	case DYING_IRQ_CINT:
		ret = set_pic_cint_e500(enable);
		break;
	default:
		printk(KERN_INFO "[DYING]: dying irq_type = %d not support\n",
				g_dying_params.dts_params.irq_type);
		break;
	}

	if (ret < 0) {
		printk(KERN_ERR "[DYING]: dying %s failed\n", enable ? "enable" : "disable");
		return -EINVAL;
	}

	g_dying_params.status = (enum dying_bool)enable;

	printk(KERN_INFO "[DYING]: dying %s success\n", enable ? "enable" : "disable");
	return 0;
}

/*
 * Snapshot @regs into this CPU's per_fiq_args (retrievable through
 * fiq_get_regs()) and invoke every registered dying callback under RCU.
 *
 * NOTE(review): bust_spinlocks(1) has no matching bust_spinlocks(0);
 * presumably intentional because the system never recovers from the
 * dying path -- confirm.
 */
static void do_dying_callback(struct pt_regs *regs)
{
	struct dying_callback *dying_callback_node = NULL;
	struct pt_regs *local_regs                 = NULL;
	u32 cpu                                    = 0;

	local_regs = regs;
	cpu = smp_processor_id();

	if (local_regs != NULL)
		memcpy(&per_cpu(per_fiq_args, cpu), local_regs, sizeof(struct pt_regs));

	bust_spinlocks(1);
	rcu_read_lock();
	list_for_each_entry_rcu(dying_callback_node, &(dying_callback_head.entry), entry) {
		if (dying_callback_node->notify_func != NULL)
			dying_callback_node->notify_func(0);
	}
	rcu_read_unlock();
}

/*
 * Resolve the "nmi_pic" phandle, read the PIC irq parameters, map the
 * PIC registers, and enable the dying interrupt.
 *
 * Returns 0 on success, -EINVAL on lookup/parse failures, -EFAULT when
 * enabling the interrupt at the PIC fails.
 */
static int set_nmi_pic(struct device_node *dying_np)
{
	int ret;
	const __be32 *parp;
	struct device_node *pic_np;

	/* mcp direct enabled by hardware, no need to set up the pic */
	if (g_dying_params.dts_params.irq_type == DYING_IRQ_MCP) {
		g_dying_params.status = DYING_ENABLE;
		printk(KERN_INFO "[DYING]:mcp had been enabled by hardware,not need setting irq\n");
		return 0;
	}

	parp = of_get_property(dying_np, "nmi_pic", NULL);
	if (parp == NULL) {
		printk(KERN_ERR"[DYING]:nmi_pic get failed\n");
		return -EINVAL;
	}

	pic_np = of_find_node_by_phandle(be32_to_cpup(parp));
	if (pic_np == NULL) {
		printk(KERN_ERR"[DYING]:pic device node get failed\n");
		return -EINVAL;
	}

	ret = get_pic_irq_params(dying_np, pic_np);
	if (ret < 0) {
		printk(KERN_INFO "[DYING]: get irq params failed\n");
		return -EINVAL;
	}

	ret = get_pic_base_addr(pic_np);
	if (ret < 0) {
		printk(KERN_INFO "[DYING]: pic base address get failed\n");
		return -EINVAL;
	}

	/* device tree define NMI,so enable it */
	if (set_pic_dying_irq(DYING_ENABLE) != 0) {
		/*
		 * Fix: also release the PIC register mapping created by
		 * get_pic_base_addr() above; the old code only unmapped the
		 * DDR controller window and leaked the PIC one. Guard the
		 * DDR unmap since self refresh may be disabled (addr == 0).
		 */
		iounmap((void *)g_dying_params.pic_irq_base_addr);
		g_dying_params.pic_irq_base_addr = NULL;
		if (g_dying_params.ddr_ctrl_addr != 0)
			iounmap((void *)g_dying_params.ddr_ctrl_addr);
		return  -EFAULT;
	}
	return 0;
}

/*
 * Module init: initialize the callback list and defaults, locate the
 * "powerpc,nmi" device node, optionally hook the soft watchdog, parse
 * the DT parameters, map the DDR controller (when self refresh is
 * enabled), and program the PIC.
 *
 * Returns 0 on success, a negative errno on any failure.
 */
static int powerpc_dying_init(void)
{
	int ret                       = 0;
	struct device_node *dying_np = NULL;
	int register_dying = 0;

	INIT_LIST_HEAD(&dying_callback_head.entry);
	dying_callback_head.notify_func = NULL;

	rcu_assign_pointer(before_self_refresh_fun, NULL);

	/* default setting */
	dying_params_init(&g_dying_params);

	/* dying device node */
	dying_np = of_find_compatible_node(NULL, NULL, "powerpc,nmi");
	if (dying_np == NULL) {
		/* Fix: message previously lacked a trailing newline */
		printk(KERN_INFO"[DYING]: device tree have no node with \"powerpc,nmi\"\n");
		return -EINVAL;
	}

	/* either DT property name requests routing the soft watchdog here */
	if (of_property_read_u32(dying_np, "regiter_dying_to_softdog", &register_dying))
		of_property_read_u32(dying_np, "senior-dog-overtime-reboot", &register_dying);
	if (register_dying)
		register_softdog_final_notify(dying_from_soft_watchdog);

	/* parse dts params */
	ret = get_dying_int_params(dying_np);
	if (ret < 0) {
		printk(KERN_INFO "[DYING]: device tree parse failed\n");
		ret = -EINVAL;
		goto err;
	}

	/* get ddr addr */
	if (g_dying_params.dts_params.ddr_self_refresh == DYING_ENABLE) {
		ret = get_ddr_ctrl_addr(dying_np);
		if (ret < 0) {
			printk(KERN_INFO "[DYING]: ddr ctrl address get failed\n");
			ret = -EINVAL;
			goto err;
		}
	}

	ret = set_nmi_pic(dying_np);
	if (ret)
		goto err;

	of_node_put(dying_np);
	printk(KERN_INFO "[DYING]: dying init success\n");
	return 0;
err:
	/* Fix: drop the reference taken by of_find_compatible_node() */
	of_node_put(dying_np);
	return ret;
}

/* Module teardown: clear the self-refresh hook and release MMIO mappings. */
void powerpc_dying_exit(void)
{
	rcu_assign_pointer(before_self_refresh_fun, NULL);
	/* wait for in-flight before_ddr_self_refresh() readers to finish */
	synchronize_rcu();
	iounmap((void *)g_dying_params.ddr_ctrl_addr);
	iounmap((void *)g_dying_params.pic_irq_base_addr);
}

/* Module entry/exit points. */
module_init(powerpc_dying_init);
module_exit(powerpc_dying_exit);

MODULE_LICENSE("GPL");

/*
 * e500 ext-watchdog time out: entered from the critical exception
 * handler; marks the system as dying and never returns.
 */
void critical_exception(struct pt_regs *regs)
{
#ifdef CONFIG_RTOS_EARLY_KBOX
	int cpu = raw_smp_processor_id();

	/* record the entry marker for this cpu in the early-kbox buffer */
	if (ekbox_pcontent)
		*(ekbox_pcontent + cpu) = ENTER_CRITICAL_EXCEPTION;
#endif
	/* disable printk to avoid deadlock in fiq */
	set_dying_mode();
	dying_watchdog_proc(regs, EXIT_CRITICAL_EXCEPTION);
}

#ifdef CONFIG_RTOS_EARLY_KBOX
/* Prepare the early-kbox buffer used to record per-cpu exception markers. */
static int __init fiq_glue_ekbox_buffer_init(void)
{
	if (ekbox_pcontent) {
		/*
		 * Initializing ekbox partition is used to record the information of critical
		 * interrupt in and out exception handling function, initialize partition to
		 * space, the ASCII code is 0x20.
		 */
		memset(ekbox_pcontent, 0x20, EKBOX_RESERVE_SIZE - 1);
		*(ekbox_pcontent + (EKBOX_RESERVE_SIZE - 1)) = '\n';
	}
	return 0;
}
arch_initcall(fiq_glue_ekbox_buffer_init);
#endif
