/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2019.
 * Description: support fiq_glue_register.c for arm64
 * Author: xiaojiangfeng <xiaojiangfeng@huawei.com>
 * Create: 2018-09-13
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <linux/hal/fiq_glue.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/of_address.h>
#include "../../fiq_glue_com/fiq_glue_private.h"
#ifdef CONFIG_FIQ_GLUE_DFX
#include <linux/crc32.h>
#endif
#ifdef CONFIG_HISI_WATCHDOG
#include <linux/hal/watchdog.h>
#endif
#include <linux/hal/cpu_param.h>
#include <linux/hal/gpio_lock.h>
#ifdef CONFIG_RTOS_HAL_HISI_DJTAG_SUPPORT_LLC
#include <linux/hal/hisi_llc_ops.h>
#endif

#ifdef CONFIG_RTOS_EARLY_KBOX
#include <linux/early_kbox.h>
#endif

#ifdef CONFIG_RTOS_SHADOW_CALL_STACK
#include <linux/scs.h>
#endif

#ifndef	RTOS_HAL_SUPPORT_HISI_DJTAG_LOCK
/* Weak no-op fallback; boards with djtag lock support provide the real one. */
void __weak rtos_djtag_lock_init(void) {}
#endif

struct fiq_private_data fiq_priv;
static bool ipi_flag;
static DEFINE_PER_CPU(struct pt_regs, per_fiq_args);

static spinlock_t fiq_dfx_lock;

bool entry_fiq_mode;
#ifdef CONFIG_FIQ_GLUE_DFX
static DEFINE_PER_CPU(uint64_t, crc_sec);
static DEFINE_PER_CPU(uint64_t, crc_nosec);
#endif

#ifdef CONFIG_RTOS_SHADOW_CALL_STACK
DEFINE_PER_CPU_ALIGNED(unsigned long [SCS_SIZE / sizeof(long)], fiq_scs_stack);

/*
 * Switch the shadow call stack to this cpu's dedicated FIQ SCS buffer.
 * Must run before any SCS-instrumented call in the FIQ path.
 */
static inline void update_x18(void)
{
	unsigned long *fiq_scs_stack_ptr = this_cpu_ptr(fiq_scs_stack);

	/* terminate the FIQ shadow stack with the SCS end marker */
	*__scs_magic(fiq_scs_stack_ptr) = SCS_END_MAGIC;
	/* x18 holds the shadow-call-stack pointer on arm64 SCS builds */
	asm volatile("mov x18, %0" : : "r" (fiq_scs_stack_ptr));
	/* keep the task's recorded SCS consistent with the live x18 value */
	task_scs(current) = fiq_scs_stack_ptr;
	task_scs_sp(current) = fiq_scs_stack_ptr;
}
#endif

/*
 * Clear the pending FIQ source via the board-registered hook, and — when
 * fiq_clean_wtg is set — run the lock-free hardware watchdog handler
 * (lock-free because we may be interrupting a lock holder).
 */
void fiq_clean_source(void)
{
	if (fiq_priv.callback.fiq_cleansourse_handle)
		(*fiq_priv.callback.fiq_cleansourse_handle)();

#ifdef CONFIG_HISI_WATCHDOG
	if (fiq_priv.fiq_clean_wtg)
		call_hwdog_handle_nolock();
#endif
}

struct fiq_num_node *fiq_find_node(unsigned int fiq_num)
{
	struct fiq_num_node *fiq_node = NULL;
	struct list_head *t = NULL;

	list_for_each(t, &fiq_priv.entry) {
		fiq_node = list_entry(t, struct fiq_num_node, node);
		if (fiq_node->fiq_num == fiq_num)
			return fiq_node;
	}

	return NULL;
}

struct fiq_callback_node *fiq_find_callback(struct fiq_num_node *fiq_node,
	void (*fiq_callback_func)(unsigned int))
{
	struct fiq_callback_node *callback_node = NULL;
	struct list_head *t = NULL;

	list_for_each(t, &fiq_node->callback_head) {
		callback_node = list_entry(t, struct fiq_callback_node, node);
		if (callback_node->callback == fiq_callback_func)
			return callback_node;
	}

	return NULL;
}

void fiq_free_callback(struct fiq_num_node *fiq_node)
{
	struct fiq_callback_node *callback_node = NULL;
	struct list_head *t = NULL;
	struct list_head *n = NULL;

	list_for_each_safe(t, n, &fiq_node->callback_head) {
		callback_node = list_entry(t, struct fiq_callback_node, node);
		list_del(&callback_node->node);
		kfree(callback_node);
	}
}

/*
 * Latch and read back the secure register-buffer base address.
 * A call with a non-NULL @sp_addr stores the pointer in a function-local
 * static; a call with NULL simply returns whatever was stored last
 * (NULL until the first store).
 */
uint64_t *fiq_glue_reg(uint64_t *sp_addr)
{
	static uint64_t *saved;

	if (sp_addr != NULL)
		saved = sp_addr;

	return saved;
}

/*
 * Copy the secure-world register snapshot (params->regs) into the kernel
 * pt_regs @args.  The layout differs per build: 32-bit ARM-with-TF uses
 * the ARM_* uregs view, arm64 copies x0..x30 plus sp/pc/pstate.
 */
static void fiq_save_regs(struct pt_regs *args, void *info)
{
	struct fiq_func_params *params = (struct fiq_func_params *)info;
	struct fiq_pt_regs *regs = params->regs;
	int i = 0;
#ifdef CONFIG_RTOS_HAL_FIQ_ARM_WITH_TF
	if (params->regs) {
		/* truncate each 64-bit saved value to the 32-bit ARM view */
		for (i = 0; i < FIQ_COMM_REGS_ARM; i++)
			args->uregs[i] = (uint32_t)regs->regs[i];
		args->ARM_sp = (uint32_t)regs->sp;
		args->ARM_pc = (uint32_t)regs->pc;
		args->ARM_cpsr = (uint32_t)regs->pstate;
	}
#else
	if (params->regs) {
		/* 31 general-purpose registers on arm64: x0..x30 */
		for (i = 0; i < 31; i++)
			args->regs[i] = regs->regs[i];
		args->sp = regs->sp;
		args->pc = regs->pc;
		args->pstate = regs->pstate;

		args->orig_x0 = regs->orig_x0;
		args->syscallno = regs->syscallno;
	}
#endif
}

/*
 * Return this cpu's saved FIQ register snapshot (filled by
 * fiq_save_regs() during FIQ dispatch).
 */
struct pt_regs *fiq_get_regs(void)
{
	uint64_t this_cpu = smp_processor_id();

	return &per_cpu(per_fiq_args, this_cpu);
}
EXPORT_SYMBOL(fiq_get_regs);

/*
 * Core FIQ dispatch: snapshot the interrupted context into the per-cpu
 * pt_regs, then invoke every callback registered for the fiq number
 * (only GIC-sourced FIQs carry a number; otherwise 0 is looked up).
 * Returns 0 — or never returns when fiq_priv.fiq_deadloop is set.
 */
static inline int fiq_glue_action_pack(void *info)
{
	uint64_t fiq_num = 0;
	struct fiq_num_node *fiq_node = NULL;
	struct list_head *t = NULL;
	struct fiq_callback_node *callback_node = NULL;
	struct pt_regs *args = NULL;
	uint64_t cpu;

	cpu = smp_processor_id();
	if (fiq_priv.fiq_source == FIQ_GIC_SOURCE)
		fiq_num = ((struct  fiq_func_params *)info)->fiq_num;

	/* save the interrupted context so debuggers/dump code can read it */
	args = &per_cpu(per_fiq_args, cpu);
	fiq_save_regs(args, info);

	fiq_node = fiq_find_node(fiq_num);
	if (!fiq_node) {
		pr_info("[RTOS fiq]@@%llu find NO fiq NODE @fiq:%llx\n", cpu, fiq_num);
		goto out;
	}
	/* run every callback registered for this fiq number, in order */
	list_for_each(t, &fiq_node->callback_head) {
		callback_node = list_entry(t, struct fiq_callback_node, node);
		callback_node->callback(fiq_num);
	}
#ifdef CONFIG_RTOS_EARLY_KBOX
	/* in ekbox reserve mem, the log is 'abcd*' */
	if (ekbox_pcontent)
		*(ekbox_pcontent + num_present_cpus() + cpu) = 'A' + cpu%16;
#endif
	/* flush all cache buffer */
	wmb();
#ifdef CONFIG_RTOS_HAL_HI1382_FLUSH_WRITEBUFFER_FIX
	/* solve 1382 FIQ problems caused by outer cache */
	writer_buffer_flush();
#endif

out:
	/* for avoid FIQ reentry, some boards need to dead loop */
	if (fiq_priv.fiq_deadloop) {
		while (1)
			;
	}
	return 0;
}

#ifdef CONFIG_FIQ_GLUE_DFX
/*
 * Plain byte-wise sum of @size bytes at @buf (not a CRC, despite the
 * crc naming used nearby).  Wraps modulo 2^64; returns 0 for size 0.
 */
uint64_t checksum(uint8_t *buf, uint64_t size)
{
	uint64_t sum = 0;
	const uint8_t *p = buf;
	const uint8_t *end = buf + size;

	while (p < end)
		sum += *p++;

	return sum;
}
#endif

static atomic_t fiq_init_djtag_lock;

/*
 * Invalidate the dcache lines covering [start, end) so secure-world
 * writes to the shared buffer become visible to this core.
 * NOTE(review): compiled out on ARM-with-TF builds — presumably the
 * buffer is kept coherent by other means there; confirm with that port.
 */
static void fiq_glue_inv_cache_range(const void *start, const void *end)
{
#ifndef CONFIG_RTOS_HAL_FIQ_ARM_WITH_TF
	inval_dcache_range((unsigned long)(uintptr_t)start, (unsigned long)(uintptr_t)end);
#endif
}

#ifdef CONFIG_FIQ_GLUE_DFX
/*
 * Hand back this cpu's stored checksum pair: the secure-side value
 * (crc_sec) through @old_crc and the locally computed one (crc_nosec)
 * through @new_crc.  No-op if either pointer is NULL.
 */
void fiq_get_checksum(uint64_t *old_crc, uint64_t *new_crc)
{
	uint64_t this_cpu;

	if (old_crc == NULL || new_crc == NULL)
		return;

	this_cpu = smp_processor_id();
	*old_crc = per_cpu(crc_sec, this_cpu);
	*new_crc = per_cpu(crc_nosec, this_cpu);
}
EXPORT_SYMBOL_NS(fiq_get_checksum, HW_RTOS_NS);

/*
 * Record both checksums for this cpu: compute a byte-sum over the
 * secure buffer into crc_nosec, and stash the secure world's own
 * crc32_check_sum into crc_sec (compared later via fiq_get_checksum).
 */
static inline void fiq_set_checksum(struct fiq_secure_buf *fiq_secure_buf, uint64_t size)
{
	uint64_t this_cpu = smp_processor_id();

	per_cpu(crc_nosec, this_cpu) = checksum((uint8_t *)fiq_secure_buf, size);
	per_cpu(crc_sec, this_cpu) = fiq_secure_buf->crc32_check_sum;
}

/*
 * Dump every non-zero 64-bit word of the DFX error buffer, at most once
 * for the lifetime of the system (static @loop doubles as a ran-already
 * flag: it stays non-zero after the first full pass).
 * Deliberately returns without unlocking — see the comment below.
 */
static inline void dfx_context_show(void *buf, uint64_t size)
{
	static uint64_t loop;

	/* this function should only run once before goto dead, so unlock fiq_dfx_lock is unneccessary */
	if (!spin_trylock(&fiq_dfx_lock))
		return;
	if (loop)
		return;
	/* walk the buffer in 8-byte (uint64_t) steps, printing non-zero words */
	for (loop = 0; loop < size / 8; loop++) {
		if (*((uint64_t *)buf + loop))
			printk("[RTOS fiq]DFX Err @%pK\t Val:0x%016llx\n",
					(uint64_t *)((uint64_t *)buf + loop),
					*(uint64_t *)((uint64_t *)buf + loop));
	}
}
#endif

/*
 * Main FIQ action: locate this cpu's slice of the secure register
 * buffer, optionally invalidate its cache lines, fill fiq_func_params
 * and hand off to fiq_glue_action_pack().
 *
 * Runs in FIQ context, so locks the interrupted code may already hold
 * (djtag, gpio) are re-initialised here to avoid self-deadlock.
 *
 * Fix: the NULL check on the buffer now happens BEFORE any use — the
 * previous code passed a NULL-derived pointer to the cache-invalidate
 * routine and only then checked for NULL.
 */
static void fiq_glue_action(struct fiq_glue_handler *handler)
{
	struct	fiq_func_params params;
	void *info = &params;
	void *buf = (void *)(fiq_glue_reg(NULL));
	struct fiq_secure_buf *fiq_secure_buf = NULL;
	uint64_t cpu;
#ifdef CONFIG_FIQ_GLUE_DFX
	/* here to set oops_in_progress to avoid printk deadlock */
	bust_spinlocks(1);
	/* here to re-init the djtag spin lock, to avoid fiq happened when operating djtag with this lock */
	if (atomic_inc_return(&fiq_init_djtag_lock) == 1)
		rtos_djtag_lock_init();
#endif

	/*
	 * reinit rtos_gpio_spinlock here to avoid deadlock in fiq
	 * when call printk while hold this spinlock which will
	 * try to get this spinlock.
	 */
	rtos_gpio_spinlock_init();
	cpu = smp_phy_processor_id();

	/* bail out before deriving/using any pointer from an unset buffer */
	if (buf == NULL) {
		printk(KERN_ERR "[RTOS fiq] fiq not init, quit Now.\n");
		return;
	}
	fiq_secure_buf = (struct fiq_secure_buf *)(buf + cpu * FIQ_GLUE_REG_BUF_SIZE);

	if (fiq_priv.need_invalid_cache)
		fiq_glue_inv_cache_range(fiq_secure_buf, (void *)fiq_secure_buf + FIQ_GLUE_REG_BUF_SIZE);

	params.h = handler;
	params.regs = &fiq_secure_buf->regs;
	params.svc_sp = NULL;
	params.fiq_num = fiq_secure_buf->fiq_num;
#ifdef CONFIG_FIQ_GLUE_DFX
	fiq_set_checksum(fiq_secure_buf, FIQ_BUF_SIZE);
	dfx_context_show((void *)nmi_virt_dfx_base, NMI_CTX_BUFFER_DFX_SIZE);
#endif

#ifdef CONFIG_SMP /* need to  check if should be IPI to other Core */
	/* ARM64 do not support 1-N FIQ in OS level */
	if (FIQ_1_N_TYPE == fiq_priv.fiq_type && ipi_flag)
		ipi_flag = false;
#endif
	fiq_glue_action_pack(info);
}

static void fiq_glue_default_action(struct fiq_glue_handler *h)
{
	printk("fiq default action\n");
}

/* Global glue handler; .fiq is re-pointed to fiq_glue_action() at register time. */
struct fiq_glue_handler fiq_glue_handler = {
	.fiq = fiq_glue_default_action,
};

static int fiq_glue_handler_set(void)
{
	struct fiq_glue_handler *handler = &fiq_glue_handler;

	handler->fiq = fiq_glue_action;
	handler->resume = NULL;

	return 0;
}

/*
 * Make 'current' usable in the FIQ path when THREAD_INFO_IN_TASK is on
 * by loading sp_el0 from the per-cpu __entry_task.
 * NOTE(review): relies on the arm64 convention that sp_el0 carries the
 * current task pointer in this configuration — confirm against entry.S.
 */
static void update_current(void)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	asm volatile("msr sp_el0, %0" : : "r" (raw_cpu_read(__entry_task)));
#endif
}

/*
 * Leave a per-cpu breadcrumb in the early-kbox reserved memory:
 * cpu N writes 'a' + N % 16 at offset N, marking FIQ entry on that cpu.
 */
static void fiq_point_to_ekbox(void)
{
#ifdef CONFIG_RTOS_EARLY_KBOX
	unsigned int this_cpu = smp_processor_id();

	/* in ekbox reserve mem, the log is 'abcd*' */
	if (ekbox_pcontent != NULL)
		ekbox_pcontent[this_cpu] = 'a' + this_cpu % 16;
#endif
}

/*
 * EL1 FIQ entry point (called from the low-level vector path).
 * Ordering is load-bearing: current and the shadow call stack must be
 * valid before anything instrumented runs, and printk is unsafe until
 * entry_fiq_mode is set.  __noscs: runs before update_x18() switches
 * to the FIQ shadow stack.
 */
__noscs void fiq_el1_trigger(void)
{
	struct fiq_glue_handler *handler = &fiq_glue_handler;

	update_current();

#ifdef CONFIG_RTOS_SHADOW_CALL_STACK
	update_x18();
#endif

	fiq_point_to_ekbox();

	/*
	 * don't use printk before entry_fiq_mode = true
	 * otherwise some boards as 1382 will cause wtd bugs
	 */
	entry_fiq_mode = true;

	handler->fiq(handler);

	/* hand control back to the trusted firmware saved context */
	tf_restore_context();
}

static int fiq_priv_init(void)
{
	int ret;

	if (fiq_priv.callback.fiq_init_handle) {
		ret = (*fiq_priv.callback.fiq_init_handle)();
		if (ret) {
			printk(KERN_ERR"init handle error%d\n", ret);
			return ret;
		}
	}
	return 0;
}

/*
 * Allocate a fiq_num_node for @fiq_num carrying one callback entry.
 * On success the new node is stored through @node_ptr and 0 is
 * returned; on allocation failure partial allocations are freed and
 * -ENOMEM is returned.  Caller owns the node and must link/free it.
 */
static int fiq_create_new_node(struct fiq_num_node **node_ptr,
	void (*fiq_callback_func)(unsigned int),
	unsigned int fiq_num)
{
	struct fiq_callback_node *cb = NULL;
	struct fiq_num_node *node = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		printk(KERN_ERR "[%s]kmalloc for first fiq node failed!\n", __func__);
		return -ENOMEM;
	}

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL) {
		printk(KERN_ERR "[%s]kmalloc for callback node failed!\n", __func__);
		kfree(node);
		return -ENOMEM;
	}

	/* record the fiq number and hang the callback on the node's list */
	node->fiq_num = fiq_num;
	INIT_LIST_HEAD(&node->callback_head);
	cb->callback = fiq_callback_func;
	list_add_tail(&cb->node, &node->callback_head);

	*node_ptr = node;
	return 0;
}

static int fiq_register_for_exist_node(struct fiq_num_node *fiq_node,
	void (*fiq_callback_func)(unsigned int))
{
	struct fiq_callback_node *callback_node;

#ifdef CONFIG_RTOS_HAL_FIQ_GLUE_EARLY
	/* if node registered with early fiq callback, unregister them at first */
	unregister_early_fiq_callback_node(fiq_node);
#endif

	callback_node = fiq_find_callback(fiq_node, fiq_callback_func);
	if (callback_node) {
		printk(KERN_ERR "[%s]FIQ%d callback function has exist.\n",
				__func__, fiq_node->fiq_num);
		return ERR_CALLBACK_EXIST;
	}

	callback_node = kzalloc(sizeof(*callback_node), GFP_KERNEL);
	if (!callback_node) {
		printk(KERN_ERR "[%s]kmalloc for callback node failed!\n", __func__);
		return -ENOMEM;
	}

	/* Mount callback functions on the callback_node */
	callback_node->callback = fiq_callback_func;
	list_add_tail(&callback_node->node, &fiq_node->callback_head);

	return 0;
}

/*
 * First-time FIQ registration: create the initial node/callback pair,
 * initialise the node list, run the board init hook and install the
 * glue handler.  Returns 0 on success; on init-hook failure the node
 * and its callbacks are rolled back and the error is returned.
 */
int fiq_first_register(void (*fiq_callback_func)(unsigned int),
	unsigned int fiq_num)
{
	struct fiq_num_node *fiq_node = NULL;
	int ret;

	spin_lock_init(&fiq_dfx_lock);

	ret = fiq_create_new_node(&fiq_node, fiq_callback_func, fiq_num);
	if (ret != 0) {
		printk(KERN_ERR "[%s]create for a new node failed!\n", __func__);
		return ret;
	}

	/* Initialize the entry of the fiq_nodes list */
	INIT_LIST_HEAD(&fiq_priv.entry);
	list_add_tail(&fiq_node->node, &fiq_priv.entry);

	ret = fiq_priv_init();
	if (ret != 0) {
		printk(KERN_ERR "[%s]fiq priv init error:%d.\n", __func__, ret);
		goto rollback_node;
	}

	printk(KERN_ERR "[RTOS fiq]@@ register fiq\n");
	fiq_glue_handler_set();

	fiq_glue_register_handler(NULL);
	return 0;

rollback_node:
	fiq_free_callback(fiq_node);
	list_del(&fiq_node->node);
	kfree(fiq_node);
	return ret;
}

int fiq_more_register(void (*fiq_callback_func)(unsigned int),
	unsigned int fiq_num)
{
	struct fiq_num_node *fiq_node;
	int ret = 0;

	fiq_node = fiq_find_node(fiq_num);
	if (!fiq_node) {
		/* Create a new node when not finding any exiting node */
		ret = fiq_create_new_node(&fiq_node, fiq_callback_func, fiq_num);
		if (ret < 0) {
			printk(KERN_ERR "[%s]create for a new node failed!\n",
				__func__);
			return ret;
		}

		list_add_tail(&fiq_node->node, &fiq_priv.entry);
	} else {
		/* Mount callback function on the fiq_node when find one */
		ret = fiq_register_for_exist_node(fiq_node, fiq_callback_func);
	}
	return ret;
}

/* Record the board-specific init/clean-source hooks used by the FIQ path. */
void fiq_register_handle(int (*init)(void), void (*clean)(void))
{
	fiq_priv.callback.fiq_cleansourse_handle = clean;
	fiq_priv.callback.fiq_init_handle = init;
}
#ifdef CONFIG_RTOS_HAL_FIQ_ARM_WITH_TF
/*
 * Intentionally empty stub on ARM-with-TF builds.
 * NOTE(review): presumably GIC FIQ setup is handled by the trusted
 * firmware there — confirm with the TF port before relying on this.
 */
void fiq_register_gic(struct fiq_gic_handle *handle)
{}
#endif
