/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020.
 * Description: support fiq_glue_register.c for x86
 * Author: yingelin <yingelin@huawei.com>
 * Create: 2020-10-21
 */
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <asm/apic.h>
#ifdef CONFIG_RTOS_EARLY_KBOX
#include <linux/early_kbox.h>
#endif
#include <asm/irq_vectors.h>
#include "fiq_glue_private.h"

/* Glue state: registered platform callbacks plus the fiq-number node list. */
struct fiq_private_data fiq_priv;
/* Non-zero once the first NMI has fanned an IPI out to the other CPUs. */
static atomic_t wait_for_ipi;
/* Snapshot of num_online_cpus() taken at first registration (ekbox layout). */
static unsigned int online_cpus;
/* Per-CPU copy of the registers captured at FIQ/NMI entry. */
static DEFINE_PER_CPU(struct pt_regs, per_fiq_args);
/* Per-CPU run-once latch: set after a CPU has executed its FIQ callbacks. */
static DEFINE_PER_CPU(int, fiq_handled);
/* True once fiq_trigger() has classified an NMI as a FIQ. */
bool entry_fiq_mode;
/* Marker modes for fiq_point_to_ekbox(). */
#define FIQ_ENTER 0
#define FIQ_EXIT 1

/*
 * Quiesce the platform FIQ/NMI source via the registered hook, if one
 * was installed through fiq_register_handler(); no-op otherwise.
 */
void fiq_clean_source(void)
{
	void (*clean)(void) = fiq_priv.callback.fiq_cleansource_handle;

	if (clean)
		clean();
}

struct fiq_num_node *fiq_find_node(unsigned int fiq_num)
{
	struct fiq_num_node *fiq_node = NULL;
	struct list_head *t = NULL;

	list_for_each(t, &fiq_priv.entry) {
		fiq_node = list_entry(t, struct fiq_num_node, node);
		if (fiq_node->fiq_num == fiq_num)
			return fiq_node;
	}

	return NULL;
}

struct fiq_callback_node *fiq_find_callback(struct fiq_num_node *fiq_node,
	void (*fiq_callback_func)(unsigned int))
{
	struct fiq_callback_node *callback_node = NULL;
	struct list_head *t = NULL;

	list_for_each(t, &fiq_node->callback_head) {
		callback_node = list_entry(t, struct fiq_callback_node, node);
		if (callback_node->callback == fiq_callback_func)
			return callback_node;
	}

	return NULL;
}

void fiq_free_callback(struct fiq_num_node *fiq_node)
{
	struct fiq_callback_node *callback_node = NULL;
	struct list_head *t = NULL;
	struct list_head *n = NULL;

	list_for_each_safe(t, n, &fiq_node->callback_head) {
		callback_node = list_entry(t, struct fiq_callback_node, node);
		list_del(&callback_node->node);
		kfree(callback_node);
	}
}

static void fiq_save_regs(struct pt_regs *args, void *info)
{
	struct fiq_func_params *params = (struct fiq_func_params *)info;
	struct pt_regs *regs = params->regs;

	memcpy(args, regs, sizeof(struct pt_regs));
}

/*
 * Return the calling CPU's saved register snapshot (filled in by
 * fiq_glue_action_pack() at FIQ entry).
 */
struct pt_regs *fiq_get_regs(void)
{
	/* int, not uint64_t: matches the cpu-id type used elsewhere in this file */
	int cpu = raw_smp_processor_id();

	return &per_cpu(per_fiq_args, cpu);
}
EXPORT_SYMBOL(fiq_get_regs);

/*
 * Drop a per-CPU progress marker into the early-kbox reserved buffer:
 * lower-case letter at FIQ entry (mode == FIQ_ENTER), upper-case letter
 * in a second region offset by online_cpus at FIQ exit.  'cpu % 26'
 * keeps the marker within the alphabet for large CPU counts.
 * No-op unless CONFIG_RTOS_EARLY_KBOX is enabled.
 */
static void fiq_point_to_ekbox(int mode)
{
#ifdef CONFIG_RTOS_EARLY_KBOX
	unsigned int cpu = smp_processor_id();
	char start = 'a';
	int base = 0;
	if (mode) {
		/* FIQ_EXIT: upper-case markers, written past the entry region */
		start = 'A';
		base = online_cpus;
	}
	if (ekbox_pcontent)
		*(ekbox_pcontent + base + cpu) = start + cpu % 26;
#endif
}

static void fiq_glue_action_pack(void *info)
{
	struct fiq_num_node *fiq_node = NULL;
	struct fiq_callback_node *callback_node = NULL;
	struct list_head *t = NULL;
	struct pt_regs *args = NULL;
	unsigned int fiq_num = FIQ_NMI_NUM;
	int cpu;

	fiq_point_to_ekbox(FIQ_ENTER);
	cpu = raw_smp_processor_id();
	args = &per_cpu(per_fiq_args, cpu);
	fiq_save_regs(args, info);

	fiq_node = fiq_find_node(fiq_num);
	if (!fiq_node) {
		pr_err("no fiq%u callback\n", fiq_num);
		return;
	}

	list_for_each(t, &fiq_node->callback_head) {
		callback_node = list_entry(t, struct fiq_callback_node, node);
		callback_node->callback(fiq_num);
	}

	fiq_point_to_ekbox(FIQ_EXIT);
}

static void fiq_glue_action(struct fiq_glue_handler *handler, void *regs)
{
	struct fiq_func_params params;
	void *info = &params;

	params.h = handler;
	params.regs = regs;
	fiq_glue_action_pack(info);
}

/* Placeholder action, installed until fiq_glue_handler_set() swaps in the real one. */
void fiq_glue_default_action(struct fiq_glue_handler *handler, void *regs)
{
	pr_err("fiq default action\n");
}

/* Global glue handler; .fiq is replaced by fiq_glue_action on first registration. */
struct fiq_glue_handler fiq_glue_handler = {
	.fiq = fiq_glue_default_action,
};

static inline bool is_fiq(void)
{
	if ((fiq_priv.callback.fiq_get_source && fiq_priv.callback.fiq_get_source() == FIQ_TARGET)
	    || atomic_read(&wait_for_ipi) != 0) /* wait_for_ipi != 0 means fiq from ipi */
		return 1;
	return 0;
}

/*
 * Fan the FIQ out to every other CPU exactly once: only the CPU that wins
 * the cmpxchg (0 -> 1) quiesces the NMI source and sends the NMI IPIs.
 */
static inline void fiq_send_ipi(void)
{
	if (atomic_cmpxchg(&wait_for_ipi, 0, 1) == 0) {
		fiq_clean_source();
		wmb();	/* order wait_for_ipi update before the IPIs land */
		apic->send_IPI_allbutself(NMI_VECTOR);
	}
}

static inline bool is_fiq_handled(void)
{
	if (*this_cpu_ptr(&fiq_handled))
		return 1;
	*this_cpu_ptr(&fiq_handled) = 1;
	return 0;
}

/*
 * NMI entry point for the FIQ glue: classify the NMI, fan it out to all
 * other CPUs (once), then run the registered callbacks on this CPU
 * exactly once.  Returns NMI_HANDLED when the FIQ was consumed here,
 * NMI_DONE when the NMI is not a FIQ or this CPU already handled it.
 */
int fiq_trigger(unsigned int cmd, struct pt_regs *regs)
{
	struct fiq_glue_handler *handler = &fiq_glue_handler;

	if (!is_fiq())
		return NMI_DONE;

	entry_fiq_mode = true;
	fiq_send_ipi();

	/* run-once guard: each CPU executes the callbacks a single time */
	if (is_fiq_handled())
		return NMI_DONE;

	handler->fiq(handler, regs);

	/* do other actions after fiq callback, such as deadloop or any other */
	if (fiq_priv.callback.fiq_post_callback)
		fiq_priv.callback.fiq_post_callback();

	return NMI_HANDLED;
}

/*
 * Allocate the glue private area and install the real FIQ action.
 * Returns 0 on success, -EEXIST if already set up, -ENOMEM on OOM.
 *
 * Fix: the original swapped .fiq to fiq_glue_action *before* checking
 * for an existing private area or allocating a new one, so a failed
 * call still left the live action pointing at an unconfigured handler.
 * Install the action only after setup has fully succeeded.
 */
static int fiq_glue_handler_set(void)
{
	struct fiq_glue_handler *handler = &fiq_glue_handler;

	if (handler->private)
		return -EEXIST;

	handler->private = kzalloc(FIQ_GLUE_PRIVATE_SZ, GFP_KERNEL);
	if (!handler->private)
		return -ENOMEM;

	/* last step: make the real action visible only once setup is complete */
	handler->fiq = fiq_glue_action;
	return 0;
}

static void fiq_glue_handler_reset(void)
{
	fiq_glue_handler.fiq = fiq_glue_default_action;
	kfree(fiq_glue_handler.private);
	fiq_glue_handler.private = NULL;
}

/*
 * Run the platform init hook registered via fiq_register_handler().
 * Returns -EINVAL when no hook was registered, otherwise the hook's
 * own return value (0 on success).
 */
static int fiq_priv_init(void)
{
	int ret;

	if (!fiq_priv.callback.fiq_init_handle) {
		pr_err("fiq_init_handle is NULL");
		return -EINVAL;
	}

	ret = fiq_priv.callback.fiq_init_handle();
	if (ret)
		pr_err("init handle error%d\n", ret);

	return ret;
}

/*
 * Set up the glue handler and register it with the NMI layer; on
 * registration failure the handler is reset so nothing is left armed.
 * Returns 0 on success or a negative errno.
 */
static int fiq_glue_init(void)
{
	int ret = fiq_glue_handler_set();

	if (ret) {
		pr_err("fiqglue set handler failed!\n");
		return ret;
	}

	ret = fiq_glue_register_handler(&fiq_glue_handler);
	if (!ret)
		return 0;

	pr_err("fiqglue: cannot register fig glue handler\n");
	fiq_glue_handler_reset();
	return ret;
}

/*
 * Allocate a fiq_num_node for @fiq_num with @fiq_callback_func mounted
 * as its first callback, returned via @node_ptr on success.
 * Returns 0 on success, -ENOMEM on allocation failure (nothing leaked;
 * *node_ptr is untouched on failure).
 */
static int fiq_create_new_node(struct fiq_num_node **node_ptr,
	void (*fiq_callback_func)(unsigned int),
	unsigned int fiq_num)
{
	struct fiq_num_node *fiq_node;
	struct fiq_callback_node *callback_node;

	fiq_node = kzalloc(sizeof(*fiq_node), GFP_KERNEL);
	if (!fiq_node) {
		pr_err("[%s]kmalloc for first fiq node failed!\n", __func__);
		return -ENOMEM;
	}

	callback_node = kzalloc(sizeof(*callback_node), GFP_KERNEL);
	if (!callback_node) {
		pr_err("[%s]kmalloc for callback node failed!\n", __func__);
		kfree(fiq_node);
		return -ENOMEM;
	}

	/* record the FIQ number and start an empty callback list */
	fiq_node->fiq_num = fiq_num;
	INIT_LIST_HEAD(&fiq_node->callback_head);

	/* mount the first callback */
	callback_node->callback = fiq_callback_func;
	list_add_tail(&callback_node->node, &fiq_node->callback_head);

	*node_ptr = fiq_node;
	return 0;
}

/*
 * Mount @fiq_callback_func on an already-registered @fiq_node.
 * Returns 0 on success, ERR_CALLBACK_EXIST when the callback is already
 * mounted, or -ENOMEM on allocation failure.
 */
static int fiq_register_for_exist_node(struct fiq_num_node *fiq_node,
	void (*fiq_callback_func)(unsigned int))
{
	struct fiq_callback_node *callback_node;

	callback_node = fiq_find_callback(fiq_node, fiq_callback_func);
	if (callback_node) {
		/* fix: fiq_num is unsigned int, so use %u (was %d) */
		pr_err("[%s]FIQ%u callback function has exist.\n",
				__func__, fiq_node->fiq_num);
		return ERR_CALLBACK_EXIST;
	}

	callback_node = kzalloc(sizeof(*callback_node), GFP_KERNEL);
	if (!callback_node) {
		pr_err("[%s]kmalloc for callback node failed!\n", __func__);
		return -ENOMEM;
	}

	/* mount callback function */
	callback_node->callback = fiq_callback_func;
	list_add_tail(&callback_node->node, &fiq_node->callback_head);

	return 0;
}

/*
 * Record the platform-specific NMI hooks used by the glue layer:
 * @enable_nmi:        one-time init hook, run from fiq_priv_init()
 * @disable_nmi:       quiesce hook, run from fiq_clean_source()
 * @get_nmi_cause:     NMI classifier, compared against FIQ_TARGET in is_fiq()
 * @fiq_post_callback: run after the per-CPU callbacks in fiq_trigger()
 * Any pointer may be NULL; the corresponding step is then skipped
 * (except enable_nmi, which fiq_priv_init() requires).
 */
void fiq_register_handler(int (*enable_nmi)(void),
	void (*disable_nmi)(void),
	unsigned int (*get_nmi_cause)(void),
	void (*fiq_post_callback)(void))
{
	fiq_priv.callback.fiq_init_handle = enable_nmi;
	fiq_priv.callback.fiq_cleansource_handle = disable_nmi;
	fiq_priv.callback.fiq_get_source = get_nmi_cause;
	fiq_priv.callback.fiq_post_callback = fiq_post_callback;
}
EXPORT_SYMBOL(fiq_register_handler);

/* Undo fiq_glue_init(): unregister the glue handler, then reset it. */
static void fiq_glue_deinit(void)
{
	fiq_glue_unregister_handler();
	fiq_glue_handler_reset();
}

/*
 * First-time registration: create the initial node for @fiq_num with
 * @fiq_callback_func, install the glue handler, run the platform init
 * hook and create the procfs stats entry.  Returns 0 on success or a
 * negative errno, with all partial state rolled back.
 *
 * NOTE(review): this re-initialises fiq_priv.entry on every call, so a
 * second invocation would orphan previously registered nodes — confirm
 * callers invoke it exactly once (then fiq_more_register()).
 */
int fiq_first_register(void (*fiq_callback_func)(unsigned int),
	unsigned int fiq_num)
{
	struct fiq_num_node *fiq_node = NULL;
	int ret = 0;

	/* cached for the ekbox marker layout (see fiq_point_to_ekbox) */
	online_cpus = num_online_cpus();

	ret = fiq_create_new_node(&fiq_node, fiq_callback_func, fiq_num);
	if (ret) {
		pr_err("[%s]create for a new node failed!\n", __func__);
		return ret;
	}

	/* init list entry */
	INIT_LIST_HEAD(&fiq_priv.entry);
	list_add_tail(&fiq_node->node, &fiq_priv.entry);

	ret = fiq_glue_init();
	if (ret) {
		pr_err("[%s]fiq glue init error:%d.\n", __func__, ret);
		goto error_kree;
	}

	ret = fiq_priv_init();
	if (ret) {
		pr_err("[%s]fiq priv init error:%d.\n", __func__, ret);
		goto error_init;
	}

	proc_fiqstat_init();

	return 0;

error_init:
	fiq_glue_deinit();
error_kree:
	/* free callbacks first, then unlink and free the node itself */
	fiq_free_callback(fiq_node);
	list_del(&fiq_node->node);
	kfree(fiq_node);
	return ret;
}

/*
 * Register an additional callback for @fiq_num: mount it on the
 * existing node when one is found, otherwise create a fresh node and
 * link it onto fiq_priv.entry.  Returns 0 on success or a negative
 * errno / ERR_CALLBACK_EXIST.
 */
int fiq_more_register(void (*fiq_callback_func)(unsigned int),
	unsigned int fiq_num)
{
	struct fiq_num_node *fiq_node = fiq_find_node(fiq_num);
	int ret;

	/* existing node: just mount the callback on it */
	if (fiq_node)
		return fiq_register_for_exist_node(fiq_node, fiq_callback_func);

	/* no node yet: create one and link it into the global list */
	ret = fiq_create_new_node(&fiq_node, fiq_callback_func, fiq_num);
	if (ret < 0) {
		pr_err("[%s]create for a new node failed!\n",
			__func__);
		return ret;
	}

	list_add_tail(&fiq_node->node, &fiq_priv.entry);
	return 0;
}

#ifdef CONFIG_RTOS_EARLY_KBOX
/*
 * Blank the ekbox reserved buffer with spaces and terminate it with a
 * newline so the marker region reads as one printable line.  No-op when
 * the buffer pointer is not set.  Always returns 0.
 */
static int fiq_glue_ekbox_buffer_init(void)
{
	if (!ekbox_pcontent)
		return 0;

	memset(ekbox_pcontent, 0x20, EKBOX_RESERVE_SIZE - 1);
	ekbox_pcontent[EKBOX_RESERVE_SIZE - 1] = '\n';
	return 0;
}

static int fiq_glue_notifier_event(struct notifier_block *this, unsigned long action, void *unused)
{
	pr_info("fiq-glue ekbox buffer init\n");
	return fiq_glue_ekbox_buffer_init();
}

/* Registered with the ekbox reserved-mem format chain in fiq_glue_ekbox_init(). */
static struct notifier_block fiq_glue_notifier = {
	.notifier_call = fiq_glue_notifier_event,
};

/*
 * Early (arch_initcall) setup: subscribe to ekbox reserved-memory
 * format events and blank the buffer once up front.
 */
static int __init fiq_glue_ekbox_init(void)
{
	/* registration failure is tolerated: the buffer was still initialised */
	(void)register_ekbox_reserved_mem_format_notifier(&fiq_glue_notifier);
	return fiq_glue_ekbox_buffer_init();
}

arch_initcall(fiq_glue_ekbox_init);
#endif

