// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 */
#ifdef CONFIG_EULEROS_LWT

#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <asm/current.h>
#include <asm/stacktrace.h>
#include <asm/lwt_stacktrace.h>

#define LWT_UNREGISTER	(1)
#define LWT_REGISTER	(0)
#define LWT_KSTACK_PER_LINE_PRINT	(32)
#define LWT_MAX_KSTACK_LINE_PRINT	(150)
#define LWT_MAX_KSTACK_PRINT		(LWT_KSTACK_PER_LINE_PRINT * LWT_MAX_KSTACK_LINE_PRINT)

static DEFINE_SPINLOCK(lwt_spinlock);
static int lwt_unregister_flag = LWT_UNREGISTER;
static atomic_t lwt_ops_ref = ATOMIC_INIT(0);
static struct lwt_stacktrace_ops lwt_stacktrace_ops_s = {NULL};

int crashing_cpu;

/* Remember the CPU that initiated the crash in the crashing_cpu global. */
void save_crashing_cpu(void)
{
	int cpu = smp_processor_id();

	crashing_cpu = cpu;
}

/*
 * register LWT stack trace operations structure.
 */
/*
 * register LWT stack trace operations structure.
 *
 * @ops: callback table to install; both is_lwt_stack and
 *	 lwt_stask_walk_all must be non-NULL.
 * Return: RET_LWT_OK on success, RET_LWT_ERR if @ops is invalid or a
 *	   provider is already registered.
 */
int register_lwt_work(struct lwt_stacktrace_ops *ops)
{
	if (ops == NULL)
		return RET_LWT_ERR;

	spin_lock(&lwt_spinlock);
	if (lwt_unregister_flag == LWT_REGISTER) {
		spin_unlock(&lwt_spinlock);
		pr_err("lwt stacktrace operation has already registered\n");
		return RET_LWT_ERR;
	}

	if (ops->is_lwt_stack == NULL || ops->lwt_stask_walk_all == NULL) {
		spin_unlock(&lwt_spinlock);
		pr_err("lwt stacktrace operation parameter is NULL\n");
		return RET_LWT_ERR;
	}

	memcpy(&lwt_stacktrace_ops_s, ops, sizeof(struct lwt_stacktrace_ops));
	/*
	 * Make the ops copy globally visible BEFORE publishing the flag:
	 * lock-free readers (lwt_work, lwt_klp_walk_stackframe) test the
	 * flag and then dereference the ops, so the barrier must sit
	 * between the two stores.  (Previously mb() was issued after the
	 * flag store, which did not order the memcpy against it.)
	 */
	mb();
	lwt_unregister_flag = LWT_REGISTER;
	spin_unlock(&lwt_spinlock);

	pr_info("register lwt stacktrace operation success\n");
	return RET_LWT_OK;
}
EXPORT_SYMBOL(register_lwt_work);

/*
 * unregister LWT stack trace operations structure.
 */
void unregister_lwt_work(void)
{
	spin_lock(&lwt_spinlock);
	/* Nothing installed: report and bail out. */
	if (lwt_unregister_flag == LWT_UNREGISTER) {
		spin_unlock(&lwt_spinlock);
		pr_info("lwt stacktrace operation not registered yet\n");
		return;
	}
	/* Flip the flag first so new lock-free readers refuse to enter. */
	lwt_unregister_flag = LWT_UNREGISTER;

	mb(); /* order flag store before the ref read; pairs with readers' mb() */

	/* wait lwt dump_trace to work end */
	while (atomic_read(&lwt_ops_ref))
		mdelay(1);

	/* No in-flight readers remain: safe to wipe the callback table. */
	memset(&lwt_stacktrace_ops_s, 0, sizeof(struct lwt_stacktrace_ops));
	spin_unlock(&lwt_spinlock);
	pr_info("unregister lwt stacktrace operation success\n");
}
EXPORT_SYMBOL(unregister_lwt_work);

/* Expose the module-local LWT callback table to other modules. */
struct lwt_stacktrace_ops *get_lwt_ops(void)
{
	struct lwt_stacktrace_ops *ops = &lwt_stacktrace_ops_s;

	return ops;
}
EXPORT_SYMBOL(get_lwt_ops);

/*
 * wrapper function to check if current in LWT stack.
 * return value: RET_LWT_OK  - in LWT stack,
 *               RET_LWT_ERR - not in LWT stack
 */
/*
 * Check whether @stack belongs to an LWT stack via the registered
 * is_lwt_stack callback.
 *
 * @stack: stack address to test.
 * Return: the callback's result, or RET_LWT_ERR if no provider is
 *	   registered.
 */
int lwt_work(unsigned long stack)
{
	int ret;

	/*
	 * Pin the ops with a reference, mirroring
	 * lwt_klp_walk_stackframe(): previously the callback was invoked
	 * without a ref, so unregister_lwt_work() could memset the ops
	 * table between the flag check and the is_lwt_stack() call.
	 */
	lwt_add_ref();
	mb(); /* pairs with the mb() in unregister_lwt_work() */
	if (lwt_unregister_flag == LWT_UNREGISTER) {
		lwt_del_ref();
		return RET_LWT_ERR;
	}

	ret = lwt_stacktrace_ops_s.is_lwt_stack(stack);
	lwt_del_ref();
	return ret;
}
EXPORT_SYMBOL(lwt_work);

/* Take a reference that keeps unregister_lwt_work() from wiping the ops. */
void lwt_add_ref(void)
{
	atomic_add(1, &lwt_ops_ref);
}
EXPORT_SYMBOL(lwt_add_ref);

/* Drop a reference taken with lwt_add_ref(). */
void lwt_del_ref(void)
{
	atomic_sub(1, &lwt_ops_ref);
}
EXPORT_SYMBOL(lwt_del_ref);

/*
 * wrapper function to check if address in kernel text range.
 */
/* Thin exported shim over __kernel_text_address() for LWT users. */
int lwt_kernel_text_address(const unsigned long addr)
{
	return __kernel_text_address(addr) ? 1 : 0;
}
EXPORT_SYMBOL(lwt_kernel_text_address);

/*
 * walk all lwt stack for livepatch
 *
 * @fn: callback invoked for each stack frame
 * @data: parameter passed to @fn
 * Logs an error and returns without walking if no LWT stacktrace
 * operations are registered.
 */
void lwt_klp_walk_stackframe(int (*fn)(struct stackframe *, void *), void *data)
{
	lwt_add_ref();
	mb(); /* magic */
	if (lwt_unregister_flag == LWT_UNREGISTER) {
		lwt_del_ref();
		pr_err("lwt stacktrace operation not registered yet\n");
		return;
	}

	if (lwt_stacktrace_ops_s.lwt_stask_walk_all)
		lwt_stacktrace_ops_s.lwt_stask_walk_all(fn, data);
	else
		pr_err("lwt stacktrace not registered yet. walk all lwt failed.\n");

	lwt_del_ref();
}
EXPORT_SYMBOL(lwt_klp_walk_stackframe);

/*
 * Stack-walk callback for livepatch: returns -EBUSY when
 * lwt_klp_check_jump_func() rejects the frame's PC, 0 otherwise.
 */
int klp_check_lwt_jump_func(struct stackframe *frame, void *data)
{
	return lwt_klp_check_jump_func(data, frame->pc) ? 0 : -EBUSY;
}

#endif /* CONFIG_EULEROS_LWT */

