/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2020-2024.
 * Description: RTOS Process Context for RPC
 * Create: 2020-08-15
 */
#include <linux/sched.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/task_struct_extend.h>
#include <linux/tasklock.h>
#include <linux/scs.h>
#include <linux/rtos_rtpc.h>
#include <linux/rtos_rtpc_rpc.h>
#include <linux/kasan.h>
#include <linux/proc_fs.h>
#include <asm/rtos_rtpc.h>
#include <asm/rtos_rtpc_rpc.h>
#include <asm/exception.h>

#include "../kernel/sched/sched.h"
#include "../kernel/audit.h"

/*
 * Logging helpers: every message is tagged with subsystem, severity,
 * function name and line number.  rpc_debug() is always compiled in but
 * gated at runtime by the sysctl_rpc_kernel_debug switch.
 */
#define rpc_debug(fmt, args...) \
	do { \
		if (unlikely(sysctl_rpc_kernel_debug)) \
			pr_info("[RPC KERNEL][DEBUG][%s][%d]: "fmt, __func__, __LINE__, ##args); \
	} while (0)

#define rpc_err(fmt, args...) \
	pr_err("[RPC KERNEL][ERROR][%s][%d]: "fmt, __func__, __LINE__, ##args)

#define rpc_alert(fmt, args...) \
	pr_alert("[RPC KERNEL][ALERT][%s][%d]: "fmt, __func__, __LINE__, ##args)

#define rpc_emerg(fmt, args...) \
	pr_emerg("[RPC KERNEL][EMERG][%s][%d]: "fmt, __func__, __LINE__, ##args)

/*
 * Classification returned by rpc_do_exception(); the low-level exception
 * entry code dispatches on this value.  The numeric values matter: see
 * rpc_get_exception_type(), which builds FIXUP/RET_FAST/RET_EXP/NONE from
 * (is_pgtable_diff << 1) | is_ret_call.
 */
enum rpc_exception_type {
	EXCEPTION_NONE = 0, /* non rpc exception, just skip */
	EXCEPTION_RET_EXP = 1, /* ret call after fixup exception */
	EXCEPTION_FIXUP = 2, /* fixup exception */
	EXCEPTION_RET_FAST = 3, /* ret call fast */
	EXCEPTION_CALL = 4 /* do call fast */
};

/*
 * ioctl command numbers understood by the RPC misc device.  CMD_BASE
 * offsets the range so unrelated ioctls cannot collide; CMD_DO_CALL and
 * CMD_WAIT_AND_RET_CALL are also recognized directly from the syscall
 * arguments in rpc_get_exception_type() on the fast path.
 */
enum rpc_cmd {
	CMD_BASE = 0xff2d0000,
	CMD_SERVER_INIT,
	CMD_SERVER_EXIT,
	CMD_SERVER_THREAD_BIND,
	CMD_SERVER_THREAD_UNBIND,
	CMD_SERVER_THREAD_DESTROY,
	CMD_CLIENT_INIT,
	CMD_CLIENT_EXIT,
	CMD_CLIENT_THREAD_INIT,
	CMD_CLIENT_THREAD_EXIT,
	CMD_CLIENT_THREAD_BIND,
	CMD_CLIENT_THREAD_UNBIND,
	CMD_DO_CALL,
	CMD_WAIT_AND_RET_CALL,
	CMD_MAX
};

/* Maximum bindings per client thread (size of rpc_bind_info_ptr's bitmap). */
#define MAX_BIND_NUM	32

/* Handler signature for one entry of the ioctl command dispatch table. */
typedef int (*rpc_cmd_func_t)(struct file *filp, unsigned long arg, struct task_struct *tsk);

/* One row of the ioctl dispatch table: command, printable name, handler. */
struct rpc_cmd_node_s {
	unsigned long cmd;
	const char *cmd_name;
	rpc_cmd_func_t cmd_func;
};

/* Opaque user-supplied call arguments, passed through unmodified. */
struct rpc_param {
	void *arg1;
	void *arg2;
	void *arg3;
	void *arg4;
};

/*
 * Layout of the shared per-task data page copied between client and server
 * on every call/return (see rpc_switch_data()).  bind_id selects the
 * binding; func_id and param describe the remote call; ret carries the
 * result back.
 */
struct rpc_data {
	unsigned int bind_id;
	unsigned int func_id;
	struct rpc_param param;
	void *ret;
	int need_exit;
};

/*
 * Snapshot of a thread's kernel-side execution state taken by
 * rpc_save_thread() and restored by rpc_recover_thread(): CPU context,
 * the live portion of the kernel stack, and (when configured) the shadow
 * call stack and the KASAN shadow of the stack.
 */
struct thread_save_info {
	const void *save_context;
	const void *save_kstack;
	size_t kstack_size;
	void *start_sp;
#ifdef CONFIG_SHADOW_CALL_STACK
	// shadow call stack
	const void *save_scs;
	size_t scs_size;
#endif
#ifdef CONFIG_KASAN
	// kasan shadow mem of kstack
	const void *save_kstack_kasan;
	size_t kstack_kasan_size;
#endif
};

// current bind state
//
// Lazy-initialization state machine for a binding:
//   BIND_INFO_CREATED -> SERVER_WAITED -> CLIENT_NEED_SAVE
//     -> CLIENT_SAVED -> BIND_INFO_INITED
// The server side advances it in rpc_wait_call_fast(), the client side in
// save_client_context(); only BIND_INFO_INITED enables the fast call path.
enum bind_state {
	// initial state: when server call server_thread_bind, and create rpc_bind_info
	BIND_INFO_CREATED,
	// server is initialized and sleep, at this time client can save the server context
	SERVER_WAITED,
	// the first time client call do_call, it will sleep,
	// waiting for server to save client's context
	CLIENT_NEED_SAVE,
	// client's context is saved
	CLIENT_SAVED,
	// bind_info is inited
	BIND_INFO_INITED
};

/*
 * Shared state of one client/server binding.  Lifetime is kref-managed;
 * the object lives on bind_info_list (protected by bind_info_list_lock)
 * and its mutable fields are protected by ->lock.
 */
struct rpc_bind_info {
	unsigned int session_id;	/* user-chosen rendezvous id */
	int cpu;			/* the single CPU both tasks are pinned to */

	struct task_struct *client_task;
	struct task_struct *server_task;
	int init_state;			/* enum bind_state */

	/* server_task with task_struct/stack references held, see
	 * rpc_get_server_task()/rpc_put_server_task() */
	struct task_struct *server_task_get;

	struct thread_save_info server_thread_save;
	struct thread_save_info client_thread_save;

	int is_calling;			/* client currently inside a do_call */

	int need_exit;			/* binding is dead; peers must bail out */

	struct kref ref;

	spinlock_t lock;

	struct list_head node;		/* link in bind_info_list */
};

/* Per-client-thread table of bindings, indexed by bind_id (bitmap-allocated). */
struct rpc_bind_info_ptr {
	struct rpc_bind_info *bind_info[MAX_BIND_NUM];
	DECLARE_BITMAP(bind_info_map, MAX_BIND_NUM);
};

/* Module switches exposed via sysctl: master enable and debug logging. */
unsigned int sysctl_rpc_kernel_enable;
static unsigned int sysctl_rpc_kernel_debug;

/* All live rpc_bind_info objects; list and krefs guarded by the lock below. */
static LIST_HEAD(bind_info_list);
/*
 * Fix: this lock was declared as a bare, uninitialized `struct spinlock`.
 * A zero-filled spinlock may happen to look unlocked, but it is never run
 * through spin_lock_init(), so with CONFIG_DEBUG_SPINLOCK the missing
 * magic/owner fields trigger "bad magic" splats on first use.  Use the
 * canonical static initializer, matching DEFINE_MUTEX() below.
 */
static DEFINE_SPINLOCK(bind_info_list_lock);
static DEFINE_MUTEX(rpc_mutex);

// DFX : do_call times, fixup times, fixup type, etc
/* Largest syscall number / irq number recorded individually; anything
 * above the limit is only counted in the aggregate counters. */
#define STAT_SYSCNO_MAX 300
#define STAT_IRQ_MAX 200
/* Per-CPU counters, updated only when sysctl_rpc_kernel_stats is set. */
struct rpc_stat_info {
	// do_call times
	size_t nr_call;
	// fixup times
	// nr_fixup == nr_fixup_irq + nr_fixup_syscall + nr_fixup_exception
	size_t nr_fixup;
	// fixup times by interrupt
	size_t nr_fixup_irq;
	// fixup times by syscall
	size_t nr_fixup_syscall;
	// fixup times by other exception (like page fault)
	size_t nr_fixup_exception;
	// fixup times by syscall: table
	size_t syscall_table[STAT_SYSCNO_MAX + 1];
	// fixup times by interrupt: table
	size_t irq_table[STAT_IRQ_MAX + 1];
};

static unsigned int sysctl_rpc_kernel_stats;
DEFINE_PER_CPU(struct rpc_stat_info *, cpu_rpc_stat);

static void do_rtpc_signal(void *info);

/*
 * Sanity-check the CPU invariant of a binding: both tasks must currently
 * be on @cpu, both must be pinned to exactly one CPU, and the caller must
 * itself be executing on @cpu.  Returns 0 when all checks pass, -1
 * otherwise.  Caller is expected to have preemption/irqs disabled so
 * smp_processor_id() is stable.
 */
static inline int check_cpu(struct task_struct *tsk1, struct task_struct *tsk2, int cpu)
{
	if (unlikely(task_cpu(tsk1) != cpu) || unlikely(task_cpu(tsk2) != cpu))
		return -1;

	if (unlikely(cpumask_weight(tsk1->cpus_ptr) != 1) ||
	    unlikely(cpumask_weight(tsk2->cpus_ptr) != 1))
		return -1;

	return unlikely(cpu != smp_processor_id()) ? -1 : 0;
}

/*
 * Validate a task's CPU affinity at bind time: the task must be pinned to
 * exactly one CPU and that CPU must be the one we are running on.
 * Returns that CPU number, or -1 on failure.  Preemption is disabled only
 * to get a stable smp_processor_id(); _no_resched is fine because no
 * wakeup can have been issued in this short section.
 */
static int check_cpu_in_bind(struct task_struct *const tsk)
{
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();

	if (cpumask_weight(tsk->cpus_ptr) != 1) {
		cpu = -1;
		rpc_err("allow cpus != 1\n");
	} else if (!cpumask_test_cpu(cpu, tsk->cpus_ptr)) {
		// pinned to exactly one CPU, but not the current one
		cpu = -1;
		rpc_emerg("unexpected cpu not on cpus_ptr\n");
	}

	preempt_enable_no_resched();
	return cpu;
}

/*
 * Release all buffers of a thread snapshot and reset the pointers so a
 * double call is harmless (kfree(NULL) is a no-op).  The struct itself is
 * owned by the containing rpc_bind_info and is not freed here.
 */
static void rpc_free_thread_save(struct thread_save_info *const thread_save)
{
	kfree(thread_save->save_context);
	kfree(thread_save->save_kstack);
	thread_save->save_context = NULL;
	thread_save->save_kstack = NULL;
#ifdef CONFIG_SHADOW_CALL_STACK
	kfree(thread_save->save_scs);
	thread_save->save_scs = NULL;
#endif
#ifdef CONFIG_KASAN
	kfree(thread_save->save_kstack_kasan);
	thread_save->save_kstack_kasan = NULL;
#endif
}

/*
 * kref release callback: unlink the binding from bind_info_list and free
 * it together with both saved thread snapshots.  Runs with
 * bind_info_list_lock held — both kref_init and kref_put below take that
 * lock around the refcount operation, which is what makes the list_del()
 * here safe.
 */
static void bind_info_release(struct kref *ref)
{
	struct rpc_bind_info *bind_info = container_of(ref, struct rpc_bind_info, ref);

	list_del(&bind_info->node);

	rpc_free_thread_save(&bind_info->server_thread_save);
	rpc_free_thread_save(&bind_info->client_thread_save);
	kfree(bind_info);

	rpc_debug("%s/%d free bind_info success\n", current->comm, current->pid);
}

/*
 * Initialize the refcount to 1 and publish the binding on the global
 * list, atomically with respect to lookups (rpc_find_bind_info).
 */
static noinline void bind_info_kref_init(struct rpc_bind_info *bind_info)
{
	spin_lock(&bind_info_list_lock);
	kref_init(&bind_info->ref);
	list_add(&bind_info->node, &bind_info_list);
	spin_unlock(&bind_info_list_lock);

	rpc_debug("[%pS] %s/%d after init kref, refcount = %d\n",
			__builtin_return_address(0), current->comm, current->pid,
			kref_read(&bind_info->ref));
}

/* Take an additional reference; caller must already hold one (or the
 * list lock, as rpc_find_get_bind_info does). */
static noinline void bind_info_kref_get(struct rpc_bind_info *bind_info)
{
	kref_get(&bind_info->ref);

	rpc_debug("[%pS] %s/%d after get kref, refcount = %d\n",
			__builtin_return_address(0), current->comm, current->pid,
			kref_read(&bind_info->ref));
}

/*
 * Drop a reference under bind_info_list_lock so that a concurrent lookup
 * cannot resurrect the binding while bind_info_release() unlinks it.
 * The refcount is sampled before the put purely for the debug trace.
 */
static noinline void bind_info_kref_put(struct rpc_bind_info *bind_info)
{
	int ref;

	spin_lock(&bind_info_list_lock);
	ref = kref_read(&bind_info->ref);
	kref_put(&bind_info->ref, bind_info_release);
	spin_unlock(&bind_info_list_lock);

	rpc_debug("[%pS] %s/%d after put kref, refcount = %d\n",
			__builtin_return_address(0), current->comm, current->pid, ref - 1);
}

/* Acquire the per-binding lock guarding its mutable fields. */
static inline void bind_info_lock(struct rpc_bind_info *bind_info)
{
	spin_lock(&bind_info->lock);
}

/* Release the per-binding lock. */
static inline void bind_info_unlock(struct rpc_bind_info *bind_info)
{
	spin_unlock(&bind_info->lock);
}

/* Serialize bind/unbind operations across all sessions. */
static inline void rpc_mutex_lock(void)
{
	mutex_lock(&rpc_mutex);
}

static inline void rpc_mutex_unlock(void)
{
	mutex_unlock(&rpc_mutex);
}

/*
 * Pin the server task for the lifetime of the binding: take a reference
 * on its kernel stack (fails with -ENOENT if the stack is already gone)
 * and on its task_struct, and remember the pinned pointer in
 * server_task_get for the matching put.
 */
static int rpc_get_server_task(struct rpc_bind_info *bind_info)
{
	struct task_struct *server_task = bind_info->server_task;

	if (!try_get_task_stack(server_task))
		return -ENOENT;

	get_task_struct(server_task);
	bind_info->server_task_get = server_task;

	return 0;
}

/*
 * Drop the references taken by rpc_get_server_task().
 * NOTE(review): server_task_get is not cleared here, and
 * rpc_free_bind_info_ptr() calls this for every bound entry — looks like
 * a double put would be possible if it ran twice for the same binding;
 * verify against the unbind/exit paths.
 */
static void rpc_put_server_task(struct rpc_bind_info *bind_info)
{
	struct task_struct *server_task_get = bind_info->server_task_get;

	put_task_stack(server_task_get);
	put_task_struct(server_task_get);
}

/* Record the task's shared rpc_data page in its rtos extension. */
static void rpc_set_data_page(struct task_struct *tsk, void *data_page)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(tsk);

	rtos_task->rpc_task.data_page = data_page;
}

/* Fetch the task's shared rpc_data page; NULL if none was set up. */
static inline void *rpc_get_data_page(struct task_struct *tsk)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(tsk);

	return rtos_task->rpc_task.data_page;
}

/*
 * Free the task's data page, undoing the SetPageReserved presumably done
 * at allocation time (allocation is outside this chunk).  Clearing the
 * pointer first makes the helper idempotent.
 */
static void rpc_free_data_page(struct task_struct *tsk)
{
	void *data_page;

	data_page = rpc_get_data_page(tsk);
	if (data_page) {
		rpc_set_data_page(tsk, NULL);
		ClearPageReserved(virt_to_page(data_page));
		free_page((uintptr_t)data_page);
	}
}

/*
 * Validate the bind_id the client placed in its shared data page before a
 * do_call.  Only bind_id 0 is accepted — consistent with
 * rpc_client_thread_bind(), which allows a single binding per client and
 * reads slot 0 in do_call_fast_prepare().  Returns false and logs on a
 * missing page or non-zero id.
 */
static bool rpc_check_data_bind_id(struct task_struct *tsk)
{
	void *data_page;
	struct rpc_data *data;

	data_page = rpc_get_data_page(tsk);
	if (!data_page) {
		rpc_err("%s/%d get bind_id failed\n", tsk->comm, tsk->pid);
		return false;
	}

	data = data_page;
	if (data->bind_id != 0) {
		rpc_err("%s/%d bind_id is invalid\n", tsk->comm, tsk->pid);
		return false;
	}

	return true;
}

/*
 * Hand the rpc_data payload over from @prev to @next by copying the
 * struct between their shared data pages, so the peer observes the
 * caller's arguments (or results on the return path).  If either side
 * has no data page the copy is silently skipped.
 */
static inline void rpc_switch_data(struct task_struct *prev, struct task_struct *next)
{
	void *src = rpc_get_data_page(prev);
	void *dst = rpc_get_data_page(next);

	if (likely(src && dst))
		memcpy(dst, src, sizeof(struct rpc_data));
}

/* Record the buffer used to stash the client's pt_regs during a call. */
static void rpc_set_client_save_regs(struct task_struct *tsk, void *regs)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(tsk);

	rtos_task->rpc_task.client_save_regs = regs;
}

/* Fetch the client's saved-regs buffer; NULL before client_thread_init. */
static inline void *rpc_get_client_save_regs(struct task_struct *tsk)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(tsk);

	return rtos_task->rpc_task.client_save_regs;
}

/* Free the saved-regs buffer and clear the pointer (idempotent). */
static __always_inline void rpc_free_client_save_regs(struct task_struct *tsk)
{
	void *save_regs;

	save_regs = rpc_get_client_save_regs(tsk);
	if (save_regs) {
		rpc_set_client_save_regs(tsk, NULL);
		kfree(save_regs);
	}
}

/* Attach a binding to the task (server side, or client while calling). */
static void rpc_set_bind_info(struct task_struct *tsk, struct rpc_bind_info *bind_info)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(tsk);

	rtos_task->rpc_task.bind_info = bind_info;
}

/* Fetch the task's binding; NULL if the task is not bound. */
static inline struct rpc_bind_info *rpc_get_bind_info(struct task_struct *tsk)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(tsk);

	return rtos_task->rpc_task.bind_info;
}

/*
 * Server-exit teardown, called from rpc_mm_release().  If @tsk is the
 * server of its binding: detach it, mark the binding dead (need_exit),
 * kick a client stuck mid-call out of TASK_RTPC_UNWAKEABLE, then drop the
 * server's reference.  A task whose bind_info points at a binding it does
 * not serve (e.g. a client, whose cleanup happens in
 * rpc_free_bind_info_ptr()) is left untouched.
 */
static __always_inline void rpc_free_bind_info(struct task_struct *tsk)
{
	struct rpc_bind_info *bind_info;

	bind_info = rpc_get_bind_info(tsk);
	if (bind_info) {
		bind_info_lock(bind_info);
		if (bind_info->server_task == tsk) {
			bind_info->server_task = NULL;
			rpc_alert("set need_exit when server exit\n");
			bind_info->need_exit = 1;
			// only wake the client if it is actually blocked in a call
			if (bind_info->client_task && bind_info->is_calling)
				wake_up_state(bind_info->client_task, TASK_RTPC_UNWAKEABLE);
			bind_info_unlock(bind_info);
			// drop the lock before the put: release takes the list lock
			bind_info_kref_put(bind_info);
			rpc_set_bind_info(tsk, NULL);
			return;
		}
		bind_info_unlock(bind_info);
	}
}

/* Attach the per-client binding table to the task. */
static void rpc_set_bind_info_ptr(struct task_struct *tsk, struct rpc_bind_info_ptr *bind_info_ptr)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(tsk);

	rtos_task->rpc_task.bind_info_ptr = bind_info_ptr;
}

/* Fetch the per-client binding table; NULL before client_thread_init. */
static inline struct rpc_bind_info_ptr *rpc_get_bind_info_ptr(struct task_struct *tsk)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(tsk);

	return rtos_task->rpc_task.bind_info_ptr;
}

/*
 * Client-exit teardown, called from rpc_mm_release().  For every binding
 * the client holds: detach the client, mark the binding dead, wake a
 * server blocked in TASK_RTPC_UNWAKEABLE, drop the pinned server-task
 * references and the client's kref.  Finally free the table itself.
 */
static __always_inline void rpc_free_bind_info_ptr(struct task_struct *tsk)
{
	unsigned int bit;
	struct rpc_bind_info *bind_info;
	struct rpc_bind_info_ptr *bind_info_ptr;

	bind_info_ptr = rpc_get_bind_info_ptr(tsk);
	if (bind_info_ptr) {
		for_each_set_bit(bit, bind_info_ptr->bind_info_map, MAX_BIND_NUM) {
			bind_info = bind_info_ptr->bind_info[bit];
			bind_info_lock(bind_info);
			bind_info->client_task = NULL;
			rpc_alert("set need_exit when client exit\n");
			bind_info->need_exit = 1;
			if (bind_info->server_task)
				wake_up_state(bind_info->server_task, TASK_RTPC_UNWAKEABLE);
			rpc_put_server_task(bind_info);
			bind_info_unlock(bind_info);
			bind_info_kref_put(bind_info);
		}
		kfree(bind_info_ptr);
		rpc_set_bind_info_ptr(tsk, NULL);
	}
}

/* First free slot in the client's binding table; MAX_BIND_NUM when full.
 * Note that rpc_client_thread_bind() only ever accepts slot 0. */
static unsigned int rpc_find_bind_id(const struct rpc_bind_info_ptr *bind_info_ptr)
{
	return find_first_zero_bit(bind_info_ptr->bind_info_map, MAX_BIND_NUM);
}

/* Install @bind_info in slot @bind_id and mark the slot used. */
static void rpc_set_bind_id(struct rpc_bind_info_ptr *bind_info_ptr,
		unsigned int bind_id, struct rpc_bind_info *bind_info)
{
	bind_info_ptr->bind_info[bind_id] = bind_info;
	set_bit(bind_id, bind_info_ptr->bind_info_map);
}

static struct rpc_bind_info *rpc_find_bind_info(unsigned int session_id)
{
	struct rpc_bind_info *bind_info;

	list_for_each_entry(bind_info, &bind_info_list, node) {
		if (bind_info->session_id == session_id)
			return bind_info;
	}

	return NULL;
}

/*
 * Look up a binding by session id and take a reference on it, atomically
 * under bind_info_list_lock so it cannot be released between the find and
 * the get.  Returns NULL if the session does not exist; the caller owns
 * one reference otherwise.
 */
static struct rpc_bind_info *rpc_find_get_bind_info(unsigned int session_id)
{
	struct rpc_bind_info *bind_info;

	spin_lock(&bind_info_list_lock);
	bind_info = rpc_find_bind_info(session_id);
	if (bind_info)
		bind_info_kref_get(bind_info);
	spin_unlock(&bind_info_list_lock);

	return bind_info;
}

/*
 * Per-task RPC cleanup hook (non-static: called from process-exit paths
 * outside this file).  Tears down, in order: a server-side binding, the
 * client-side binding table, the shared data page and the saved-regs
 * buffer.  Each helper is a no-op when the corresponding state is absent,
 * so this is safe for tasks that never touched RPC.
 */
void rpc_mm_release(struct task_struct *tsk)
{
	rpc_free_bind_info(tsk);
	rpc_free_bind_info_ptr(tsk);
	rpc_free_data_page(tsk);
	rpc_free_client_save_regs(tsk);
}

/*
 * Snapshot a sleeping thread's kernel-side execution state into
 * @thread_save: CPU context, the live slice of its kernel stack (from the
 * saved SP to the stack top), and — when configured — the used part of
 * its shadow call stack and the KASAN shadow of the stack slice.
 *
 * Uses GFP_ATOMIC because callers may run with a spinlock held or irqs
 * disabled (see rpc_wait_call_fast()).  Returns 0 on success, -ENOMEM if
 * any buffer allocation fails (all buffers are freed on that path).
 */
static int rpc_save_thread(struct task_struct *const tsk,
			   struct thread_save_info *const thread_save)
{
	void *const save_context = rtpc_alloc_cpu_context();

	// copy from the thread's saved SP up to the top of its stack
	void *const start_sp = (void *)thread_saved_sp(tsk);
	const size_t kstack_size = tsk->stack + THREAD_SIZE - start_sp;
	void *const save_kstack = kcalloc(1, kstack_size, GFP_ATOMIC);
#ifdef CONFIG_SHADOW_CALL_STACK
	const size_t scs_size = (char *)task_scs_sp(tsk) - (char *)task_scs(tsk);
	void *const save_scs = kcalloc(1, scs_size, GFP_ATOMIC);
#endif
#ifdef CONFIG_KASAN
	// shadow bytes covering [start_sp, pt_regs) of the kernel stack
	const size_t kstack_kasan_size = (char *)kasan_mem_to_shadow((char *)task_pt_regs(tsk) - 1)
					 - (char *)kasan_mem_to_shadow(start_sp) + 1;
	void *const save_kstack_kasan = kcalloc(1, kstack_kasan_size, GFP_ATOMIC);
#endif

	if (!save_context || !save_kstack)
		goto label_nomem;
#ifdef CONFIG_SHADOW_CALL_STACK
	if (!save_scs)
		goto label_nomem;
#endif
#ifdef CONFIG_KASAN
	if (!save_kstack_kasan)
		goto label_nomem;
#endif

	// save kernel stack
	// (__memcpy under KASAN: copy raw bytes without shadow checks, since
	// the source spans another task's stack)
#ifndef CONFIG_KASAN
	memcpy(save_kstack, start_sp, kstack_size);
#else
	__memcpy(save_kstack, start_sp, kstack_size);
#endif

	// save kernel shadow stack
#ifdef CONFIG_SHADOW_CALL_STACK
#ifndef CONFIG_KASAN
	memcpy(save_scs, (void *)((char *)task_scs(tsk) + sizeof(void *)), scs_size);
#else
	__memcpy(save_scs, (void *)((char *)task_scs(tsk) + sizeof(void *)), scs_size);
#endif
#endif

	// save kasan shadow mem of kernel stack
#ifdef CONFIG_KASAN
	__memcpy(save_kstack_kasan, kasan_mem_to_shadow(start_sp), kstack_kasan_size);
#endif

	rtpc_save_cpu_context(tsk, save_context);

	// publish results only after everything succeeded
	thread_save->save_context = save_context;

	thread_save->save_kstack = save_kstack;
	thread_save->kstack_size = kstack_size;
	thread_save->start_sp = start_sp;

#ifdef CONFIG_SHADOW_CALL_STACK
	thread_save->save_scs = save_scs;
	thread_save->scs_size = scs_size;
#endif

#ifdef CONFIG_KASAN
	thread_save->kstack_kasan_size = kstack_kasan_size;
	thread_save->save_kstack_kasan = save_kstack_kasan;
#endif

	return 0;

label_nomem:
	kfree(save_context);
	kfree(save_kstack);
#ifdef CONFIG_SHADOW_CALL_STACK
	kfree(save_scs);
#endif
#ifdef CONFIG_KASAN
	kfree(save_kstack_kasan);
#endif
	return -ENOMEM;
}

/*
 * Restore a thread from the snapshot taken by rpc_save_thread(): task
 * struct bookkeeping, kernel stack contents, CPU context, and (when
 * configured) the shadow call stack and the KASAN shadow region.
 * Inverse of rpc_save_thread(); the snapshot buffers are kept for reuse.
 */
static void rpc_recover_thread(struct task_struct *const tsk,
			       struct thread_save_info *const thread_save)
{
#ifdef CONFIG_SHADOW_CALL_STACK
	void *const scs_base = task_scs(tsk);
	const size_t scs_size = thread_save->scs_size;
#endif

	rtpc_recover_task_struct(tsk);
	rtpc_recover_task_stack(thread_save->start_sp,
				thread_save->save_kstack, thread_save->kstack_size);

	rtpc_recover_cpu_context(tsk, thread_save->save_context);
#ifdef CONFIG_SHADOW_CALL_STACK
	// reset the SCS pointer, then restore the saved entries
	// (skipping the first slot, mirroring the save path)
	task_scs_sp(tsk) = (char *)scs_base + scs_size;
#ifndef CONFIG_KASAN
	memcpy((void *)((char *)scs_base + sizeof(void *)), thread_save->save_scs, scs_size);
#else
	__memcpy((void *)((char *)scs_base + sizeof(void *)), thread_save->save_scs, scs_size);
#endif
#endif

#ifdef CONFIG_KASAN
	__memcpy(kasan_mem_to_shadow(thread_save->start_sp),
		 thread_save->save_kstack_kasan, thread_save->kstack_kasan_size);
#endif
}

/*
 * asmlinkage entry (called from low-level assembly): running as the
 * client, restore the bound server thread from its saved snapshot.
 */
asmlinkage void rpc_recover_server_thread(void)
{
	struct task_struct *client_task;
	struct task_struct *server_task;
	struct rpc_bind_info *bind_info;

	client_task = current;
	bind_info = rpc_get_bind_info(client_task);
	server_task = bind_info->server_task;

	rpc_recover_thread(server_task, &bind_info->server_thread_save);
}

/*
 * asmlinkage entry: running as the server, restore the bound client
 * thread from its saved snapshot.  Mirror image of the function above.
 */
asmlinkage void rpc_recover_client_thread(void)
{
	struct task_struct *client_task;
	struct task_struct *server_task;
	struct rpc_bind_info *bind_info;

	server_task = current;
	bind_info = rpc_get_bind_info(server_task);
	client_task = bind_info->client_task;

	rpc_recover_thread(client_task, &bind_info->client_thread_save);
}

static int rpc_server_init(struct file *filp, unsigned long arg, struct task_struct *tsk)
{
	if (!capable(CAP_SYS_ADMIN)) {
		rpc_err("permission check failed\n");
		return -EACCES;
	}

	return 0;
}

/*
 * CMD_SERVER_THREAD_BIND handler: create a binding for @session_id and
 * register the calling (privileged, single-CPU-pinned) thread as its
 * server.  @arg points to the user-space session id.  After this, the
 * task may no longer change affinity (PF_NO_SETAFFINITY) — the whole
 * fast-call machinery relies on client and server sharing one CPU.
 *
 * Returns 0 on success; -EPERM/-EFAULT/-EEXIST/-ENOMEM on failure.
 */
static int rpc_server_thread_bind(struct file *filp, unsigned long arg, struct task_struct *tsk)
{
	int ret = 0;
	struct rpc_bind_info *bind_info;
	unsigned int session_id;
	int cpu;

	if (!capable(CAP_SYS_ADMIN)) {
		rpc_err("permission check failed\n");
		return -EPERM;
	}

	if (get_user(session_id, (unsigned int __user *)arg))
		return -EFAULT;

	// a task may be either a server or a client, and only once
	if (rpc_get_bind_info(tsk) || rpc_get_bind_info_ptr(tsk))
		return -EEXIST;

	cpu = check_cpu_in_bind(tsk);
	if (cpu < 0) {
		rpc_err("CPU affinity check failed!\n");
		return -EPERM;
	}

	rpc_mutex_lock();
	// session ids must be unique; the transient ref from the lookup is
	// dropped immediately
	bind_info = rpc_find_get_bind_info(session_id);
	if (bind_info) {
		bind_info_kref_put(bind_info);
		rpc_err("create already with session_id = %u\n", session_id);
		ret = -EEXIST;
		goto unlock;
	}

	bind_info = kcalloc(1, sizeof(struct rpc_bind_info), GFP_KERNEL);
	if (!bind_info) {
		rpc_err("alloc rpc_bind_info failed\n");
		ret = -ENOMEM;
		goto unlock;
	}

	bind_info->session_id = session_id;
	bind_info->server_task = tsk;
	bind_info->cpu = cpu;

	spin_lock_init(&bind_info->lock);
	// publishes the binding on the global list with refcount 1
	bind_info_kref_init(bind_info);

	INIT_CSD(&task_to_rtos_task(tsk)->rpc_task.csd, do_rtpc_signal, tsk);
	/* smp_mb:
	 * We need to ensure the order:
	 * 1. INIT_CSD
	 * 2. set_bind_info
	 *
	 * because, in signal_wake_up_state, we:
	 * 1. check bind_info
	 * 2. send IPI (read CSD)
	 */
	smp_mb();
	rpc_set_bind_info(tsk, bind_info);

	tsk->flags |= PF_NO_SETAFFINITY;

	rpc_debug("%s/%d bind success, session_id = %u\n", tsk->comm, tsk->pid, session_id);

unlock:
	rpc_mutex_unlock();

	return ret;
}

/*
 * CMD_CLIENT_INIT handler: only CAP_SYS_ADMIN processes may create an
 * RPC client.  No per-task state is set up at this stage.
 */
static int rpc_client_init(struct file *filp, unsigned long arg, struct task_struct *tsk)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;

	rpc_err("permission check failed\n");
	return -EPERM;
}

/*
 * CMD_CLIENT_THREAD_INIT handler: allocate the per-client state — a
 * pt_regs save buffer and the (empty) binding table — for a privileged
 * thread that is not already a server or client.
 *
 * Returns 0 on success; -EPERM/-EEXIST/-ENOMEM on failure.
 */
static int rpc_client_thread_init(struct file *filp, unsigned long arg, struct task_struct *tsk)
{
	void *save_regs;
	struct rpc_bind_info_ptr *bind_info_ptr;

	if (!capable(CAP_SYS_ADMIN)) {
		rpc_err("permission check failed\n");
		return -EPERM;
	}

	// a task may be either a server or a client, and only once
	if (rpc_get_bind_info_ptr(tsk) || rpc_get_bind_info(tsk))
		return -EEXIST;

	save_regs = kcalloc(1, sizeof(struct pt_regs), GFP_KERNEL);
	if (!save_regs)
		return -ENOMEM;

	bind_info_ptr = kcalloc(1, sizeof(struct rpc_bind_info_ptr), GFP_KERNEL);
	if (!bind_info_ptr) {
		kfree(save_regs);
		return -ENOMEM;
	}

	rpc_set_client_save_regs(tsk, save_regs);
	rpc_set_bind_info_ptr(tsk, bind_info_ptr);

	rpc_debug("%s/%d init success\n", tsk->comm, tsk->pid);
	return 0;
}

/*
 * CMD_CLIENT_THREAD_BIND handler: attach the calling client thread to the
 * server binding identified by the user-supplied session id.  Validates
 * that the client is initialized, still unbound (only slot/bind_id 0 is
 * allowed), pinned to the same single CPU as the server, in a different
 * process than the server, and that the server has reached SERVER_WAITED.
 * On success the server task is pinned (rpc_get_server_task) and its
 * context snapshotted for later recovery.
 *
 * Returns the bind_id (always 0) on success, a negative errno otherwise.
 * The kref taken by rpc_find_get_bind_info() is kept on success — it is
 * the client's reference — and dropped on every error path.
 */
static int rpc_client_thread_bind(struct file *filp, unsigned long arg, struct task_struct *tsk)
{
	int ret = 0;
	unsigned int bind_id;
	struct task_struct *server_task;
	struct rpc_bind_info *bind_info;
	struct rpc_bind_info_ptr *bind_info_ptr;
	unsigned int session_id;
	int cpu;

	if (get_user(session_id, (unsigned int __user *)arg))
		return -EFAULT;

	bind_info_ptr = rpc_get_bind_info_ptr(tsk);
	if (!bind_info_ptr)
		return -ENOENT;

	// only a single binding per client is supported: first free slot
	// must be 0, i.e. no slot is taken yet
	bind_id = rpc_find_bind_id(bind_info_ptr);
	if (bind_id != 0) {
		rpc_err("client already bind\n");
		return -EEXIST;
	}

	cpu = check_cpu_in_bind(tsk);
	if (cpu < 0) {
		rpc_err("CPU affinity check failed!\n");
		return -EPERM;
	}

	rpc_mutex_lock();
	bind_info = rpc_find_get_bind_info(session_id);
	if (!bind_info) {
		rpc_err("not found bind_info with session_id = %u\n", session_id);
		rpc_mutex_unlock();
		return -ENOENT;
	}

	bind_info_lock(bind_info);

	// client must be pinned to the same CPU the server bound to
	if (cpu != bind_info->cpu) {
		rpc_err("client bind cpu check failed!\n");
		ret = -EPERM;
		goto out;
	}

	if (bind_info->client_task) {
		rpc_err("bind already with session_id = %u\n", session_id);
		ret = -EEXIST;
		goto out;
	}

	if (bind_info->need_exit) {
		rpc_err("server thread exited\n");
		ret = -ESRCH;
		goto out;
	}

	server_task = bind_info->server_task;

	if (same_thread_group(tsk, server_task)) {
		rpc_err("client and server in same process\n");
		ret = -EPERM;
		goto out;
	}

	if (bind_info->init_state != SERVER_WAITED) {
		rpc_err("Server has not completed initialization, please try again!\n");
		ret = -EAGAIN;
		goto out;
	}

	ret = rpc_get_server_task(bind_info);
	if (ret) {
		rpc_err("get server task failed\n");
		goto out;
	}

	// snapshot the sleeping server so it can be recovered after calls
	ret = rpc_save_thread(server_task, &bind_info->server_thread_save);
	if (ret) {
		rpc_put_server_task(bind_info);
		rpc_err("save server thread failed\n");
		ret = -EFAULT;
		goto out;
	}

	bind_info->client_task = tsk;
	rpc_set_bind_id(bind_info_ptr, bind_id, bind_info);

	bind_info_unlock(bind_info);
	rpc_mutex_unlock();

	INIT_CSD(&task_to_rtos_task(tsk)->rpc_task.csd, do_rtpc_signal, tsk);
	/* smp_mb:
	 * We need to ensure the order:
	 * 1. INIT_CSD
	 * 2. set_bind_info
	 *
	 * because, in signal_wake_up_state, we:
	 * 1. check bind_info
	 * 2. send IPI (read CSD)
	 */
	smp_mb();
	rpc_set_bind_info(tsk, bind_info);

	rpc_debug("%s/%d bind success, bind_id = %u\n", tsk->comm, tsk->pid, bind_id);
	return bind_id;
out:
	bind_info_unlock(bind_info);
	bind_info_kref_put(bind_info);
	rpc_mutex_unlock();
	return ret;
}

/*
 * Validate the client side before a fast do_call and hand back its saved
 * regs buffer and binding (slot 0) through the out-parameters.  Checks:
 * client initialized, data page carries bind_id 0, binding exists, task
 * is CFS, and user preemption (tasklock) is enabled.
 *
 * Returns 0 and fills *p_client_save_regs / *p_bind_info on success;
 * -EINVAL or -EACCES otherwise (out-parameters untouched).
 */
static int do_call_fast_prepare(struct task_struct *tsk, void **const p_client_save_regs,
				struct rpc_bind_info **const p_bind_info)
{
	void *client_save_regs;
	struct rpc_bind_info *bind_info;
	struct rpc_bind_info_ptr *bind_info_ptr;

	client_save_regs = rpc_get_client_save_regs(tsk);
	bind_info_ptr = rpc_get_bind_info_ptr(tsk);
	if (unlikely(!client_save_regs) || unlikely(!bind_info_ptr)) {
		rpc_err("%s/%d is not init\n", tsk->comm, tsk->pid);
		return -EINVAL;
	}

	if (unlikely(!rpc_check_data_bind_id(tsk)))
		return -EINVAL;

	// only slot 0 exists: rpc_client_thread_bind() enforces a single binding
	bind_info = bind_info_ptr->bind_info[0];
	if (unlikely(!bind_info)) {
		rpc_err("%s/%d is not bind\n", tsk->comm, tsk->pid);
		return -EINVAL;
	}

	if (unlikely(!fair_policy(tsk->policy))) {
		rpc_err("only support cfs task\n");
		return -EACCES;
	}

	if (unlikely(get_tasklock_page(tsk))) {
		rpc_err("must enable user preemption\n");
		return -EACCES;
	}

	*p_client_save_regs = client_save_regs;
	*p_bind_info = bind_info;

	return 0;
}

static int rpc_check_server_task(struct task_struct *tsk)
{
	if (unlikely(!tsk)) {
		rpc_err("server is not exist\n");
		return -ESRCH;
	}

	if (unlikely(!fair_policy(tsk->policy))) {
		rpc_err("only support cfs task\n");
		return -EACCES;
	}

	return 0;
}

__attribute__((cold))
static noinline int rpc_wait_call_fast(struct task_struct *const tsk,
				       struct rpc_bind_info *const bind_info)
{
	int ret;

	BUG_ON(tsk->audit_context && tsk->audit_context->in_syscall != 0);

	BUG_ON(!irqs_disabled());

	rpc_debug("%s/%d wait call fast start\n", tsk->comm, tsk->pid);

	if (check_cpu(tsk, tsk, bind_info->cpu) != 0) {
		rpc_emerg("unexpected cpu migration!\n");
		return -EFAULT;
	}

	// need_exit is already check in do_exception

	if (bind_info->init_state != BIND_INFO_CREATED) {
		rpc_emerg("unexpected: init_state = %d, set need_exit\n", bind_info->init_state);
		bind_info->need_exit = 1;
		return -EFAULT;
	}

	bind_info->init_state = SERVER_WAITED;

	enter_from_user_mode();
	preempt_disable();

sleep_preempt_disabled:
	set_current_state(TASK_RTPC_UNWAKEABLE);
	// irq will be enable in __schedule()
	rtpc_schedule_preempt_disabled();
	// irq is on
	local_irq_disable();

	if (bind_info->init_state == CLIENT_NEED_SAVE) {
		if (rpc_save_thread(bind_info->client_task, &bind_info->client_thread_save)) {
			rpc_alert("set need_exit: failed to save client context!\n");
			bind_info->need_exit = 1;
		}
		bind_info->init_state = CLIENT_SAVED;
		wake_up_state(bind_info->client_task, TASK_RTPC_UNWAKEABLE);

		if (bind_info->need_exit) {
			rpc_alert("need exit, server not sleep\n");
			ret = -ENOLINK;
			goto exit_preempt_disabled;
		}

		goto sleep_preempt_disabled;
	}

	rpc_alert("set need_exit: server wake up kill!\n");
	bind_info->need_exit = 1;

	ret = -ENOLINK;

exit_preempt_disabled:
	preempt_enable_no_resched();
	exit_to_user_mode();

	rpc_debug("%s/%d wait call slow end\n", tsk->comm, tsk->pid);
	return ret;
}

/*
 * Client side of the one-time binding initialization: signal the waiting
 * server (CLIENT_NEED_SAVE), sleep while it snapshots our context, then
 * complete the handshake.  Called with bind_info locked.
 *
 * Locking is asymmetric by design: on the CLIENT_SAVED success path the
 * function RETURNS 0 WITH bind_info RE-LOCKED so rpc_do_call_fast() can
 * continue under the same lock; on every failure path the lock is
 * dropped before returning nonzero.
 */
__attribute__((cold))
static noinline int save_client_context(struct rpc_bind_info *const bind_info,
					struct task_struct *const server_task,
					struct task_struct *const client_task,
					struct pt_regs *const client_regs,
					void *const client_save_regs)
{
	if (bind_info->init_state != SERVER_WAITED) {
		rpc_emerg("unexpected bind_info->init_state != SERVER_WAITED\n");
		bind_info_unlock(bind_info);
		return -EFAULT;
	}

	// hand over to the server, then sleep until it has saved us
	bind_info->init_state = CLIENT_NEED_SAVE;
	wake_up_state(server_task, TASK_RTPC_UNWAKEABLE);
	set_current_state(TASK_RTPC_UNWAKEABLE);

	bind_info_unlock(bind_info);

	enter_from_user_mode();
	preempt_disable();
	// irq will be enable in __schedule()
	rtpc_schedule_preempt_disabled();
	// irq is on

	local_irq_disable();
	preempt_enable_no_resched();
	exit_to_user_mode();

	if (bind_info->init_state == CLIENT_SAVED) {
		bind_info_lock(bind_info);
		bind_info->init_state = BIND_INFO_INITED;
		if (bind_info->need_exit) {
			rpc_alert("client exit during save\n");
			bind_info_unlock(bind_info);
			return -ENOLINK;
		}
		// Continue with the do call process
		return 0;
	}

	// unexpected wakeup: dump every violated invariant before bailing out
	if (bind_info->init_state != BIND_INFO_INITED)
		rpc_emerg("unexpected: init_state = %d\n", bind_info->init_state);
	if (client_task != current)
		rpc_emerg("unexpected: tsk != current\n");
	if (bind_info->is_calling != 1)
		rpc_emerg("unexpected: bind_info->is_calling != 1\n");
	if (bind_info->need_exit != 1)
		rpc_emerg("unexpected: bind_info->need_exit != 1\n");
	BUG_ON(client_task->audit_context && client_task->audit_context->in_syscall != 0);
	rpc_alert("client be wake kill!\n");
	rtpc_switch_pt_regs(client_regs, client_save_regs);
	return -ENOLINK;
}

/*
 * Fast do_call (EXCEPTION_CALL path): switch user execution from the
 * client onto the server without a full context switch.  The client's
 * pt_regs are stashed in its save buffer, the server's pt_regs are copied
 * over the client's, the user context and the shared data page are handed
 * over, and is_calling is set.  On the first call for a binding,
 * save_client_context() performs the lazy init handshake first (and, when
 * it returns 0, leaves bind_info locked for us).
 *
 * The result (0 or a negative errno) is delivered to user space through
 * the client's pt_regs return-value slot.
 */
static void rpc_do_call_fast(struct task_struct *const client_task)
{
	int ret;
	void *client_save_regs;
	struct pt_regs *client_regs;
	struct pt_regs *server_regs;
	struct task_struct *server_task;
	struct rpc_bind_info *bind_info;

	client_regs = task_pt_regs(client_task);

	ret = do_call_fast_prepare(client_task, &client_save_regs, &bind_info);
	if (ret)
		goto out;

	bind_info_lock(bind_info);
	server_task = bind_info->server_task;

	ret = rpc_check_server_task(server_task);
	if (ret)
		goto unlock;

	if (unlikely(bind_info->init_state != BIND_INFO_INITED)) {
		// first call: one-time handshake; on failure the helper has
		// already dropped the lock, on success it returns locked
		ret = save_client_context(bind_info, server_task, client_task,
					  client_regs, client_save_regs);
		if (ret)
			goto out;
	}

	ret = check_cpu(client_task, server_task, bind_info->cpu);
	if (ret) {
		rpc_emerg("unexpected cpu migration!\n");
		goto unlock;
	}

	server_regs = task_pt_regs(server_task);
	// client regs -> save buffer, server regs -> client frame
	rtpc_switch_pt_regs(client_save_regs, client_regs);
	rtpc_switch_pt_regs(client_regs, server_regs);
	rtpc_context_switch_user(client_task, server_task);
	rpc_switch_data(client_task, server_task);
	bind_info->is_calling = 1;
	if (unlikely(sysctl_rpc_kernel_stats)) {
		struct rpc_stat_info *const rpc_stat = __this_cpu_read(cpu_rpc_stat);
		++rpc_stat->nr_call;
	}

unlock:
	bind_info_unlock(bind_info);
out:
	rtpc_set_return_value(client_regs, (unsigned long)ret);

	rpc_debug("[TYPE_4] do call end\n");
}

/*
 * Fast return path (EXCEPTION_RET_FAST): undo rpc_do_call_fast() when no
 * fixup happened in between — restore the client's saved pt_regs, swap
 * the user context back from server to client, copy the result data page
 * back, clear is_calling, and report success (0) to the client.  A CPU
 * migration here violates the core invariant, hence BUG().
 */
static void rpc_ret_call_fast(struct task_struct *tsk)
{
	struct rpc_bind_info *bind_info;
	void *client_save_regs;
	struct pt_regs *client_regs;
	struct task_struct *client_task;
	struct task_struct *server_task;

	client_task = tsk;
	client_regs = task_pt_regs(client_task);
	client_save_regs = rpc_get_client_save_regs(client_task);
	bind_info = rpc_get_bind_info(client_task);

	bind_info_lock(bind_info);
	server_task = bind_info->server_task;

	if (check_cpu(client_task, server_task, bind_info->cpu) != 0) {
		rpc_emerg("unexpected cpu migration!\n");
		BUG();
	}

	rtpc_switch_pt_regs(client_regs, client_save_regs);
	// briefly impersonate the server so the user context hand-back
	// runs with the right `current`
	rtpc_set_current(server_task);
	rtpc_context_switch_user(server_task, client_task);
	rtpc_set_current(client_task);
	rpc_switch_data(server_task, client_task);
	bind_info->is_calling = 0;

	rtpc_set_return_value(client_regs, 0);

	bind_info_unlock(bind_info);
	rpc_debug("[TYPE_3] ret call end with no exception\n");
}

/*
 * Fixup path (EXCEPTION_FIXUP): an interrupt, syscall or fault arrived
 * while the server was executing on the client task's behalf.  Promote
 * the server to a real scheduled task again — move the trapped pt_regs
 * onto the server's frame, correct task_struct/scheduler/kernel context,
 * and prepare `current` to become the server — so the kernel can handle
 * the event normally.  Also accounts fixup statistics per cause when
 * sysctl_rpc_kernel_stats is on.
 *
 * @is_el0_sync distinguishes synchronous EL0 exceptions from interrupts;
 * @type_syscall and @scno further classify syscalls for the stat table.
 */
static void rpc_fixup_exception(struct task_struct *tsk, unsigned long is_el0_sync,
				const bool type_syscall, struct pt_regs *const client_regs,
				const int scno)
{
	struct rpc_bind_info *bind_info;
	struct pt_regs *server_regs;
	struct task_struct *client_task;
	struct task_struct *server_task;

	client_task = tsk;
	bind_info = rpc_get_bind_info(client_task);

	bind_info_lock(bind_info);
	server_task = bind_info->server_task;
	server_regs = task_pt_regs(server_task);

	if (check_cpu(client_task, server_task, bind_info->cpu) != 0) {
		rpc_emerg("unexpected cpu migration!\n");
		BUG();
	}

	if (sysctl_rpc_kernel_debug)
		rtpc_show_exception_info(is_el0_sync, tsk);

	if (sysctl_rpc_kernel_stats) {
		struct rpc_stat_info *const rpc_stat = __this_cpu_read(cpu_rpc_stat);
		++rpc_stat->nr_fixup;
		if (!is_el0_sync) {
			++rpc_stat->nr_fixup_irq;
		} else if (!type_syscall) {
			++rpc_stat->nr_fixup_exception;
		} else {
			++rpc_stat->nr_fixup_syscall;
			if (scno <= STAT_SYSCNO_MAX && scno > 0)
				++rpc_stat->syscall_table[scno];
			else
				rpc_err("scno %d out of record range\n", scno);
		}
	}

#ifdef CONFIG_KASAN
	// the server's saved stack slice is about to be rewritten; clear its
	// stale KASAN shadow first
	if ((void *)thread_saved_sp(server_task) != bind_info->server_thread_save.start_sp) {
		rpc_emerg("thread_saved_sp(tsk) != bind_info->start_sp!\n");
		BUG();
	}
	__memset(kasan_mem_to_shadow((void *)thread_saved_sp(server_task)), 0,
				     bind_info->server_thread_save.kstack_kasan_size);
#endif
	rtpc_switch_pt_regs(server_regs, client_regs);
	rtpc_correct_task_struct(server_task);
	rtpc_sched_switch(client_task, server_task);
	rtpc_context_switch_kernel(client_task, server_task);

	rtpc_set_current_prepare(server_task, server_regs);

	bind_info_unlock(bind_info);
	rpc_debug("[TYPE_2] change real task end in exception\n");
}

/*
 * Return path after a fixup (EXCEPTION_RET_EXP): the server finished a
 * call while running as a real task, so hand execution back to the saved
 * client — restore the client's regs and task state, switch context and
 * data page, clear is_calling, and prepare `current` to become the
 * client.  Returns the exception type the assembly exit path should take:
 * EXCEPTION_RET_EXP on success, or EXCEPTION_CALL (with -EFAULT in the
 * server's regs) when the CPU invariant is broken.
 */
static unsigned long rpc_ret_call_exception(struct task_struct *const server_task,
					    struct rpc_bind_info *const bind_info,
					    struct pt_regs *const server_regs)
{
	void *client_save_regs;
	struct pt_regs *client_regs;
	struct task_struct *client_task;

	bind_info_lock(bind_info);

	client_task = bind_info->client_task;
	client_regs = task_pt_regs(client_task);
	client_save_regs = rpc_get_client_save_regs(client_task);

	if (check_cpu(client_task, server_task, bind_info->cpu) != 0) {
		bind_info_unlock(bind_info);
		rpc_emerg("unexpected cpu migration!\n");
		rtpc_set_return_value(server_regs, -EFAULT);
		// goto .kernel_exit
		return EXCEPTION_CALL;
	}

#ifdef CONFIG_KASAN
	// clear stale shadow of the client's stack slice before recovery
	if ((void *)thread_saved_sp(client_task) != bind_info->client_thread_save.start_sp) {
		rpc_emerg("thread_saved_sp(tsk) != bind_info->start_sp!\n");
		BUG();
	}
	__memset(kasan_mem_to_shadow((void *)thread_saved_sp(client_task)), 0,
				     bind_info->client_thread_save.kstack_kasan_size);
#endif
	rtpc_switch_pt_regs(client_regs, client_save_regs);
	rtpc_correct_task_struct(client_task);
	rtpc_sched_switch(server_task, client_task);
	rtpc_context_switch(server_task, client_task);
	rpc_switch_data(server_task, client_task);
	bind_info->is_calling = 0;

	// the client's do_call returns 0
	rtpc_set_return_value(client_regs, 0);
	rtpc_set_current_prepare(client_task, client_regs);

	bind_info_unlock(bind_info);
	rpc_debug("[TYPE_1] ret call end with exception\n");
	return EXCEPTION_RET_EXP;
}

/*
 * Classify the current exception for the RPC fast path.
 *
 * An ioctl(CMD_DO_CALL) syscall is always EXCEPTION_CALL. Otherwise the
 * result encodes two independent facts as enum rpc_exception_type:
 * bit 0 = the syscall is ioctl(CMD_WAIT_AND_RET_CALL), bit 1 = the task's
 * page table differs — yielding EXCEPTION_NONE (0), EXCEPTION_RET_EXP (1),
 * EXCEPTION_FIXUP (2) or EXCEPTION_RET_FAST (3).
 *
 * @regs and @scno are consulted only when @type_syscall is true.
 */
static int rpc_get_exception_type(struct task_struct *tsk, const bool type_syscall,
				  const struct pt_regs *const regs, const int scno)
{
	unsigned int ret_call_bit = 0;

	if (type_syscall && scno == __NR_ioctl) {
		const unsigned long cmd = get_cmd(regs);

		if (cmd == CMD_DO_CALL)
			return EXCEPTION_CALL;
		if (cmd == CMD_WAIT_AND_RET_CALL)
			ret_call_bit = 1;
	}

	if (is_pgtable_diff_check(tsk))
		return (0x2U | ret_call_bit); /* EXCEPTION_FIXUP / EXCEPTION_RET_FAST */

	return ret_call_bit; /* EXCEPTION_NONE / EXCEPTION_RET_EXP */
}

/*
 * Main exception-path dispatcher for the RPC fast path, invoked from the
 * low-level entry code (see the comment above rpc_cmd_node: kernel_entry
 * intercepts CMD_DO_CALL before the regular ioctl path runs).
 *
 * Returns the handled rpc_exception_type; 0 (EXCEPTION_NONE) means the
 * exception is not RPC-related and normal handling proceeds.
 *
 * Fix vs. previous version: `regs` and `scno` were passed to
 * rpc_get_exception_type() uninitialized when the exception was not a
 * syscall; reading indeterminate values is undefined behavior (and trips
 * -Wmaybe-uninitialized). They are now initialized to NULL/-1, which the
 * callee never dereferences on non-syscall paths.
 */
__aligned(64)
asmlinkage unsigned long rpc_do_exception(const unsigned long is_el0_sync)
{
	enum rpc_exception_type type;
	struct task_struct *tsk = current;
	struct rpc_bind_info *bind_info;
	bool type_syscall;
	struct pt_regs *regs = NULL;	/* valid only when set below */
	int scno = -1;			/* sentinel: not a syscall */

	/* Kernel threads (no mm) never participate in RPC. */
	if (unlikely(!tsk->mm))
		return 0;

	type_syscall = is_syscall(is_el0_sync);
	if (type_syscall) {
		regs = task_pt_regs(tsk);
		scno = get_scno(regs);
	}

	type = rpc_get_exception_type(tsk, type_syscall, regs, scno);
	if (type == EXCEPTION_NONE)
		return type;

	rpc_debug("task = %s/%d, execption type %d\n", tsk->comm, tsk->pid, (int)type);

	bind_info = rpc_get_bind_info(tsk);
	if (unlikely(!bind_info)) {
		if (type != EXCEPTION_CALL && type != EXCEPTION_RET_EXP) {
			rpc_emerg("no bind_info, type = %d\n", (int)type);
			// When type is not do_call or ret_call_exception, we will use bind_info
			// directly without any checks.
			BUG();
		}
	} else if (unlikely(bind_info->need_exit)) {
		if (type != EXCEPTION_RET_EXP && type != EXCEPTION_CALL) {
			rpc_emerg("get need_exit when type = %d\n", (int)type);
			// Only do_call/ret_exception will see need_exit.
			// When it is a fixup/ret_call_fast, there is
			// no chance to return error code.
			BUG();
		}
		/* The peer is tearing down: fail the pending call with -ENOLINK. */
		rtpc_set_return_value(task_pt_regs(current), -ENOLINK);
		return EXCEPTION_CALL;
	}

	if (likely(type == EXCEPTION_CALL)) {
		rpc_do_call_fast(tsk);
	} else if (likely(type == EXCEPTION_RET_FAST)) {
		rpc_ret_call_fast(tsk);
	} else if (type == EXCEPTION_FIXUP) {
		/* Non-syscall exceptions reach here with regs unset. */
		if (!type_syscall)
			regs = task_pt_regs(tsk);
		rpc_fixup_exception(tsk, is_el0_sync, type_syscall, regs, scno);
	} else {
		/* EXCEPTION_RET_EXP: implies a syscall, so regs is valid. */
		if (unlikely(!bind_info) || unlikely(bind_info->is_calling == 0)) {
			const int ret = rpc_wait_call_fast(tsk, bind_info);

			rtpc_set_return_value(regs, ret);
			// goto .kernel_exit
			return EXCEPTION_CALL;
		}
		return rpc_ret_call_exception(tsk, bind_info, regs);
	}

	return type;
}

/*
 * Placeholder handler for ioctl commands that are declared in the table but
 * must not be invoked through the ioctl path. Always fails with -EINVAL.
 * (Also fixes the grammar of the logged message: "use" -> "used".)
 */
static int rpc_empty_error(struct file *filp, unsigned long arg, struct task_struct *tsk)
{
	rpc_err("this interface cannot be used!\n");
	return -EINVAL;
}

/*
 * ioctl dispatch table: maps each enum rpc_cmd value to its human-readable
 * name (for rpc_debug logging) and handler. Entries bound to
 * rpc_empty_error are intentionally unreachable through the regular ioctl
 * path (see the CMD_DO_CALL note below).
 */
static struct rpc_cmd_node_s rpc_cmd_node[] = {
	{CMD_SERVER_INIT, "CMD_SERVER_INIT", rpc_server_init},
	{CMD_SERVER_EXIT, "CMD_SERVER_EXIT", rpc_empty_error},
	{CMD_SERVER_THREAD_BIND, "CMD_SERVER_THREAD_BIND", rpc_server_thread_bind},
	{CMD_SERVER_THREAD_UNBIND, "CMD_SERVER_THREAD_UNBIND", rpc_empty_error},
	{CMD_SERVER_THREAD_DESTROY, "CMD_SERVER_THREAD_DESTROY", rpc_empty_error},
	{CMD_CLIENT_INIT, "CMD_CLIENT_INIT", rpc_client_init},
	{CMD_CLIENT_EXIT, "CMD_CLIENT_EXIT", rpc_empty_error},
	{CMD_CLIENT_THREAD_INIT, "CMD_CLIENT_THREAD_INIT", rpc_client_thread_init},
	{CMD_CLIENT_THREAD_EXIT, "CMD_CLIENT_THREAD_EXIT", rpc_empty_error},
	{CMD_CLIENT_THREAD_BIND, "CMD_CLIENT_THREAD_BIND", rpc_client_thread_bind},
	{CMD_CLIENT_THREAD_UNBIND, "CMD_CLIENT_THREAD_UNBIND", rpc_empty_error},
	/* In fast path mode, ioctl with CMD_DO_CALL will be intercepted to do_call_fast in:
	 * entry.S: kernel_entry
	 * rtos_rtpc_rpc.c: rpc_do_exception
	 *
	 * It will never run to rpc_get_cmd_node below.
	 */
	{CMD_DO_CALL, "CMD_DO_CALL", rpc_empty_error},
	{CMD_WAIT_AND_RET_CALL, "CMD_WAIT_AND_RET_CALL", rpc_empty_error},
};

/*
 * Linear lookup of @cmd in the dispatch table.
 * Returns the matching entry, or NULL for an unknown command.
 * Uses the kernel's ARRAY_SIZE() instead of a hand-rolled sizeof division.
 */
static struct rpc_cmd_node_s *rpc_get_cmd_node(unsigned int cmd)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(rpc_cmd_node); i++) {
		if (rpc_cmd_node[i].cmd == cmd)
			return &rpc_cmd_node[i];
	}

	return NULL;
}

static long rpc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct task_struct *tsk = current;
	struct rpc_cmd_node_s *cmd_node = rpc_get_cmd_node(cmd);

	if (!cmd_node) {
		rpc_err("task = %s/%d, cmd = 0x%x is invalid\n", tsk->comm, tsk->pid, cmd);
		return -EINVAL;
	}

	rpc_debug("[%s] task = %s/%d, cmd = 0x%x, arg = %lu\n",
			cmd_node->cmd_name, tsk->comm, tsk->pid, cmd, arg);

	return cmd_node->cmd_func(filp, arg, tsk);
}

static int rpc_mmap(struct file *flip, struct vm_area_struct *vma)
{
	void *data_page;
	unsigned long pfn;
	unsigned long vmsize;
	struct task_struct *tsk = current;

	if (!capable(CAP_SYS_ADMIN)) {
		rpc_err("permission check failed\n");
		return -EACCES;
	}

	data_page = rpc_get_data_page(tsk);
	if (!data_page) {
		data_page = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data_page) {
			rpc_err("get zero page failed\n");
			return -ENOMEM;
		}
		SetPageReserved(virt_to_page(data_page));
		rpc_set_data_page(tsk, data_page);
	}

	pfn = virt_to_pfn(data_page);
	vmsize = vma->vm_end - vma->vm_start;
	if (vmsize != PAGE_SIZE) {
		rpc_free_data_page(tsk);
		rpc_err("vmsize %lu != PAGE_SIZE %lu\n", vmsize, PAGE_SIZE);
		return -ENXIO;
	}
	if (remap_pfn_range(vma, vma->vm_start, pfn, vmsize, vma->vm_page_prot)) {
		rpc_free_data_page(tsk);
		rpc_err("remap_pfn_range failed\n");
		return -EAGAIN;
	}

	rpc_debug("%s/%d mmap success\n", tsk->comm, tsk->pid);

	return 0;
}

#ifdef CONFIG_PROC_FS
/*
 * /proc/rtpc_call_stats show handler: per possible CPU, print the call and
 * fixup counters on one line, then the syscall and IRQ histogram tables on
 * the following two lines.
 *
 * Fix vs. previous version: the loop index `i` is size_t and was printed
 * with "%lu"; "%zu" is the correct specifier (a mismatched specifier is
 * undefined behavior on ABIs where size_t != unsigned long).
 */
static int rtpc_proc_show_call_stats(struct seq_file *m, void *v)
{
	int cpu;
	size_t i;

	if (!sysctl_rpc_kernel_stats) {
		seq_puts(m, "rpc stats is not enabled!\n");
		return 0;
	}

	for_each_possible_cpu(cpu) {
		struct rpc_stat_info *const rpc_stat = per_cpu(cpu_rpc_stat, cpu);

		seq_printf(m, "%d %lu %lu %lu %lu %lu\n", cpu, rpc_stat->nr_call,
			   rpc_stat->nr_fixup, rpc_stat->nr_fixup_irq,
			   rpc_stat->nr_fixup_exception, rpc_stat->nr_fixup_syscall);
		for (i = 0; i <= STAT_SYSCNO_MAX; ++i)
			seq_printf(m, " %zu: %lu", i, rpc_stat->syscall_table[i]);
		seq_putc(m, '\n');
		for (i = 0; i <= STAT_IRQ_MAX; ++i)
			seq_printf(m, " %zu: %lu", i, rpc_stat->irq_table[i]);
		seq_putc(m, '\n');
	}
	return 0;
}
#endif

/*
 * File operations for the /dev/rtos_rpc misc device. The compat (32-bit)
 * ioctl path reuses rpc_ioctl directly — presumably safe because all
 * command arguments are plain scalars, not pointers needing compat_ptr()
 * translation; verify against the handlers if new commands are added.
 */
static const struct file_operations rpc_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = rpc_ioctl,
	.compat_ioctl   = rpc_ioctl,
	.mmap           = rpc_mmap,
};

static struct miscdevice rpc_dev = {
	MISC_DYNAMIC_MINOR,
	"rtos_rpc",
	&rpc_fops
};

/* Report whether RPC statistics collection is enabled ("0\n" or "1\n"). */
static int rtpc_call_stats_enable_show(struct seq_file *m, void *v)
{
	const unsigned int enabled = sysctl_rpc_kernel_stats;

	seq_printf(m, "%u\n", enabled);
	return 0;
}

/* proc open hook: route reads through the single_open() seq_file helper. */
static int rtpc_call_stats_enable_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, rtpc_call_stats_enable_show, NULL);
}

/*
 * /proc/rtpc_call_stats_enable write handler. Accepts only "1" (optionally
 * followed by '\n' or '\0'); allocates a per-CPU stat buffer for every
 * possible CPU and then enables collection. Enabling twice yields -EEXIST;
 * there is no way to disable.
 *
 * Fix vs. previous version: on success this returned 0, which makes the
 * userspace write() see zero bytes consumed and retry forever; a proc write
 * must return the number of bytes consumed. The flag is now also published
 * with WRITE_ONCE to pair with the READ_ONCE check.
 *
 * NOTE(review): two concurrent writers can both pass the READ_ONCE check
 * and race the allocation loop — consider a mutex if this path can be hit
 * concurrently.
 */
static ssize_t rtpc_call_stats_enable_write(struct file *file, const char __user *buf,
					    size_t count, loff_t *ppos)
{
	/* VFS caps count at MAX_RW_COUNT, so the cast cannot go negative. */
	const ssize_t consumed = (ssize_t)count;
	char kbuf[2];
	int cpu;

	if (count == 0)
		return -EINVAL;

	if (count > sizeof(kbuf))
		count = sizeof(kbuf);

	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;

	if (kbuf[0] != '1')
		return -EINVAL;

	if (count > 1) {
		if (kbuf[1] != '\0' && kbuf[1] != '\n')
			return -EINVAL;
	}

	if (READ_ONCE(sysctl_rpc_kernel_stats))
		return -EEXIST;

	for_each_possible_cpu(cpu) {
		struct rpc_stat_info *const stat_info = vzalloc(sizeof(struct rpc_stat_info));

		if (stat_info == NULL)
			goto label_failed;

		per_cpu(cpu_rpc_stat, cpu) = stat_info;
	}
	WRITE_ONCE(sysctl_rpc_kernel_stats, 1);
	/* Consume the whole write so userspace does not retry. */
	return consumed;

label_failed:
	/* Roll back every allocation; vfree(NULL) is a no-op. */
	for_each_possible_cpu(cpu) {
		vfree(per_cpu(cpu_rpc_stat, cpu));
		per_cpu(cpu_rpc_stat, cpu) = NULL;
	}
	return -ENOMEM;
}

/*
 * proc_ops for /proc/rtpc_call_stats_enable: seq_file-backed reads via
 * single_open/seq_read, writes handled by rtpc_call_stats_enable_write.
 */
static const struct proc_ops rtpc_call_stats_enable_fops = {
	.proc_open = rtpc_call_stats_enable_open,
	.proc_write = rtpc_call_stats_enable_write,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
};

/*
 * Module init: register the /dev/rtos_rpc misc device and create the two
 * proc interfaces. Returns 0 on success or a negative errno.
 *
 * Fixes vs. previous version:
 *  - bind_info_list_lock is initialized BEFORE misc_register(); the device
 *    becomes reachable from userspace at registration time, so the lock
 *    must already be valid.
 *  - proc-creation failures now deregister the misc device instead of
 *    leaking it, and real error codes are returned instead of -1.
 */
static int __init rpc_init(void)
{
	int ret;

	/* Shared state must be ready before the device is visible. */
	spin_lock_init(&bind_info_list_lock);

	ret = misc_register(&rpc_dev);
	if (ret) {
		rpc_err("register device failed\n");
		return ret;
	}

#ifdef CONFIG_PROC_FS
	// create proc interface: /proc/rtpc_call_stats
	if (proc_create_single("rtpc_call_stats", 0600, NULL, rtpc_proc_show_call_stats) == NULL) {
		rpc_err("rpc proc create failed\n");
		misc_deregister(&rpc_dev);
		return -ENOMEM;
	}
	if (!proc_create("rtpc_call_stats_enable", 0600, NULL, &rtpc_call_stats_enable_fops)) {
		rpc_err("rpc proc create failed!\n");
		remove_proc_entry("rtpc_call_stats", NULL);
		misc_deregister(&rpc_dev);
		return -ENOMEM;
	}
#endif

	pr_info("RTOS Process Context for RPC init success\n");

	return 0;
}

late_initcall(rpc_init);

/*
 * Signal-time teardown hook for an RPC-bound task.
 *
 * @info is the target task_struct, passed with an extra reference that this
 * function drops (put_task_struct on every path) — presumably taken by the
 * caller before scheduling this callback; verify at the call site.
 *
 * Marks the task's bind_info as needing exit and, if the peer is parked in
 * one of the RPC wait states, wakes it out of TASK_RTPC_UNWAKEABLE so it
 * can observe need_exit (see rpc_do_exception's need_exit handling).
 *
 * NOTE(review): need_exit and init_state are accessed here without taking
 * bind_info's lock — looks intentional (flag is sticky), but confirm the
 * memory-ordering assumptions against the readers.
 */
static void do_rtpc_signal(void *info)
{
	struct task_struct *const t = info;
	struct rtos_task_struct *const rtos_task = task_to_rtos_task(t);
	struct rpc_bind_info *const bind_info = rtos_task->rpc_task.bind_info;
	int state;

	if (!bind_info) {
		rpc_emerg("no bind_info, early exit\n");
		put_task_struct(t);
		return;
	}

	rpc_alert("set need_exit\n");
	bind_info->need_exit = 1;
	state = bind_info->init_state;
	if (state == BIND_INFO_INITED || state == SERVER_WAITED || state == CLIENT_SAVED)
		wake_up_state(t, TASK_RTPC_UNWAKEABLE);
	put_task_struct(t);
}
