/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2024.
 * Description: RTOS Process Context for RPC header
 * Create: 2021-08-15
 */
#ifndef __RTOS_RTPC_RPC_H
#define __RTOS_RTPC_RPC_H

/*
 * Presumably the legal values of sysctl_rpc_kernel_enable (declared at the
 * bottom of this header): kernel RPC off, or enabled via a slow/fast path.
 * TODO(review): confirm against the sysctl handler that consumes it.
 */
#define RPC_KERNEL_DISABLE	0
#define RPC_KERNEL_ENABLE_SLOW	1
#define RPC_KERNEL_ENABLE_FAST	2

#ifndef __ASSEMBLER__

#ifdef CONFIG_ARM64
struct rpc_bind_info;
struct rpc_bind_info_ptr;

/* Per-task RPC state, ARM64 variant. */
struct rpc_task_struct {
	void *data_page;			/* presumably a per-task page used for RPC data exchange — TODO confirm */
	struct rpc_bind_info *bind_info;	/* binding descriptor; type defined elsewhere */
	struct rpc_bind_info_ptr *bind_info_ptr;
	void *client_save_regs;			/* saved client register context — layout defined by the ARM64 entry code, verify */
	int task_pinned;
	struct task_struct *next_task;		/* singly-linked chain of tasks — NOTE(review): confirm which list this threads */
	call_single_data_t csd;			/* for cross-CPU function calls (smp_call_function_single_async-style) — verify caller */
};


/* Release per-task RPC resources when the task's mm goes away. */
void rpc_mm_release(struct task_struct *tsk);
/* Entry points called from the ARM64 low-level exception/entry code. */
asmlinkage void rpc_recover_server_thread(void);
asmlinkage unsigned long rpc_do_exception(unsigned long is_el0_sync);
/* Scheduler hooks: pick-next, task placement, migration and preemption checks. */
struct task_struct *rpc_pick_next_task(struct rq *rq, struct task_struct *prev);
int rpc_select_task_rq(struct task_struct *tsk);
int rpc_can_migrate_task(struct task_struct *tsk);
/* NOTE(review): return-value conventions of the int hooks are defined at the
 * call sites in the scheduler — confirm before changing them. */
#else
#include <linux/kref.h>

/* Keep the RTPC fast-path helpers out of ftrace/KCSAN/KASAN/KCOV instrumentation. */
#define rtpc_notrace notrace __no_kcsan __no_sanitize_address __no_sanitize_coverage

/* Minimal test-and-set spinlock built on a bare atomic_t (see rtpc_spin_lock()). */
typedef atomic_t rtpc_spinlock_t;
struct rpc_service_info;

/* Per-task RPC state, non-ARM64 variant. */
struct rpc_task_struct {
	/*
	 * About the service_info field:
	 *
	 * 1. This field is read only by the current task, except for the following two situations:
	 *	When sending a signal, the sender may read the service_info of the receiver
	 *	When setting a task's scheduler, may read task->service_info
	 *
	 * 2. This field can only be modified by current task
	 *
	 * 3. If changing it from NULL to val, we can use WRITE_ONCE without any locking
	 *
	 * 4. If changing it from val to NULL, task's siglock must be held
	 */
	struct rpc_service_info *service_info;

	/* presumably set once this task has completed client-side init — TODO confirm setter */
	bool has_init_as_client;
};

/*
 * Lock order:
 * -> p->siglock
 *   -> service_info_list_lock
 *     -> kfree()'s memory_lock
 *   -> service_info_lock
 *     -> p->pi_lock			| try_to_wake_up()
 *       -> rq_lock			|
 *
 * We must disable irqs while taking service_info_lock/service_info_list_lock,
 * because irq context may send a signal and take p->siglock
 */

/* Server startup progress, stored in rpc_service_info::server_init_state. */
enum server_init_state_val {
	SERVICE_CREATED,	/* service object exists; server not yet sleeping */
	SERVER_START_SLEEP,	/* server has started going to sleep — TODO confirm transition point */
	SERVER_RTPC_SLEPT,	/* server is fully parked in its RTPC sleep — verify against state machine */
};
#ifdef CONFIG_RTOS_RTPC_RPC_DEBUG_BUILD
/*
 * Debug-build classification of why a client/server (C/S) pair is being
 * torn down.  Values are distinct bits, so need_exit can presumably carry
 * a combination of them — TODO confirm it is used as a bitmask.
 */
enum need_exit_state_val {
	/* kill C/S but is not in 'need to fixup' state */
	KILL_NORMAL = 0x1,
	/* kill C/S and is at 'need to fixup' state */
	KILL_FIXUP = 0x2,
	/* C/S exit but is not in 'need to fixup' state */
	EXIT_NORMAL = 0x4,
	/* a C exit at 'need to fixup' state */
	EXIT_FIXUP = 0x8,
};
#endif
/*
 * Shared state of one client/server RPC session.  Reference-counted via
 * 'ref'; mutable fields are protected by 'lock' (see the lock-order
 * comment earlier in this file).
 */
struct rpc_service_info {
	rtpc_spinlock_t lock;		/* protects the mutable fields below — TODO confirm exact coverage */
#ifdef CONFIG_RTOS_RTPC_RPC_DEBUG_BUILD
	unsigned int need_exit;		/* debug: bits from enum need_exit_state_val */
#else
	bool need_exit;			/* release: only "exit requested or not" */
#endif
	bool fixup_done;
	struct task_struct *calling_client_task;	/* client currently in a call, if any */
	struct task_struct *server_task;		/* the serving task */

	/* deferred-wakeup flags — presumably consumed once the lock is dropped, verify */
	bool server_need_wake;
	bool calling_client_need_wake;

	/* immutable after creation (const-qualified) */
	const unsigned int session_id;

	int server_init_state;		/* enum server_init_state_val */

	struct kref ref;		/* lifetime of this object */

	struct list_head node;		/* linkage on the service-info list (under service_info_list_lock) */
};

/*
 * Acquire @lock with a test-and-test-and-set spin.
 *
 * Spinning directly on atomic_xchg_acquire() makes every waiter request
 * the cache line in exclusive state on each iteration, bouncing the line
 * between CPUs for the whole time the lock is held.  Instead, after a
 * failed xchg we spin on a plain atomic_read() (line stays shared) and
 * only retry the xchg once the lock looks free.
 *
 * The successful (zero-returning) atomic_xchg_acquire() provides the
 * ACQUIRE ordering that pairs with atomic_set_release() in
 * rtpc_spin_unlock(); the inner read loop needs no ordering of its own.
 */
static __always_inline void rtpc_spin_lock(rtpc_spinlock_t *const lock)
{
	while (atomic_xchg_acquire(lock, 1)) {
		while (atomic_read(lock))
			;
	}
}

/* Release @lock; the RELEASE ordering pairs with the ACQUIRE in rtpc_spin_lock(). */
static __always_inline void rtpc_spin_unlock(rtpc_spinlock_t *const lock)
{
	atomic_set_release(lock, 0);
}

/*
 * IRQ-safe lock/unlock wrappers.  Interrupts are disabled before taking
 * the lock and re-enabled only after dropping it: per the lock-order
 * comment earlier in this file, irq context may take p->siglock, so
 * spinning with irqs on could deadlock against an interrupt on this CPU.
 */
#define rtpc_spin_lock_irqsave(lock, flags) \
	do { \
		local_irq_save(flags); \
		rtpc_spin_lock(lock); \
	} while (0)

#define rtpc_spin_unlock_irqrestore(lock, flags) \
	do { \
		rtpc_spin_unlock(lock); \
		local_irq_restore(flags); \
	} while (0)

/* Tear down @tsk's RTPC state on task exit; defined elsewhere. */
void rtpc_exit(struct task_struct *tsk);

/* No-op stub on non-ARM64 configs; the ARM64 branch declares a real implementation. */
static inline void rpc_mm_release(struct task_struct *tsk)
{
}

#endif /* CONFIG_ARM64 */

/* Global knob; presumably holds one of the RPC_KERNEL_* values defined above. */
extern unsigned int sysctl_rpc_kernel_enable;

#endif /* __ASSEMBLER__ */
#endif /* __RTOS_RTPC_RPC_H */
