/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023.
 * Description: light seccomp support
 * Create: 2023-9-26
 */
#ifndef KERNEL_LIGHT_SECCOMP_H
#define KERNEL_LIGHT_SECCOMP_H

#ifdef CONFIG_EULEROS_HAVE_ARCH_SECCOMP_BITMAP

/* Bitmap refcount helpers, defined later in this header. */
extern void put_seccomp_bitmap(struct task_struct *tsk);
extern void get_seccomp_bitmap(struct task_struct *tsk);

/*
 * Forward declarations of static helpers that live in the translation
 * unit this header is spliced into (kernel/seccomp.c); the bitmap code
 * below calls them before their definitions are seen.
 */
static u32 seccomp_run_filters(const struct seccomp_data *sd,
			       struct seccomp_filter **match);
static void seccomp_cache_prepare(struct seccomp_filter *sfilter);
static inline pid_t seccomp_can_sync_threads(void);
static inline void seccomp_sync_threads(unsigned long flags);

/* Value stored in lseccomp_on when "light_seccomp=on" is on the cmdline. */
#define LIGHT_SEC_ON    1
int lseccomp_on = 0;	/* runtime switch; written once during early boot */

/*
 * Parse the "light_seccomp=" early boot parameter.  Only the exact
 * value "on" enables the feature; anything else leaves it disabled.
 */
static int __init lseccomp_setup(char *s)
{
	if (s == NULL)
		return -EINVAL;

	if (strcmp(s, "on") == 0)
		lseccomp_on = LIGHT_SEC_ON;

	return 0;
}
early_param("light_seccomp", lseccomp_setup);

/*
 * Fill in only the cheap fields of @sd (syscall nr and arch).  The
 * remaining fields (args, instruction pointer) are completed later by
 * populate_seccomp_data_stage2() — see seccomp_check_syscall(), which
 * skips stage 2 entirely when the allow bitmap decides the syscall.
 */
static inline void populate_seccomp_data_stage1(struct seccomp_data *sd)
{
	sd->nr = syscall_get_nr(current, task_pt_regs(current));
	sd->arch = syscall_get_arch(current);
}

/*
 * Complete @sd with the fields stage 1 left out: the six syscall
 * arguments and the instruction pointer.
 */
static inline void populate_seccomp_data_stage2(struct seccomp_data *sd)
{
	unsigned long arg_buf[6];
	int i;

	syscall_get_arguments(current, task_pt_regs(current), arg_buf);
	for (i = 0; i < 6; i++)
		sd->args[i] = arg_buf[i];

	sd->instruction_pointer = KSTK_EIP(current);
}

/* Release @bitmap and its embedded syscall bitmap; NULL is a no-op. */
static inline void seccomp_bitmap_free(struct seccomp_bitmap *bitmap)
{
	if (!bitmap)
		return;

	bitmap_free(bitmap->allow);
	kfree(bitmap);
}

/**
 * seccomp_new_bitmap - allocate a refcounted, fully-set syscall bitmap.
 * @nr: number of syscalls the bitmap must cover; must be the native
 *      (or, with CONFIG_COMPAT, the compat) syscall table size.
 *
 * Returns the new bitmap on success or an ERR_PTR on failure.
 */
static struct seccomp_bitmap *seccomp_new_bitmap(unsigned int nr)
{
	struct seccomp_bitmap *bitmap;

	/* Only the known per-arch table sizes are acceptable. */
#ifdef CONFIG_COMPAT
	if (nr != SECCOMP_ARCH_NATIVE_NR && nr != SECCOMP_ARCH_COMPAT_NR)
		return ERR_PTR(-EINVAL);
#else
	if (nr != SECCOMP_ARCH_NATIVE_NR)
		return ERR_PTR(-EINVAL);
#endif

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return ERR_PTR(-ENOMEM);

	bitmap->allow = bitmap_alloc(nr, GFP_KERNEL);
	if (!bitmap->allow) {
		kfree(bitmap);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Every syscall starts out "allowed"; seccomp_update_bitmap()
	 * later clears the bits the BPF emulator cannot prove constant.
	 */
	bitmap_fill(bitmap->allow, nr);
	refcount_set(&bitmap->usage, 1);

	return bitmap;
}

/**
 * seccomp_prepare_bitmap - prepare a seccomp bitmap for use.
 * @bitmap: bitmap array to return
 *
 * Returns 0 on success or -ev on failure.
 */
static long seccomp_prepare_bitmap(struct seccomp_bitmap *bitmap[])
{
	struct seccomp_bitmap *native;
#ifdef CONFIG_COMPAT
	struct seccomp_bitmap *compat;
#endif
	if (lseccomp_on != LIGHT_SEC_ON)
		return 0;
	if (!bitmap)
		return -EINVAL;

	native = seccomp_new_bitmap(SECCOMP_ARCH_NATIVE_NR);
	if (IS_ERR(native))
		return PTR_ERR(native);

#ifdef CONFIG_COMPAT
	compat = seccomp_new_bitmap(SECCOMP_ARCH_COMPAT_NR);
	if (IS_ERR(compat)) {
		seccomp_bitmap_free(native);
		return PTR_ERR(compat);
	}
#endif

	bitmap[SECCOMP_BITMAP_NATIVE] = native;
#ifdef CONFIG_COMPAT
	bitmap[SECCOMP_BITMAP_COMPAT] = compat;
#endif

	return 0;
}
 
/**
 * seccomp_emulate_filter - emulate seccomp filter
 * @fprog: BPF program to emulate
 * @arch: AUDIT_ARCH_*
 * @nr: syscall number
 * @filter_ret: resulting filter result to return
 *
 * Try to statically determine whether @filter will always return
 * a constant result (only touch arch/nr in struct seccomp_data,
 * no others like args) when running for syscall @nr under
 * architecture @arch.
 *
 * Returns true if the result could be determined or false if not;
 * if true, the filter result will be stored in @filter_ret.
 */
static bool seccomp_emulate_filter(struct sock_fprog_kern *fprog,
			unsigned int arch, int nr, unsigned int *filter_ret)
{
	unsigned int flen;
	unsigned int reg_value = 0;
	int pc;
	
	if (!fprog)
		return false;

	flen = fprog->len;

	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *insn = &fprog->filter[pc];
		u16 code = insn->code;
		u32 k = insn->k;
		bool op_res;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			if (k == offsetof(struct seccomp_data, nr))
				reg_value = nr;
			else if (k == offsetof(struct seccomp_data, arch))
				reg_value = arch;
			else
				return false; /* non-constant result. */
			break;
		case BPF_RET | BPF_K:
			*filter_ret = k;
			return true; /* constant result. */
		case BPF_JMP | BPF_JA:
			pc += k;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			op_res = reg_value == k;
			pc += op_res ? insn->jt : insn->jf;
			break;
		case BPF_JMP | BPF_JGE | BPF_K:
			op_res = reg_value >= k;
			pc += op_res ? insn->jt : insn->jf;
			break;
		case BPF_JMP | BPF_JGT | BPF_K:
			op_res = reg_value > k;
			pc += op_res ? insn->jt : insn->jf;
			break;
		default:
			return false; /* unknown code. */
		}
	}

	return false;
}

/**
 * seccomp_update_bitmap - regenerate a seccomp allow-bitmap.
 * step 1: copy the old bitmap;
 * step 2: run the BPF emulator over the just-attached
 *         current->seccomp.filter->prog->orig_prog for each syscall;
 * step 3: merge the result into the copied bitmap.
 *
 * @bitmap: seccomp bitmap to generate
 * @old: seccomp bitmap to be merged into
 * @arch: arch to emulate
 * @nr_syscalls: syscall table size
 */
static void seccomp_update_bitmap(struct seccomp_bitmap *bitmap,
			struct seccomp_bitmap *old, u32 arch, u32 nr_syscalls)
{
	struct sock_fprog_kern *fprog = current->seccomp.filter->prog->orig_prog;
	u32 emulated_ret;
	u32 nr;

	/* Start from the previous generation when one exists. */
	if (old)
		bitmap_copy(bitmap->allow, old->allow, nr_syscalls);

	for (nr = 0; nr < nr_syscalls; nr++) {
		if (seccomp_emulate_filter(fprog, arch, nr, &emulated_ret) &&
		    (emulated_ret & SECCOMP_RET_ACTION_FULL) == SECCOMP_RET_ALLOW) {
			/*
			 * Constant SECCOMP_RET_ALLOW.  SECCOMP_RET_DATA can
			 * be ignored because SECCOMP_RET_ALLOW is the
			 * starting state in seccomp_run_filters(), and every
			 * bit of the allow bitmap was already set at
			 * initialization, so nothing needs to change.
			 */
			continue;
		}

		/*
		 * Non-constant rule, or a constant non-ALLOW action:
		 * clear the bit so this syscall takes the BPF slow path.
		 */
		clear_bit(nr, bitmap->allow);
	}
}

/* get_seccomp_bitmap - increments the ref count of the bitmap on @tsk */
void get_seccomp_bitmap(struct task_struct *tsk)
{
	struct seccomp_bitmap *native;
#ifdef CONFIG_COMPAT
	struct seccomp_bitmap *compat;
#endif
	if (lseccomp_on != LIGHT_SEC_ON)
		return;
	if (!tsk->seccomp.filter)
		return;
	native = tsk->seccomp.filter->native;
#ifdef CONFIG_COMPAT
	compat = tsk->seccomp.filter->compat;
#endif

	if (!native)
		return;

	/* Reference count is bounded by the number of total processes. */
	refcount_inc(&native->usage);

#ifdef CONFIG_COMPAT
	if (!compat)
		return;

	/* Reference count is bounded by the number of total processes. */
	refcount_inc(&compat->usage);
#endif
}

/* put_seccomp_bitmap - decrements the ref count of bitmap in tsk->seccomp. */
void put_seccomp_bitmap(struct task_struct *tsk)
{
	struct seccomp_bitmap *native;
#ifdef CONFIG_COMPAT
	struct seccomp_bitmap *compat;
#endif
	if (lseccomp_on != LIGHT_SEC_ON)
		return;
	if (!tsk->seccomp.filter)
		return;
	native = tsk->seccomp.filter->native;
#ifdef CONFIG_COMPAT
	compat = tsk->seccomp.filter->compat;
#endif

	/* Clean up single-reference native bitmap. */
	if (native && refcount_dec_and_test(&native->usage))
		seccomp_bitmap_free(native);

#ifdef CONFIG_COMPAT
	/* Clean up single-reference compat bitmap. */
	if (compat && refcount_dec_and_test(&compat->usage))
		seccomp_bitmap_free(compat);
#endif
}

/**
 * seccomp_attach_filter_ex: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 * @bitmap: seccomp bitmap to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error, or
 *   - in TSYNC mode: the pid of a thread which was either not in the correct
 *     seccomp mode or did not have an ancestral seccomp filter
 *   - in NEW_LISTENER mode: the fd of the new listener
 */
static long seccomp_attach_filter_ex(unsigned int flags,
				  struct seccomp_filter *filter, struct seccomp_bitmap *bitmap[])
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;
		ret = seccomp_can_sync_threads();
		if (ret) {
			if (flags & SECCOMP_FILTER_FLAG_TSYNC_ESRCH)
				return -ESRCH;
			return ret;
		}
	}

	/* Set log flag, if present. */
	if (flags & SECCOMP_FILTER_FLAG_LOG)
		filter->log = true;

	/*
	 * Carry the existing bitmaps over onto the new head filter so
	 * seccomp_update_bitmap() below can merge with them, and so
	 * put_seccomp_bitmap() can drop the old references afterwards.
	 */
	if (lseccomp_on == LIGHT_SEC_ON) {
		if (current->seccomp.filter) {
			filter->native = current->seccomp.filter->native;
#ifdef CONFIG_COMPAT
			filter->compat = current->seccomp.filter->compat;
#endif
		}
	}

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	seccomp_cache_prepare(filter);
	current->seccomp.filter = filter;
	atomic_inc(&current->seccomp.filter_count);

	if (lseccomp_on == LIGHT_SEC_ON) {
		/*
		 * Generate a new bitmap, and merge it with the
		 * current old bitmap if it exists.
		 */
		seccomp_update_bitmap(bitmap[SECCOMP_BITMAP_NATIVE], filter->native, SECCOMP_ARCH_NATIVE, SECCOMP_ARCH_NATIVE_NR);
#ifdef CONFIG_COMPAT
		/*
		 * Generate a new bitmap, and merge it with the
		 * current old bitmap if it exists.
		 */
		seccomp_update_bitmap(bitmap[SECCOMP_BITMAP_COMPAT], filter->compat, SECCOMP_ARCH_COMPAT, SECCOMP_ARCH_COMPAT_NR);
#endif
		put_seccomp_bitmap(current);
		/* Drop the task reference to old bitmap, and attach the new one. */
		current->seccomp.filter->native = bitmap[SECCOMP_BITMAP_NATIVE];
#ifdef CONFIG_COMPAT
		current->seccomp.filter->compat = bitmap[SECCOMP_BITMAP_COMPAT];
#endif
	}

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads(flags);

	/*
	 * Ownership of the bitmaps has moved to the attached filter.
	 * NULL the caller's slots so its cleanup path (which presumably
	 * calls seccomp_destroy_bitmap(); free of NULL is a no-op) does
	 * not free what we just attached.
	 */
	bitmap[SECCOMP_BITMAP_NATIVE] = NULL;
#ifdef CONFIG_COMPAT
	bitmap[SECCOMP_BITMAP_COMPAT] = NULL;
#endif
	return 0;
}

/*
 * Decide whether the original (classic BPF) program must be kept on
 * the filter: always when light seccomp is enabled — the bitmap
 * emulator reads prog->orig_prog — otherwise only when
 * checkpoint/restore or the arch bitmap cache is built in.
 */
static inline bool is_save_origin(void)
{
	if (lseccomp_on == LIGHT_SEC_ON)
		return true;

#if defined(CONFIG_CHECKPOINT_RESTORE) || defined(SECCOMP_ARCH_NATIVE)
	return true;
#else
	return false;
#endif
}

/*
 * Free both not-yet-attached bitmaps in @bitmap.  Entries may be
 * NULL (e.g. after a successful attach handed them off), which
 * seccomp_bitmap_free() treats as a no-op.
 */
static inline void seccomp_destroy_bitmap(struct seccomp_bitmap *bitmap[])
{
	seccomp_bitmap_free(bitmap[SECCOMP_BITMAP_NATIVE]);
#ifdef CONFIG_COMPAT
	seccomp_bitmap_free(bitmap[SECCOMP_BITMAP_COMPAT]);
#endif
}

/*
 * Re-read current->seccomp.filter into *f behind a read barrier when
 * the first read observed NULL: another thread may have just attached
 * the first filter via TSYNC.  Only active with light seccomp on.
 */
static inline void seccomp_read_filter_again(struct seccomp_filter **f)
{
	if (lseccomp_on != LIGHT_SEC_ON)
		return;

	/* The barrier is only needed when the first read saw NULL. */
	if (likely(*f != NULL))
		return;

	/*
	 * Make sure the first filter addition (from another thread
	 * using the TSYNC flag) is seen.
	 */
	rmb();

	/* Read again. */
	*f = READ_ONCE(current->seccomp.filter);
}

#else
/* Stubs used when CONFIG_EULEROS_HAVE_ARCH_SECCOMP_BITMAP is not set. */
static inline void seccomp_bitmap_free(struct seccomp_bitmap *bitmap)
{
}

/* No bitmap support: report success so filter attach proceeds normally. */
static inline long seccomp_prepare_bitmap(struct seccomp_bitmap *bitmap[])
{
	return 0;
}

static inline void seccomp_destroy_bitmap(struct seccomp_bitmap *bitmap[])
{
}

static inline void put_seccomp_bitmap(struct task_struct *tsk)
{
}

static inline void get_seccomp_bitmap(struct task_struct *tsk)
{
}

#endif /* CONFIG_EULEROS_HAVE_ARCH_SECCOMP_BITMAP */

#ifdef CONFIG_SECCOMP_FILTER
#ifdef CONFIG_EULEROS_HAVE_ARCH_SECCOMP_BITMAP
/*
 * Fast-path lookup: true when syscall @nr is marked always-allowed
 * in @bitmap.
 *
 * 1. @nr is u32, so a negative syscall number wraps and fails the
 *    bounds check without an explicit nr < 0 test.
 * 2. The X86_X32 ABI sets __X32_SYSCALL_BIT in nr, which is always
 *    larger than @bitmap_size; such calls fall through to the BPF
 *    filter instead of being handled here.
 */
static inline bool seccomp_run_bitmaps(struct seccomp_bitmap *bitmap,
				       u32 nr, size_t bitmap_size)
{
	return nr < bitmap_size && test_bit(nr, bitmap->allow);
}

/*
 * Load current->seccomp.filter into *b, retrying once behind a read
 * barrier in case another thread has just installed the first filter
 * via TSYNC.  Returns false only if the filter is still NULL after
 * the retry (caller treats that as a kill condition, failing closed).
 */
static inline bool seccomp_try_get_filter(struct seccomp_filter **b)
{
	*b = READ_ONCE(current->seccomp.filter);
	/* The rmb() needs to be called only when b is NULL. */
	if (unlikely(*b == NULL)) {
		/*
		 * Make sure the first bitmap addition (from another
		 * thread using the TSYNC flag) is seen.
		 */
		rmb();

		/* Read again. */
		*b = READ_ONCE(current->seccomp.filter);
		/*
		 * Ensure unexpected behavior doesn't result in
		 * failing open.
		 */
		if (unlikely(WARN_ON(*b == NULL)))
			return false;
	}
	return true;
}

/*
 * Bitmap fast path in front of seccomp_run_filters().
 *
 * With light seccomp on, populate only nr/arch (stage 1), consult the
 * per-arch allow bitmap, and return SECCOMP_RET_ALLOW without ever
 * reading the syscall arguments.  Only when the bitmap cannot decide
 * is the rest of seccomp_data filled in (stage 2) and the BPF filter
 * chain run.  With light seccomp off, this degenerates to the classic
 * populate-then-run behavior.
 *
 * NOTE(review): when *sd comes in NULL, it is pointed at the local
 * sd_local, which dies when this function returns — safe for the
 * seccomp_run_filters() call here, but the caller must not keep using
 * *sd afterwards; verify against the call site.
 */
static u32 seccomp_check_syscall(const struct seccomp_data **sd,
			       struct seccomp_filter **match)
{
	struct seccomp_data sd_local;
	if (lseccomp_on == LIGHT_SEC_ON) {
		/* sd_flag: we own sd_local and still owe it stage 2. */
		bool sd_flag = false;
		struct seccomp_filter *b = NULL;

		if (!(*sd)) {
			populate_seccomp_data_stage1(&sd_local);
			*sd = &sd_local;
			sd_flag = true;
		}

#ifdef CONFIG_COMPAT
		if ((*sd)->arch == SECCOMP_ARCH_NATIVE) {
#endif
			/*
			 * Make sure cross-thread synced bitmap
			 * points somewhere sane.
			 */
			if (!seccomp_try_get_filter(&b))
				return SECCOMP_RET_KILL_PROCESS;

			if (seccomp_run_bitmaps(b->native, (*sd)->nr, SECCOMP_ARCH_NATIVE_NR))
				return SECCOMP_RET_ALLOW;
#ifdef CONFIG_COMPAT
		} else if ((*sd)->arch == SECCOMP_ARCH_COMPAT) {
			/*
			 * Make sure cross-thread synced bitmap
			 * points somewhere sane.
			 */
			if (!seccomp_try_get_filter(&b))
				return SECCOMP_RET_KILL_PROCESS;

			if (seccomp_run_bitmaps(b->compat, (*sd)->nr, SECCOMP_ARCH_COMPAT_NR))
				return SECCOMP_RET_ALLOW;
		} else {
			/* Unknown arch: fail closed rather than open. */
			WARN_ON(1);
			return SECCOMP_RET_KILL_PROCESS;
		}
#endif

		/* Bitmap couldn't decide: finish sd before running BPF. */
		if (sd_flag)
			populate_seccomp_data_stage2(&sd_local);
	} else {
		if (!(*sd)) {
			populate_seccomp_data_stage1(&sd_local);
			populate_seccomp_data_stage2(&sd_local);
			*sd = &sd_local;
		}
	}
	return seccomp_run_filters(*sd, match);
}

#endif /* CONFIG_EULEROS_HAVE_ARCH_SECCOMP_BITMAP */
#endif /* CONFIG_SECCOMP_FILTER */

#endif
