// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Thu Aug 15 16:14:23 2023
 */

#include <linux/key.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/fs_struct.h>
#include <linux/fdtable.h>
#include <linux/mqueue.h>
#include <linux/utsname.h>
#include <linux/proc_ns.h>
#include <linux/init_task.h>
#include <linux/kthread.h>
#include <uapi/linux/keyctl.h>
#include <linux/kmemleak.h>
#include <linux/mman.h>		/* for PROT_{READ|WRITE|EXEC} */
#include <linux/rtmutex.h>

/* define the following global data for INIT_TASK and INIT_THREAD_INFO */
/*
 * Static struct pid for the initial task: refcounted once, level 0,
 * pid number 0 in the initial pid namespace.
 */
struct pid init_struct_pid = {
	.count          = REFCOUNT_INIT(1),
	/* one empty hlist head per pid type — assumes three pid types
	 * (e.g. PID/PGID/SID) in this configuration; TODO confirm */
	.tasks          = {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level          = 0,
	.numbers        = { {
		.nr             = 0,
		.ns             = &init_pid_ns,
	}, }
};

/* kernel/sys.c */
/* serializes readers/writers of the UTS name data (see init_uts_ns use below) */
DECLARE_RWSEM(uts_sem);

/* zero-initialized files table for the initial task */
struct files_struct init_files;
/* namespace proxy for the initial task; only net and uts namespaces are wired up here */
struct nsproxy init_nsproxy =  {
#ifdef CONFIG_NET
	.net_ns = &init_net,
#endif
	.uts_ns			= &init_uts_ns,
};
/* zero-initialized mm for the initial task (kernel threads have no user mm) */
struct mm_struct init_mm;

/* kernel/pid.c */
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = KREF_INIT(2),	/* held by init_struct_pid and the namespace itself */
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,		/* root namespace */
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);


/*
 * Return the pid namespace the task's pids live in.  This LDK build has
 * no pid-namespace support, so every task resolves to the initial
 * namespace; @tsk is intentionally ignored.
 */
struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return &init_pid_ns;
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Handshake data passed from kernel_thread() (parent) to
 * kernel_thread_helper() (child) while a kernel thread is created.
 * NOTE(review): the leading double underscore makes this a reserved
 * identifier in standard C; consider renaming (e.g. kthread_data).
 */
typedef struct {
	int (*fn)(void *d);			/* thread entry point */
	void *arg;				/* argument handed to fn */
	struct completion child_done;		/* child -> parent: kt consumed, safe to free */
	struct completion parent_done;		/* parent -> child: thread_handle is valid */
	liblinux_pal_thread_handle_t thread_handle;	/* filled by liblinux_pal_thread_create() */
} __kthread_data;

/*
 * Per-thread teardown hook registered with the PAL in kernel_thread_init().
 * @data is the exiting thread's task_struct.  Releases the task's I/O
 * context, drops the credential reference taken for kernel threads, and
 * finally drops the task_struct reference itself.
 */
static void kernel_thread_cleanup(void *data)
{
	struct task_struct *task = data;

	BUG_ON(task == NULL);

	if (task->io_context != NULL)
		exit_io_context(task);

	/* only kernel threads pinned init_cred in kernel_thread_init_data() */
	if ((task->flags & PF_KTHREAD) != 0)
		put_cred(task->cred);

	put_task_struct(task);
}

/*
 * RCU callback (also called directly from __put_task_struct) that joins
 * the underlying PAL thread and performs the free_task()-style teardown.
 * @head is the rcu_head embedded in the task's thread_info.
 */
static void __liblinux_thread_join_rcu(struct rcu_head *head)
{
	struct thread_info *ti = container_of(head, struct thread_info, rcu_head);
	struct task_struct *k = container_of(ti, struct task_struct, thread_info);
	void *code = NULL;
	void *exit_code = NULL;

	/* get thread exit_code before destroyed by join */
	code = (void *)(unsigned long)k->exit_code;

	/* check free_task() */
	rt_mutex_debug_task_free(k);
	ftrace_graph_exit_task(k);
	if (k->flags & PF_KTHREAD)
		free_kthread_struct(k);

	/* join reaps the PAL thread; it reports the value passed to
	 * liblinux_pal_thread_exit_ex() in do_exit(), which must match
	 * the exit_code captured above */
	(void)liblinux_pal_thread_join_ex(&ti->thread_handle, &exit_code);
	BUG_ON(code != exit_code);
}

/*
 * Final release of a task_struct once its usage refcount hits zero.
 * Unlike mainline Linux there is no slab free here: the task_struct
 * lives in the thread's TLS (see kernel_thread_init_data()), so only
 * the PAL thread join and debug bookkeeping remain.
 */
void __put_task_struct(struct task_struct *t)
{
	kmemleak_free(t);
	lockdep_free_task(t);

	/* skip thread join for non-LDK managed threads */
	if (!is_thread_managed(task_thread_info(t)))
		return;

	if (current != t)
		/* another thread is releasing us: safe to join synchronously */
		__liblinux_thread_join_rcu(&task_thread_info(t)->rcu_head);
	else
		/* a thread cannot join itself; defer the join to an RCU callback */
		call_rcu(&task_thread_info(t)->rcu_head, __liblinux_thread_join_rcu);

}
EXPORT_SYMBOL_GPL(__put_task_struct);

/*
 * Build the calling thread's task_struct in thread-local storage and
 * register it as the PAL per-thread data (what `current` resolves to).
 * Returns the new task_struct, or NULL if the PAL registration fails.
 * notrace/__no_sanitize_address: runs before `current` is usable, so
 * instrumentation that dereferences it would crash.
 */
static notrace __no_sanitize_address
struct task_struct *kernel_thread_init_data(void)
{
	/* declare task_struct as TLS, all threads have an independent copy */
	static __thread struct task_struct tls_task __aligned(8);
	struct task_struct *p = &tls_task;

	/* init task_struct in TLS */
	memcpy(p, &init_task, sizeof(struct task_struct));
	p->stack = (void *)0x0; /* avoid p->stack be used */

	/*
	 * in linux, the 2 usage is drop when the thread end with do_exit and
	 * when the kernel schedule from this dead thread to another.
	 * currently we simulate the second put in do_exit
	 */
	refcount_set(&p->usage, 2);
	task_thread_info(p)->preempt_count = 0;

	if (liblinux_pal_is_thread_ctx())
		/* kernel-thread context: pin init_cred (released in kernel_thread_cleanup) */
		p->cred = get_cred(&init_cred);
	else
		/* non-thread context: clear the PF_KTHREAD flag copied from init_task */
		p->flags &= (~(PF_KTHREAD));

	if (liblinux_pal_thread_set_my_data((void *)p) != 0)
		return NULL;

	return p;
}

/*
 * Full per-thread setup: build the TLS task_struct, then initialize the
 * locks and bookkeeping that the copied init_task image does not cover.
 * Returns the task_struct, or NULL on failure.
 */
static notrace __no_sanitize_address
struct task_struct *kernel_thread_setup(void)
{
	struct task_struct *p = NULL;

	p = kernel_thread_init_data();
	if (p == NULL)
		return NULL;

	lockdep_init_task(p);

	/* `p` was copied from `init_task`; it shares init_task's fs_struct,
	 * so take an extra fs user reference under its lock */
	spin_lock(&init_task.fs->lock);
	init_task.fs->users++;
	spin_unlock(&init_task.fs->lock);
	/* re-init the locks copied by memcpy so lockdep sees fresh ones */
	spin_lock_init(&p->alloc_lock);
	raw_spin_lock_init(&p->pi_lock);

	/*
	 * The `task_struct` is allocated by TLS, which will be released
	 * when thread exit. So it never leak.
	 */
	kmemleak_alloc(p, sizeof(struct task_struct), 0 /* not leak */, GFP_KERNEL);

	return p;
}

/*
 * Set up the calling "actv" (externally created) thread so it can run
 * LDK code.  Must be called from actv context only.
 * Returns 0 on success, -EFAULT if per-thread setup failed.
 */
notrace __no_sanitize_address int liblinux_thread_setup(void)
{
	struct task_struct *task = kernel_thread_setup();

	if (task == NULL)
		return -EFAULT;

	/*
	 * An actv task never runs do_exit(), so the second reference that
	 * kernel_thread_setup() assumed (dropped in do_exit) must not
	 * exist: pin the usage count at exactly 1.
	 */
	refcount_set(&task->usage, 1);

	return 0;
}
EXPORT_SYMBOL(liblinux_thread_setup);

/*
 * Terminate the current thread with exit status @code.  Records the
 * exit code (checked later against the PAL join result), releases the
 * fs reference taken in kernel_thread_setup(), drops the task usage
 * reference, and hands control to the PAL.  Never returns.
 */
void do_exit(long code)
{
	current->exit_code = code;
	exit_fs(current);

	set_special_state(TASK_DEAD);
	/*
	 * simulate linux put_task_struct after task finish
	 * note that the task_struct will go away and caller must
	 * ensure the task_struct is still alive when calling kthread_stop
	 */
	put_task_struct(current);

	/* exit value must match exit_code: __liblinux_thread_join_rcu BUG_ONs otherwise */
	liblinux_pal_thread_exit_ex(ERR_PTR(code));

	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();    /* For when BUG is null */
}
EXPORT_SYMBOL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);

/*
 * Entry point handed to the PAL for every kernel thread created by
 * kernel_thread().  Sets up the thread's task_struct, completes the
 * handshake with the parent, runs the requested function, then exits
 * with its return value.
 */
static void *kernel_thread_helper(void *arg)
{
	struct task_struct *p = NULL;
	__kthread_data *kt = (__kthread_data *)arg;

	/* store kt->[fn|arg]: the parent frees kt right after child_done */
	int (*_fn)(void *) = kt->fn;
	void *_arg = kt->arg;
	int ret;

	p = kernel_thread_setup();
	if (!p)
		return ERR_PTR(-EFAULT);

	/* wait until the parent has a valid kt->thread_handle for us */
	wait_for_completion(&kt->parent_done);

	task_thread_info(p)->thread_handle.handle = kt->thread_handle.handle;
	p->pid = kt->thread_handle.tid;

	/* after this, kt may be freed by the parent — no further access */
	complete(&kt->child_done);

	ret = (*_fn)(_arg);

	do_exit(ret);

	/* unreachable: do_exit() never returns */
	return NULL;
}

/*
 * Create a kernel thread.
 *
 * Spawns a PAL thread running kernel_thread_helper(fn, arg); @flags is
 * accepted for interface compatibility but unused.  A two-way completion
 * handshake guarantees the child has copied everything it needs out of
 * the heap-allocated __kthread_data before it is freed here.
 * Returns 0 on success, -ENOMEM or -EAGAIN on failure.
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags __attribute__((unused)))
{
	int ret;
	__kthread_data *kt;

	kt = kmalloc(sizeof(__kthread_data), GFP_KERNEL);
	if (!kt)
		return -ENOMEM;

	kt->fn  = fn;
	kt->arg = arg;

	init_completion(&kt->child_done);
	init_completion(&kt->parent_done);

	ret = liblinux_pal_thread_create(kernel_thread_helper, (void *)kt,
					 &(kt->thread_handle));
	if (ret != 0) {
		kfree(kt);
		return -EAGAIN;
	}

	/* thread_handle is now valid: let the child read it, then wait
	 * until the child is done with kt before freeing it */
	complete(&kt->parent_done);
	wait_for_completion(&kt->child_done);

	kfree(kt);

	/* pid has not been used yet, so just return 0 */
	return 0;
}

extern void liblinux_raw_thread_init(void);
/*
 * Boot-time threading init: registers the per-thread cleanup hook with
 * the PAL, initializes raw-thread support, and builds the task_struct
 * for the boot thread itself.  Panics on any failure — the kernel
 * cannot run without threading.
 */
void kernel_thread_init(void)
{
	int ret;
	struct task_struct *p = NULL;

	ret = liblinux_pal_thread_init_ex(kernel_thread_cleanup);
	if (ret < 0)
		panic("kernel thread init failed: %d\n", ret);

	liblinux_raw_thread_init();
	p = kernel_thread_init_data();
	if (p == NULL)
		panic("kernel thread init data failed\n");
}

/* used in INIT_TASK */
/*
 * restart_block stub: report that the interrupted syscall must not be
 * restarted.  The restart_block argument is ignored.
 */
long do_no_restart_syscall(struct restart_block *unused)
{
	return -EINTR;
}

/*
 * Set @tsk's comm (thread name) to @buf: first propagate the name to the
 * underlying PAL thread (best effort — a failure is only logged), then
 * update tsk->comm under the task lock.  @exec is accepted for interface
 * compatibility and ignored here.
 */
void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
	int ret;

	ret = liblinux_pal_thread_setname(&task_thread_info(tsk)->thread_handle, buf);
	if (ret < 0)
		pr_warn("thread set name failed, ret = %d, name=%s\n", ret, buf);

	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}

/*
 * Copy @tsk's comm into @buf under the task lock and return @buf.
 * NOTE(review): strncpy does not NUL-terminate when buf_size <=
 * strlen(comm); callers are presumably expected to pass buf_size >=
 * TASK_COMM_LEN (mainline Linux has the same pattern) — confirm.
 */
char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
	task_lock(tsk);
	strncpy(buf, tsk->comm, buf_size);
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(__get_task_comm);

/*
 * Return the current processor id as reported by the PAL.
 * notrace: may be called from tracing paths themselves.
 */
notrace unsigned int liblinux_get_processor_id(void)
{
	unsigned int cpu = liblinux_pal_processor_id();

	return cpu;
}
EXPORT_SYMBOL(liblinux_get_processor_id);
