/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2014-2020.
 * Description: init idump for rtos_mm_struct extend
 * Author: nixiaoming
 * Create: 2014-6-18
 */
#ifndef _FS_DO_DUMP_TRIGGER_C_
#define _FS_DO_DUMP_TRIGGER_C_
#include <linux/init.h>
#include <linux/idump.h>
#include <linux/time64.h>
#include <linux/rtc.h>
#include <linux/nmi.h>
#include <linux/rtos_mm_types.h>
#include <linux/futex.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/coredump.h>
#ifdef CONFIG_RTOS_DEBUG_DUMP_USER_PROCESS
#include <linux/rtos_dump.h>
#endif
#include <linux/sched/mm.h>
#include <linux/mmap_lock.h>
#include <linux/audit.h>
#include <linux/umh.h>

/* Runtime switch: idump fires only when this equals IDUMP_TRIGGER_ENABLE. */
unsigned int idump_trigger_enable;
/* Shadow of the /proc control; folded in by set_idump_trigger_enable(). */
unsigned int idump_trigger_enable_by_proc;

/*
 * Boot-time hook: passing "idump_trigger_enable" on the kernel command
 * line enables the idump trigger from the very start.
 */
static int __init idump_trigger_setup(char *str)
{
	idump_trigger_enable = IDUMP_TRIGGER_ENABLE;
	/*
	 * __setup() handlers must return 1 when the option is consumed;
	 * returning 0 makes the core treat it as unknown and pass it on
	 * to init as an environment/argument string.
	 */
	return 1;
}
__setup("idump_trigger_enable", idump_trigger_setup);

/*
 * Some kernels (e.g. v4.1) do not provide a task_is_dead() helper;
 * supply an equivalent fallback: a non-zero exit_state means the task
 * is in EXIT_ZOMBIE/EXIT_DEAD.
 */
#ifndef task_is_dead
# define task_is_dead(task)	((task)->exit_state != 0)
#endif

/*
 * Human-readable reason strings, indexed by the return codes of
 * task_allow_idump() (ALLOW_IDUMP == 0 maps to "allow").  Keep this
 * table in sync with that enum: the code logs no_allow_case[allow].
 */
static const char *no_allow_case[NO_ALLOW_CASE_MAX] = {
	"allow",
	"__fatal_signal_pending",
	"signal_group_exit",
	"task_is_stopped_or_traced",
	"task_is_dead",
	"TASK_UNINTERRUPTIBLE",
	"tsk->flags PF_EXITING or PF_KTHREAD...",
	"tsk->mm ==NULL",
	"tsk->mm->core_state != NULL",
};

bool task_being_idump(struct task_struct *tsk)
{
	struct rtos_mm_struct *rtos_mm = NULL;

	if (tsk->mm == NULL)
		return 0;

	rtos_mm = mm_to_rtos_mm(tsk->mm);
	if (rtos_mm->idump.core_state == NULL)
		return 0;

	return 1;
}

/*
 * task_allow_idump - decide whether @tsk can safely be idumped
 * @tsk: candidate task
 *
 * Returns ALLOW_IDUMP (0) when the task is in a dumpable state, or a
 * non-zero reason code (which indexes no_allow_case[] for the log
 * text) when the task is exiting, stopped/traced, dead, a kernel
 * thread, mm-less, or already being core-dumped.
 */
int task_allow_idump(struct task_struct *tsk)
{
	if (__fatal_signal_pending(tsk))
		return FATAL_SIGNAL_PENDING;

	if (signal_group_exit(tsk->signal))
		return SIGNAL_GROUTP_EXIT;

	if (task_is_stopped_or_traced(tsk))
		return TASK_IS_STOPPED_OR_TRACED;

	if (task_is_dead(tsk))
		return TASK_IS_DEAD;

	/* exiting via futex death, signal-driven exit, coredump, or kthread */
	if (tsk->futex_state == FUTEX_STATE_DEAD ||
		(tsk->flags & (PF_EXITING | PF_SIGNALED |
			       PF_DUMPCORE | PF_KTHREAD)))
		return TASK_FLAGS_EXIT;

	if (tsk->mm == NULL)
		return MM_NULL;

	/* being coredump */
	if (tsk->mm->core_state != NULL)
		return BEING_COREDUMP;

	return ALLOW_IDUMP;
}

/*
 * Fill @now with the current wall-clock time and @rtc with its
 * calendar breakdown, adjusted for human-readable logging.
 */
static void get_sys_rtc_time(struct timespec64 *now, struct rtc_time *rtc)
{
	ktime_get_real_ts64(now);
	rtc_time64_to_tm(now->tv_sec, rtc);
	/* struct rtc_time stores years-since-1900 and 0-based months */
	rtc->tm_year += 1900;
	rtc->tm_mon += 1;
}

/*
 * print_idump_time - log an idump lifecycle marker with a wall-clock stamp
 * @task: task being dumped (current is the triggering task)
 * @info: stage tag such as "start", "waiting", "dumping", "finish"
 *
 * @info is only read and every caller passes a string literal, so it
 * is const-qualified (static function, no external callers affected).
 */
static void print_idump_time(struct task_struct *task, const char *info)
{
	struct timespec64 ts;
	struct rtc_time tm;

	get_sys_rtc_time(&ts, &tm);

	pr_info("[idump][Trigger:%s-%d  Dump:%s-%d]"
			"[%04d.%02d.%02d %02d:%02d:%02d-%lu] %s\n",
			current->comm, current->pid, task->comm, task->pid,
			tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min,
			tm.tm_sec, ts.tv_nsec, info);
}

/*
 * idump_finish - tear down the idump core_state and resume the target
 * @mm: target process' mm whose idump state is being cleared
 * @t:  task handle used for debug tracing
 *
 * Clears rtos_mm->idump.core_state under the mmap write lock, then
 * walks the core_thread list built while the threads parked, NULLing
 * each entry's ->task and waking that thread out of __TASK_STOPPED.
 * Unlike do_coredump(), the target process keeps running afterwards.
 */
static void idump_finish(struct mm_struct *mm, struct task_struct *t)
{
	struct core_thread *curr = NULL;
	struct core_thread *next = NULL;
	struct task_struct *task = NULL;
	struct rtos_mm_struct *rtos_mm = mm_to_rtos_mm(mm);

	idump_show_func(t);

	mmap_write_lock(mm);
	idump_control_debug(t, "down_write\n");
	next = rtos_mm->idump.core_state->dumper.next;
	rtos_mm->idump.core_state = NULL;
	mmap_write_unlock(mm);
	idump_control_debug(t, "up_write\n");
	while ((curr = next) != NULL) {
		/* read ->next before publishing ->task == NULL (barrier below) */
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_state(task, __TASK_STOPPED);
		idump_control_debug(task, "wake_up_state\n");
	}
}

/*
 * idump_stop_threads - signal every sibling thread of @tsk to park
 * @tsk: leader of the thread group being dumped
 * @mm:  the group's mm (caller holds its mmap write lock, see idump_wait())
 * @idump_core_state: core_state the parked threads will link into
 *
 * Publishes @idump_core_state in rtos_mm and sends a synchronous wake
 * to every thread except current, so each enters the coredump-park
 * path.  Returns the number of threads signalled, or -1 if any thread
 * is in a state that forbids dumping (the reason is logged).
 */
static int idump_stop_threads(struct task_struct *tsk, struct mm_struct *mm,
		struct core_state *idump_core_state)
{
	struct task_struct *t;
	int nr = 0;
	int allow = 0;
	struct rtos_mm_struct *rtos_mm = mm_to_rtos_mm(mm);

	idump_show_func(tsk);
	rtos_mm->idump.core_state = idump_core_state;
	read_lock(&tasklist_lock);
	t = tsk;
	do {
		/* the triggering thread keeps running to perform the dump */
		if (t == current)
			continue;
		allow = task_allow_idump(t);
		/* allow == 0 is ALLOW_IDUMP */
		if (allow == 0) {
			signal_wake_up(t, 1);
			idump_control_debug(t, "signal_wake_up\n");
			nr++;
		} else {
			nr = -1;
			pr_info("[idump][trigger:%s %d][task:%s %d][func: %s][thread:%s %d] not allow dump %d %s\n",
					  current->comm, current->pid,
					  tsk->comm, tsk->pid, __func__,
					  t->comm, t->pid, allow,
					  no_allow_case[allow]);
			break;
		}
	} while_each_thread(tsk, t);
	read_unlock(&tasklist_lock);
	/*
	 * idump no exit task,
	 * comment "tsk->signal->group_exit_task = tsk;"
	 *
	 * ignore all signals except SIGKILL, see prepare_signal()
	 * idump can't ignore other signal,
	 * comment "tsk->signal->flags = SIGNAL_STOP_STOPPED;"
	 *
	 * for coredump tsk=current, but not suitable for idump.
	 * comment "clear_tsk_thread_flag(tsk, TIF_SIGPENDING);"
	 */

	if (likely(nr > 0)) {
		/*
		 * this case can't stop all thread for mm->mm_users
		 * vfork: if vfork no execv and modification mm, the core maybe error
		 * get_task_mm: can't find all thread get this task mm ...
		 *
		 * here -1 is because do_dump_trigger use get_task_mm,
		 */
		if (((tsk == current) &&
		     (atomic_read(&mm->mm_users) - 1 != nr + 1)) ||
		    ((tsk != current) &&
		     (atomic_read(&mm->mm_users) - 1 != nr)))
			pr_info("[idump][trigger:%s %d][task:%s %d][func: %s] mm->mm_users:%d != task_threads:%d\n",
					  current->comm, current->pid,
					  tsk->comm, tsk->pid, __func__,
					  atomic_read(&mm->mm_users), nr);

		atomic_set(&idump_core_state->nr_threads, nr);
	}

	return nr;

	/*
	 * PF_DUMPCORE used in __alloc_pages_slowpath,
	 * idump maybe can't use PF_DUMPCORE,
	 * comment "tsk->flags = PF_DUMPCORE;"
	 */
}

/*
 * show_task_d - dump stack traces of @task's uninterruptible threads
 * @task: thread-group leader to walk
 *
 * Used when idump_wait() times out: prints the kernel stacks of every
 * sibling stuck in TASK_UNINTERRUPTIBLE to help diagnose why they
 * never parked.  Bails out if @task has already lost its mm, because
 * the thread list may be mid-teardown (see the race diagram below).
 */
static void show_task_d(struct task_struct *task)
{
	struct task_struct *p;

	read_lock(&tasklist_lock);
	if (task->mm == NULL) {
	/*
	 * maybe task is dying, can't call while_each_thread
	 * do_exit
	 *	exit_mm
	 *		task->mm=NULL;
	 *	exit_notify
	 *		release_task
	 *			write_lock_irq(&tasklist_lock);
	 *			__exit_signal(p);
	 *				__unhash_process(tsk, group_dead);
	 *					list_del_rcu(&p->thread_group);
	 *			write_unlock_irq(&tasklist_lock);
	 */
		read_unlock(&tasklist_lock);
		return;
	}

	p = task;
	do {
		if ((unsigned long)p->state & TASK_UNINTERRUPTIBLE) {
			/*
			 * reset the NMI-timeout, listing all files on a slow
			 * console might take a lot of time:
			 */
			touch_nmi_watchdog();
			sched_show_task(p);
		}
	} while_each_thread(task, p);
	read_unlock(&tasklist_lock);
}

/*
 * idump_wait - stop the target's threads and wait until they all park
 * @idump_core_state: on-stack core_state used as the park-list head
 * @tsk: thread-group leader being dumped
 * @mm:  the group's mm
 *
 * Initializes the completion and dumper list head, signals the sibling
 * threads under the mmap write lock (unless another idump already owns
 * rtos_mm->idump.core_state, in which case -EBUSY is returned), then
 * waits up to rtos_mm->idump.timeout_msec for them all to check in.
 * On timeout or a not-allowed thread, the parked threads are released
 * via idump_finish() and -EBUSY is returned.  Otherwise returns the
 * (>= 0) number of parked threads.
 */
static int idump_wait(struct core_state *idump_core_state, struct task_struct *tsk, struct mm_struct *mm)
{
	int core_waiters = -EBUSY;
	unsigned long timeout_msec;
	struct rtos_mm_struct *rtos_mm = mm_to_rtos_mm(mm);

	idump_show_func(tsk);
	init_completion(&idump_core_state->startup);
	/* as list head */
	idump_core_state->dumper.task = tsk;
	idump_core_state->dumper.next = NULL;

	mmap_write_lock(mm);
	idump_control_debug(tsk, "down_write\n");
	if (!rtos_mm->idump.core_state)
		core_waiters = idump_stop_threads(tsk, mm, idump_core_state);
	mmap_write_unlock(mm);
	idump_control_debug(tsk, "up_write\n");

	if (likely(core_waiters > 0)) {
		struct core_thread *ptr;

		timeout_msec = rtos_mm->idump.timeout_msec;
		if (!wait_for_completion_timeout(&idump_core_state->startup, msecs_to_jiffies(timeout_msec))) {
			pr_info("[idump][trigger:%s %d][task:%s %d][func: %s]wait_for_completion_timeout\n",
					current->comm, current->pid, tsk->comm, tsk->pid, __func__);
			show_task_d(tsk);
			idump_finish(mm, tsk);
			return -EBUSY;
		}

		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = idump_core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	/* -1 from idump_stop_threads: some thread refused; undo and bail */
	if (unlikely(core_waiters == -1)) {
		show_task_d(tsk);
		idump_finish(mm, tsk);
		return -EBUSY;
	}

	return core_waiters;
}

/*
 * do_dump_trigger - dump a live task's core image without killing it
 * @task: a thread of the process to dump
 *
 * Mirrors the fs/coredump.c do_coredump() flow: park the target's
 * threads, resolve core_pattern (regular file or "|helper" pipe), open
 * the destination as current with overridden creds, and hand off to
 * the binfmt core_dump handler — but the target keeps running
 * afterwards (idump_finish() wakes the parked threads).
 *
 * Returns 0 on a successful dump, a negative errno on failure.
 * NOTE(review): if dump_interrupted() is true, retval may still hold
 * the positive waiter count from idump_wait() — confirm callers only
 * test for negative values.
 */
int do_dump_trigger(struct task_struct *task)
{
	struct core_state idump_core_state;
	struct core_name cn;
	struct mm_struct *mm;
	struct rtos_mm_struct *rtos_mm;
	struct linux_binfmt *binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	const struct cred *tsk_cred;
	int retval = -EPERM;
	int ispipe;
	/* require nonrelative corefile path and be extra careful */
	bool need_suid_safe = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	/* synthetic siginfo handed to the binfmt dumper: kernel-originated SIGUSR1 */
	kernel_siginfo_t idump_sig_info = {
		.si_signo = SIGUSR1,
		.si_errno = 0,
		.si_code = SI_KERNEL,
		.si_pid = 0,
		.si_uid = 0,
	};
	struct rtos_coredump_params rtos_cprm;
/* shorthand: the embedded generic coredump_params inside rtos_cprm */
#define cprm rtos_cprm.cprm
	int allow = 0;
	size_t *argv = NULL;
	int argc = 0;

	if (idump_trigger_enable != IDUMP_TRIGGER_ENABLE) {
		printk("[idump][func:%s] dump trigger disabled!\n", __func__);
		return -ENOENT;
	}

	idump_show_func(task);

	memset(&idump_core_state, 0, sizeof(struct core_state));
	memset(&rtos_cprm, 0, sizeof(struct rtos_coredump_params));

	if (!task) {
		printk("[idump][func:%s] fail task==NULL\n", __func__);
		goto fail_no_mm;
	}

	print_idump_time(task, "start");

	/* pin the target's mm; this reference is dropped at the fail: label */
	mm = get_task_mm(task);
	if (!mm) {
		pr_info("[idump][task:%s %d][func:%s] fail get_task_mm\n",
				task->comm, task->pid, __func__);
		goto fail_no_mm;
	}
	rtos_mm = mm_to_rtos_mm(mm);
	cprm.siginfo = &idump_sig_info;
	cprm.regs = task_pt_regs(task);
	cprm.limit = task_rlimit(task, RLIMIT_CORE);
	cprm.mm_flags = mm->flags;
	rtos_cprm.dump_filter_type = rtos_mm->idump.dump_filter_type;
	rtos_cprm.task = task;

#ifdef CONFIG_RTOS_DEBUG_DUMP_USER_PROCESS
	dump_user_exception(task, cprm.siginfo);
#endif

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * log for current exit with coredump. but idump not exit task,
	 * so, maybe no need and can't log for idump
	 */
	audit_idumps(task);
#endif

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump) {
		pr_info("[idump][task:%s %d][func:%s] fail binfmt or binfmt->core_dump is NULL\n",
				  task->comm, task->pid, __func__);
		goto fail;
	}
	if (!__get_dumpable(cprm.mm_flags)) {
		pr_info("[idump][task:%s %d][func:%s] fail !__get_dumpable(cprm.mm_flags)\n",
				  task->comm, task->pid, __func__);
		goto fail;
	}

	allow = task_allow_idump(task);
	if (allow != 0) {
		pr_info("[idump][task:%s %d][func: %s] task_allow_idump fail, %d %s\n",
				task->comm, task->pid, __func__, allow, no_allow_case[allow]);
		goto fail;
	}

	/* refuse to nest: another idump of this process is in flight */
	if (task_being_idump(task) != 0) {
		pr_info("[idump][task:%s %d][func: %s] task_being_idump fail\n",
				task->comm, task->pid, __func__);
		goto fail;
	}

	cred = prepare_creds();
	if (!cred) {
		pr_info("[idump][task:%s %d][func:%s] fail prepare_creds() return  NULL\n",
				task->comm, task->pid, __func__);
		goto fail;
	}
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_suid_safe = true;
	}

	print_idump_time(task, "waiting");
	/* park all sibling threads; on failure nothing is left to undo here */
	retval = idump_wait(&idump_core_state, task, mm);
	if (retval < 0) {
		pr_info("[idump][trigger:%s %d][task:%s %d tgid:%d][func:%s] fail idump_wait return < 0\n",
				  current->comm, current->pid, task->comm,
				  task->pid, task->tgid, __func__);
		goto fail_creds;
	}

	/* here over write current->creds, because file open by current */
	old_cred = override_creds(cred);

	tsk_cred = get_cred(task->cred);
	ispipe = format_corename(&cn, &cprm, &argv, &argc);
	put_cred(tsk_cred);

	if (ispipe) {
		/* core_pattern starts with '|': pipe the dump to a helper */
		int argi;
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			pr_warn("format_corename failed\n");
			pr_warn("Aborting core\n");
			pr_info("[idump][task:%s %d][func:%s] fail ispipe < 0\n",
					task->comm, task->pid, __func__);
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/*
			 * See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a speacial value, this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIM_CORE = !1, but it runs as root, and can do
			 * lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader.  That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			pr_warn("Process %d(%s) has RLIMIT_CORE set to 1\n",
					task_tgid_vnr(task), task->comm);
			pr_warn("Aborting core\n");
			pr_info("[idump][task:%s %d][func:%s] fail cprm.limit == 1\n",
					task->comm, task->pid, __func__);
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		/* bound concurrent pipe dumps per core_pipe_limit sysctl */
		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			pr_warn("Pid %d(%s) over core_pipe_limit\n",
					task_tgid_vnr(task), task->comm);
			pr_warn("Skipping core dump\n");
			pr_info("[idump][task:%s %d][func:%s] fail core_pipe_limit < dump_count\n",
					task->comm, task->pid, __func__);
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL);
		if (!helper_argv) {
			pr_warn("%s failed to allocate memory\n",
					__func__);
			pr_info("[idump][task:%s %d][func:%s] fail argv_split cn.corename return NULL\n",
					task->comm, task->pid, __func__);
			goto fail_dropcount;
		}
		/* rebase argv offsets (from format_corename) into pointers */
		for (argi = 0; argi < argc; argi++)
			helper_argv[argi] = cn.corename + argv[argi];
		helper_argv[argi] = NULL;

		retval = -ENOMEM;
		/* umh_pipe_setup stores the pipe's write end into cprm.file */
		sub_info = call_usermodehelper_setup(helper_argv[0],
				helper_argv, NULL, GFP_KERNEL,
				umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
					UMH_WAIT_EXEC);

		argv_free(helper_argv);
		if (retval) {
			pr_info("Core dump to |%s pipe failed\n",
					cn.corename);
			goto close_fail;
		}
	} else {
		/* core_pattern is a path: dump into a regular file */
		struct inode *inode;

		if (cprm.limit < binfmt->min_coredump) {
			pr_info("[idump][task:%s %d][func:%s] fail cprm.limit < binfmt->min_coredump\n",
					task->comm, task->pid, __func__);
			goto fail_unlock;
		}

		if (need_suid_safe && cn.corename[0] != '/') {
			pr_warn("Pid %d(%s) can only dump core "\
					"to fully qualified path!\n",
					task_tgid_vnr(task), task->comm);
			pr_warn("Skipping core dump\n");
			pr_info("[idump][task:%s %d][func:%s] fail cn.corename[0] != '/'\n",
					task->comm, task->pid, __func__);
			goto fail_unlock;
		}

		/*
		 * Unlink the file if it exists unless this is a SUID
		 * binary - in that case, we're running around with root
		 * privs and don't want to unlink another user's coredump.
		 */
		if (!need_suid_safe) {
			/*
			 * If it doesn't exist, that's fine. If there's some
			 * other problem, we'll catch it at the filp_open().
			 */
			do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
		}

		/*
		 * There is a race between unlinking and creating the
		 * file, but if that causes an EEXIST here, that's
		 * fine - another process raced with us while creating
		 * the corefile, and the other process won. To userspace,
		 * what matters is that at least one of the two processes
		 * writes its coredump successfully, not which one.
		 */
		cprm.file = filp_open(cn.corename,
				O_CREAT | O_RDWR | O_NOFOLLOW |
				O_LARGEFILE | O_EXCL,
				0600);
		if (IS_ERR(cprm.file)) {
			pr_info("[idump][task:%s %d][func:%s] fail filp_open cn.corename with err(%ld)\n",
					task->comm, task->pid, __func__, PTR_ERR(cprm.file));
			goto fail_unlock;
		}

		/* reject hardlinked targets: another name could keep the data */
		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1) {
			pr_info("[idump][task:%s %d][func:%s] fail corename inode->i_nlink > 1\n",
					task->comm, task->pid, __func__);
			goto close_fail;
		}
		if (d_unhashed(cprm.file->f_path.dentry)) {
			pr_info("[idump][task:%s %d][func:%s] fail d_unhashed(cprm.file->f_path.dentry) \n",
					task->comm, task->pid, __func__);
			goto close_fail;
		}
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode)) {
			pr_info("[idump][task:%s %d][func:%s] fail !S_ISREG(inode->i_mode)\n",
					task->comm, task->pid, __func__);
			goto close_fail;
		}
		/*
		 * Dont allow local users get cute and trick others to coredump
		 * into their pre-created files.
		 */
		if (!uid_eq(inode->i_uid, current_fsuid())) {
			/* file open by current, so use current_fsuid */
			pr_info("[idump][task:%s %d][func:%s] fail uid_eq(inode->i_uid, current_fsuid())\n",
					task->comm, task->pid, __func__);
			goto close_fail;
		}
		if (!(cprm.file->f_mode & FMODE_CAN_WRITE)) {
			pr_info("[idump][task:%s %d][func:%s] fail !(cprm.file->f_mode & FMODE_CAN_WRITE)\n",
					task->comm, task->pid, __func__);
			goto close_fail;
		}
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file)) {
			/* file open by current, check it */
			pr_info("[idump][task:%s %d][func:%s] fail do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file)\n",
					  task->comm, task->pid, __func__);
			goto close_fail;
		}
	}

	/* destination is ready: write the actual core image */
	if (!dump_interrupted()) {
		print_idump_time(task, "dumping");
		if (!dump_vma_snapshot(&cprm))
			goto close_fail;
		file_start_write(cprm.file);
		mmap_read_lock(mm);
		idump_control_debug(task, "down_read");
		core_dumped = binfmt->core_dump(&cprm);
		mmap_read_unlock(mm);
		idump_control_debug(task, "up_read");
		file_end_write(cprm.file);
	}
	if (core_dumped == true)
		retval = 0;

	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);

/* unified goto-cleanup ladder; each label undoes one acquisition step */
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(argv);
	kfree(cn.corename);
	idump_finish(mm, task);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	mmput(mm);
	print_idump_time(task, "finish");
fail_no_mm:
	return retval;
}

void check_idump_trigger(struct ksignal *ksig)
{
	if (current && current->mm && (ksig->ka.sa.sa_handler != SIG_DFL)) {
		struct rtos_mm_struct *rtos_mm = mm_to_rtos_mm(current->mm);
		/*
		 * Force dump the core file if the signr buf is
		 * set in /proc/pid/coredump_force
		 */
		if (test_bit((ksig->sig - 1), rtos_mm->idump.sig_force))
			do_dump_trigger(current);
	}
}

/*
 * Propagate the /proc-configured flag into the runtime enable switch.
 * One-way: a non-zero proc value enables, it never disables.
 */
void set_idump_trigger_enable(void)
{
	if (idump_trigger_enable_by_proc)
		idump_trigger_enable = IDUMP_TRIGGER_ENABLE;
}
#endif
