/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2019.
 * Description: coredump on syscall exit or exit_group
 * Author: nixiaoming
 * Create: 2019-03-02
 */

#include <linux/coredump.h>
#include <linux/rtos/coredump_on_exit.h>

#include <linux/idump.h>

/* only call by rtos_mm_struct_init, rtos_mm != NULL */
/* only call by rtos_mm_struct_init, rtos_mm != NULL */
void init_dump_on_exit(struct rtos_mm_struct *rtos_mm)
{
	/*
	 * Start every mm in the NO_DUMP_ON_EXIT state; coredump_on_exit()
	 * only acts once something elsewhere flips this to NEED_DUMP_ON_EXIT.
	 * Caller guarantees rtos_mm is non-NULL, so no check is needed here.
	 */
	rtos_mm->dump_on_exit = NO_DUMP_ON_EXIT;
}

/*
 * Take a coredump of the current task on syscall exit / exit_group when the
 * mm has been marked NEED_DUMP_ON_EXIT.
 *
 * @sig:           signal number recorded in the synthesized siginfo
 *                 (may not correspond to a real delivered signal)
 * @error_code:    stored in si_errno of the synthesized siginfo
 * @is_group_exit: true when called from the exit_group path
 *
 * Temporarily clears SIGNAL_GROUP_EXIT so that do_coredump() (which bails
 * out when signal_group_exit() is true) will actually produce the core,
 * then restores the saved flags and group_exit_code afterwards.
 *
 * BUGFIX: the idump-trigger early-exit path used to return while still
 * holding current->sighand->siglock with IRQs disabled; all bail-out paths
 * after the lock is taken now funnel through a single unlock label.
 */
void coredump_on_exit(int sig, int error_code, bool is_group_exit)
{
	struct rtos_mm_struct *rtos_mm = NULL;
	struct signal_struct *signal = NULL;
	unsigned int old_flags;
	int old_group_exit_code;

	kernel_siginfo_t dump_sig_info = {
		.si_signo = sig, /* maybe no real signal */
		.si_errno = error_code,
		.si_code = SI_KERNEL,
		.si_pid = 0,
		.si_uid = 0,
	};

	/* kernel thread, exited mm, or a coredump is already in progress */
	if (current->mm == NULL || current->mm->core_state != NULL)
		return;
	if (sig_kernel_ignore(dump_sig_info.si_signo) ||
			sig_kernel_stop(dump_sig_info.si_signo))
		return;

	/* don't dump while the parent is blocked in vfork() on us */
	if (current->vfork_done)
		return;

	spin_lock_irq(&current->sighand->siglock);
	signal = current->signal;
	/* do_coredump only get core when !signal_group_exit(current->signal) */
	if (!(signal->flags & SIGNAL_GROUP_EXIT) && !thread_group_empty(current) &&
			!is_group_exit)
		goto out_unlock;

	rtos_mm = mm_to_rtos_mm(current->mm);
	if (rtos_mm->dump_on_exit != NEED_DUMP_ON_EXIT)
		goto out_unlock;

#if defined(CONFIG_RTOS_IDUMP_FORCE) || defined(CONFIG_RTOS_IDUMP_TRIGGER)
	if (idump_trigger_enable == IDUMP_TRIGGER_ENABLE) {
		printk_once(KERN_INFO "idump trigger enable, do not coredump on exit\n");
		/* was a bare return: leaked siglock with IRQs off */
		goto out_unlock;
	}
#endif

	/* announce the dump so exit_mm_wait_dump_start() waiters can see it */
	rtos_mm->dump_on_exit = BEING_DUMP_ON_EXIT;
	old_flags = signal->flags;
	old_group_exit_code = signal->group_exit_code;
	/* hide group-exit from do_coredump(), restored below */
	signal->flags &= (~SIGNAL_GROUP_EXIT);
	spin_unlock_irq(&current->sighand->siglock);

	do_coredump(&dump_sig_info);

	spin_lock_irq(&current->sighand->siglock);
	signal->flags = old_flags;
	signal->group_exit_code = old_group_exit_code;
	spin_unlock_irq(&current->sighand->siglock);
	rtos_mm->dump_on_exit = NO_DUMP_ON_EXIT; /* avoid re-entry */
	return;

out_unlock:
	spin_unlock_irq(&current->sighand->siglock);
}

/*
 * Called with mmap_read_lock(mm) held.  If a coredump-on-exit is in flight
 * (BEING_DUMP_ON_EXIT) but do_coredump() has not yet installed
 * mm->core_state, drop the mmap lock and poll until the dump either starts
 * or finishes, then return with the mmap read lock re-acquired.
 */
void exit_mm_wait_dump_start(struct mm_struct *mm)
{
	struct rtos_mm_struct *rtos_mm = mm_to_rtos_mm(mm);

	while (!mm->core_state && rtos_mm->dump_on_exit == BEING_DUMP_ON_EXIT) {
		/* release the mmap lock so the dumper can make progress */
		mmap_read_unlock(mm);
		schedule_timeout_interruptible(1);
		mmap_read_lock(mm);
	}
}
