// SPDX-License-Identifier: GPL-2.0-only
/*
 * Background:
 * Rollback is required when the upgrade fails. If the rollback is triggered
 * while the context is interrupted, running `kernel_kexec()` hangs, because
 * `kernel_kexec()` sleeps, schedules, and uses IPIs during execution to
 * notify other cores (`smp_call_function_single()`).
 * Therefore, the panicked core must be made schedulable again, and a worker
 * is created on the reboot core to execute `kernel_kexec()`.
 *
 * Fundamental:
 * The CPU does not care which context it is in when executing in kernel
 * space (x86_64 ring0, arm64 el1).
 * The difference between the processor modes lies in the resources they
 * occupy. For example, the irq ctx has an independent stack, and the irq
 * controller is in the active state.
 *
 * Terminology:
 * - Panic: refers to the unrecoverable problems triggered by `panic()` and `die()`,
 *   which occur only in the kernel mode.
 * - Multiple panics: indicates that multiple panics occur on the same CPU.
 * - Panic nesting: when a panic occurs on a CPU, its call stack contains
 *   multiple contexts. For example, a CPU running in the process context
 *   takes an irq, enters the hardirq context, and thus becomes a hardirq
 *   context nested inside the process context.
 *
 * NOTICE:
 * 1. The try best policy does not guarantee successful rollback.
 * Cause: Unreliable memory scenarios cannot be covered.
 * 2. When a panic occurs in the irq context, rollback success depends on
 * the panic not occurring inside functions such as `irq_enter()` in the
 * general irq processing flow, because the current irq eoi logic depends
 * on that flow.
 * 3. Multiple panics cannot occur on the same CPU, and panics cannot occur on the CPU
 * that is performing rollback.
 *
 * Design:
 *
 * - Rollback triggering factors:
 * - call `panic()`
 * - call `die()`
 *
 * - Rollback triggering time
 * - Panic trigger
 * - Triggered by the timeout detection mechanism invoking `panic()`
 * 1. The system startup phase is triggered by angel core.
 * 2. Triggered by the watchdog during system running
 *
 * - Type of the context in which the panic occurs:
 * - In the process context (task context)
 * - Exception context (exception context)
 * - Interrupt context
 * - Software interrupt context (softirq context)
 * - Hard interrupt context (hardirq context)
 * - Non-maskable interrupt context (nmi context)
 *
 * - panic nesting principle
 * - Interrupts can be nested in the process context.
 * - The soft interrupt context can nest hard interrupts and non-maskable interrupts.
 * - The hard interrupt context can nest non-maskable interrupts.
 * - Exceptions can be nested in any context
 *
 * - Rollback Scenario Summary
 *
 * --------------------------+--------------------+----------------------------
 * Scenario | Process Context | Interrupt Context
 * --------------------------+----------+---------+---------+---------+--------
 * | user app | kthread | softirq | hardirq | nmi
 * ----------+---------------+----------+---------+---------+---------+--------
 * One panic | support
 * ----------+---------------+----------+---------+---------+---------+--------
 * Multiple panics | Same CPU | Not supported
 * +---------------+----------+---------+---------+---------+--------
 * | CPU being rolled back | Not supported
 * +---------------+----------+---------+---------+---------+--------
 * | Normal CPU | Not supported
 * ----------+---------------+----------+---------+---------+---------+--------
 *
 * - Use the timeout mechanism to ensure that the system does not die completely.
 * 1. The timeout restart mechanism is configured for the CPU recovered from the panic to
 * prevent the scenario where the IPI cannot be processed.
 * 2. Configure the hrtimer mechanism to prevent multiple infinite loops on the same CPU.
 * 3. Depends on the NMI deadlock detection mechanism when the hrtimer does not work.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kexec.h>
#include <linux/nmi.h>
#include <linux/reboot.h>
#include <linux/threads.h>
#include <linux/delay.h>
#include <linux/vtime.h>
#include <linux/hardirq.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/console.h>
#ifdef CONFIG_EULEROS_ANGEL_CORE_MONITOR
#include <asm/angel_core.h>
#endif /* CONFIG_EULEROS_ANGEL_CORE_MONITOR */

bool kexec_panic_rollback;	/* the flag that make x86 use nmi to stop core */
/* CPU selected to perform the rollback; -1 until select_roll_cpu() runs */
static int roll_cpu = -1;
/* panic nesting depth; -1 means no panic has been handled yet */
static int nested = -1;
/* jiffies timestamp of the first rollback attempt (0 = not started) */
static unsigned long roll_event_start;
static DEFINE_SPINLOCK(nested_lock);
/*
 * One bit per possible CPU that has entered panic handling.
 * Fix: DECLARE_BITMAP needs a compile-time constant for the array size at
 * file scope; num_possible_cpus() is not a constant expression on SMP.
 */
static DECLARE_BITMAP(panic_cpus, NR_CPUS);

/*
 * Probe whether @lock can be acquired, without keeping it.
 *
 * Attempts raw_spin_trylock() up to three times, waiting 100ms between
 * attempts; on success the lock is released immediately. The caller only
 * learns that the lock is not held (e.g. by a dead owner).
 *
 * Returns true if the lock could be taken, false otherwise.
 */
bool try_own_spin_lock(raw_spinlock_t *lock)
{
	const int attempts = 3;
	const int delay_ms = 100;
	int try;

	for (try = 0; try < attempts; try++) {
		if (!raw_spin_trylock(lock)) {
			mdelay(delay_ms);
			continue;
		}
		raw_spin_unlock(lock);
		return true;
	}

	return false;
}

/*
 * Flush the printk-safe buffers to the console; thin wrapper so the
 * rollback path has a single, clearly-named flush point.
 */
static void printk_safe_flush_on_roll(void)
{
	printk_safe_flush_on_panic();
}

/* Restart the machine immediately, with no printing of any kind. */
static void safe_emergency_restart(void)
{
	/* don't print anything to avoid deadlock on console/printk locks */
	emergency_restart();
}

/*
 * Expiry callback of the watchdog hrtimer armed by reboot_hrtimer_init():
 * the rollback did not complete within the timeout, so honor the
 * panic-specific reboot mode and force an emergency restart.
 * The return value is moot — the machine restarts before it matters.
 */
static enum hrtimer_restart reboot_hrtimer_fn(struct hrtimer *timer)
{
	pr_emerg("hrtimer timeout, start emergency restart at cpu #%d\n",
			smp_processor_id());

	/* same reboot-mode override that panic() itself applies */
	if (panic_reboot_mode != REBOOT_UNDEFINED)
		reboot_mode = panic_reboot_mode;

	safe_emergency_restart();

	return HRTIMER_NORESTART;
}

static void reboot_hrtimer_init(unsigned long sample_period)
{
	static struct hrtimer hrtimer;
	static atomic_t initialized = ATOMIC_INIT(0);

	if (atomic_xchg(&initialized, 1) == 1)
		return;

	hrtimer_init(&hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer.function = reboot_hrtimer_fn;
	/* don't pin to make it can be triggered anywhere */
	hrtimer_start(&hrtimer, ms_to_ktime(sample_period),
		      HRTIMER_MODE_REL_HARD);

	pr_emerg("Start hrtimer monitor timer at cpu #%d\n", smp_processor_id());
}

/*
 * Workqueue callback that actually performs the kexec rollback; queued on
 * the selected roll CPU by irq_kexec_ctx() / boot_try_rollback().
 */
static void irq_kexec_work(struct work_struct *work)
{
	pr_emerg("Start 1st interrupt kexec from cpu #%d pid %d comm %s\n",
			smp_processor_id(), current->pid, current->comm);

	kernel_kexec();
}

/* work item used for the panic-triggered (first-panic) rollback */
static DECLARE_WORK(irq_kexec_worker, irq_kexec_work);

/* Arch hooks to EOI/tear down the NMI or hardirq context; 0 on success. */
int __weak arch_exit_nmi_ctx(void) { return 0; }
int __weak arch_exit_irq_ctx(void) { return 0; }
/* Run @func on a dedicated panic stack; default just calls it in place. */
void __weak arch_call_on_panic_stack(void *arg, void (*func)(void *arg)) { func(arg); };

void panic_irq_exit(void);

/*
 * Unwind the interrupt state of the panicked CPU so it can schedule again.
 * Order matters: when contexts are nested, the NMI sits on top of the
 * hardirq, so the NMI is EOI'd first, then the hardirq.
 * Returns 0 on success, -1 when an arch hook failed.
 */
static int exit_irq_ctx(void)
{
	/* if irq is nested, nmi is on the top, therefore eoi nmi first. */
	if (in_nmi()) {
		/* exactly one NMI level expected — nested NMIs would be a bug */
		WARN_ON((preempt_count() & NMI_MASK) != NMI_OFFSET);
		if (arch_exit_nmi_ctx() != 0)
			return -1;
	}

	if (in_irq()) {
		/*
		 * Linux don't allow hardirq nest, hardirq count should be 1 in
		 * theory, otherwise warning here.
		 */
		WARN_ON(hardirq_count() != HARDIRQ_OFFSET);
		if (arch_exit_irq_ctx() != 0)
			return -1;

		/*
		 * Q: What happened when panic in `panic_irq_exit()` procedure?
		 * e.g. illegal memory access.
		 *
		 * A: it's panic then panic condition at the same core,
		 * emergency restart will trigger.
		 */
		panic_irq_exit();
	}

	/* ignore softirq */
	return 0;
}

/* Arch hook to re-enable hardirqs; default is a plain local_irq_enable(). */
void __weak arch_enable_irq(void)
{
	local_irq_enable();
}

/*
 * Timeout budget (ms) for the rollback. With the angel-core monitor it is
 * the monitor's last configured timeout, otherwise a compile-time default.
 */
unsigned long get_kexec_timeout(void)
{
#ifdef CONFIG_EULEROS_ANGEL_CORE_MONITOR
	return get_last_angel_timeout();
#else
	return DEFAULT_KEXEC_ROLLBACK_TIMEOUT_MS;
#endif
}

/*
 * Pin the current task to the CPU it is running on so the rollback
 * context cannot migrate. The idle task (pid 0) is skipped: it is
 * per-CPU by construction and its cpumask must not be changed.
 */
static void set_cpu_affinity(void)
{
	if (current->pid != 0)
		set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
}

void fixup_pmuevent(void);
int fixup_hrtimer(void);

/*
 * Repair the lockup-detection sources that may have fired the panic:
 * restart the PMU event and reprogram the hrtimer. Returns 0 on success,
 * -1 when the hrtimer could not be reprogrammed.
 */
static int fixup_lockup(void)
{
	/* if the panic comes from a pmu event, the pmu needs a restart */
	fixup_pmuevent();

	/* if the panic comes from an hrtimer, it needs reprogramming */
	return fixup_hrtimer() != 0 ? -1 : 0;
}

/*
 * Park the current (panicked but re-schedulable) CPU until @timeout
 * milliseconds of wall time have elapsed, then force an emergency restart.
 * Never returns.
 *
 * NOTE(review): schedule_timeout() is called without setting the task
 * state first, so each call may return early with the task still
 * runnable; the while loop re-checks elapsed wall time, which keeps the
 * overall timeout correct — confirm this is intentional.
 */
static void panic_schedule_timeout(unsigned long timeout)
{
	unsigned long start, now, interval;

	start = jiffies;
	interval = 0;

	/*
	 * Wait a while to prevent deadlock.
	 * Can't delete those lines, there may be the condition that the
	 * shutdown procedure is too long: systemd is waiting for a process
	 * to stop, but something makes the process wait forever...
	 */
	while (interval < timeout) {
		/* loop here until the irq is received or timeout */
		schedule_timeout(msecs_to_jiffies(timeout));
		now = jiffies;
		interval = jiffies64_to_msecs(now - start);
	}

	pr_emerg("timer timeout, start emergency restart at cpu #%d\n",
			smp_processor_id());
	/*
	 * Can't back to the original panic procedure.
	 * Here the same as original emergency reboot procedure.
	 */
	if (panic_reboot_mode != REBOOT_UNDEFINED)
		reboot_mode = panic_reboot_mode;
	safe_emergency_restart();

	unreachable();
}

/*
 * Runs on the panic stack (via arch_call_on_panic_stack()): repair the
 * CPU's interrupt/preempt state so it can schedule again, optionally
 * queue the kexec work on the roll CPU, then park in the timeout loop.
 *
 * @arg encodes a bool: true on the first panic (queue the rollback work),
 * false when joining an already-running rollback.
 *
 * Must be `noinline` to prevent inlining: the caller switched stacks, so
 * this must be a real call frame on the new stack.
 */
static noinline void irq_kexec_ctx(void *arg)
{
	bool roll = (bool)arg;
	unsigned long timeout = get_kexec_timeout();

	/* don't allow the ctx migrate to another cpu */
	set_cpu_affinity();

	/* restart the pmu/hrtimer sources that may have fired the panic */
	if (fixup_lockup() != 0)
		return;
	/* end interrupt */
	if (exit_irq_ctx() != 0)
		return;

	/* clear all nesting counts so this context becomes preemptible */
	preempt_count_set(PREEMPT_ENABLED);
	/* enable hardirq to allow responsing irq */
	arch_enable_irq();

	/*
	 * If `panic()` comes from the `watchdog_timer_fn()`, the watchdog
	 * routine won't work, because the routine don't return, and can't
	 * restart. Therefore here set hrtimer triggered by hwirq to monitor
	 * the paniced cpu.
	 *
	 * Case: dead loop in softirq after 1st panic
	 */
	reboot_hrtimer_init(timeout);

	if (roll)
		queue_work_on(roll_cpu, system_highpri_wq, &irq_kexec_worker);

	pr_emerg("Start timer monitor timer at cpu #%d\n", smp_processor_id());
	/* flush the message */
	printk_safe_flush_on_roll();

	/* never returns — either the rollback succeeds or we restart */
	panic_schedule_timeout(timeout);
}

/*
 * Switch to the panic stack and run irq_kexec_ctx() there. @roll is true
 * when this CPU should queue the kexec work (first panic).
 */
static void irq_kexec_trampoline(bool roll)
{
	arch_call_on_panic_stack((void *)roll, irq_kexec_ctx);

	/*
	 * Returning here is safe: irq_kexec_ctx() only comes back when the
	 * eoi handler was never set during irq initialization, which means
	 * an eoi has never been sent.
	 */
	pr_err("rollback failed, back to the original panic procedure\n");
}

/*
 * Handle the first panic in the system: tell the angel monitor core (if
 * any) to step into recycle status, then either run kernel_kexec()
 * directly (task context) or bounce through the irq trampoline
 * (interrupt context).
 */
static void first_panic_trampoline(void)
{
#ifdef CONFIG_EULEROS_ANGEL_CORE_MONITOR
	/* notify the monitor core to step into recycle status */
	if (is_angel_core_section_valid())
		retire_angel();
#endif /* CONFIG_EULEROS_ANGEL_CORE_MONITOR */

	/*
	 * We can't call kernel_exec() in irq context because of the
	 * `device_shutdown()` during the procedure. Some shutdown
	 * callbacks, like `sd_shutdown()`, will sleep and lead to
	 * re-scheduling or irqs. However, if the irq preempted the idle
	 * context, re-scheduling needs the preemption count set up, and
	 * the irq needs an eoi to make the current irq inactive because
	 * nested irqs are disallowed.
	 *
	 * If we ignore `device_shutdown()`, the next step
	 * `migrate_to_reboot_cpu()` will also lead to crash the same as
	 * `device_shutdown()` if the current cpu isn't `reboot_cpu`.
	 *
	 * Therefore, a schedulable context is needed for the current cpu.
	 * If the task is in a non-sleepable context, we can switch stacks
	 * and set the preempt count and irq status to make the current
	 * task re-schedulable.
	 */
	if (!in_interrupt()) {
		pr_emerg("start quick_kernel_kexec roll in non-irq ctx\n");
		kernel_kexec();
	} else {
		pr_emerg("start quick_kernel_kexec roll in irq ctx\n");
		/* tell arch code (x86) to stop other cores via NMI */
		WRITE_ONCE(kexec_panic_rollback, true);

		irq_kexec_trampoline(true);
	}
}

/*
 * Handle a panic occurring while another CPU is already rolling back.
 * In task context, just park in the timeout loop (the rolling CPU does
 * the work); in interrupt context, unwind the irq state first but do not
 * queue another kexec work (roll == false).
 */
static void second_panic_trampoline(void)
{
	if (in_interrupt()) {
		irq_kexec_trampoline(false);
		return;
	}

	panic_schedule_timeout(get_kexec_timeout());
}

static bool set_panic_cpu(int cpu, int nested)
{
	return !test_and_set_bit(cpu, panic_cpus);
}

/* True when @cpu has already entered panic handling at least once. */
static bool cpu_nested_panic(int cpu)
{
	return test_bit(cpu, panic_cpus) != 0;
}

void nmi_emergey_restart(int cpu)
{
	/*
	 * Someone hangs during kexec procedure, nmi detects the condition,
	 * just emergency restart after backtrace.
	 */
	if (!kexec_in_progress || !is_quick_rollback_enable() || !has_quick_rollback_ability())
		return;

	if (cpu_nested_panic(cpu)) {
		pr_emerg("emergency restart by NMI\n");
		safe_emergency_restart();
	}
}

/*
 * Choose the CPU that will execute the rollback work: prefer reboot_cpu,
 * fall back to the first online CPU. Hotplug is disabled first so the
 * choice stays valid. The chosen CPU is also marked in panic_cpus so a
 * later panic on it is detected (see cpu_nested_panic()).
 */
static void select_roll_cpu(void)
{
	int cpu = reboot_cpu;

	/* disable hotplug to prevent reboot_cpu migrate */
	cpu_hotplug_disable();
	if (!cpu_online(cpu))
		cpu = cpumask_first(cpu_online_mask);

	/* set rolling cpu */
	set_bit(cpu, panic_cpus);
	roll_cpu = cpu;
}

#ifdef CONFIG_PREEMPT_COUNT
/*
 * Refuse to roll when the panicked context holds an atomic (non-sleeping)
 * lock such as a spinlock: the rollback needs to schedule, which is
 * illegal there. Returns 0 when rolling is allowed, -1 otherwise.
 */
static int detect_atomic_lock(void)
{
	unsigned int atomic_lock_nr;

	/*
	 * preempt count format:
	 *           PREEMPT_MASK:	0x0000000ff
	 *           SOFTIRQ_MASK:	0x00000ff00
	 *           HARDIRQ_MASK:	0x0000f0000
	 *               NMI_MASK:	0x000f00000
	 *  PREEMPT_NEED_RESCHED (x86):	0x080000000
	 *  PREEMPT_NEED_RESCHED (arm):	0x100000000
	 *
	 * If the PREEMPT_MASK bits of `preempt_count()` aren't 0, a
	 * non-sleeping lock (e.g. spin lock) is held. Try its best to roll.
	 */
	atomic_lock_nr = (preempt_count() & PREEMPT_MASK);
	if (atomic_lock_nr != 0) {
		pr_crit("In atomic lock context (nr %u), give up rolling\n",
				atomic_lock_nr);
		return -1;
	}

	return 0;
}
#else /* CONFIG_PREEMPT_COUNT */
/* Without preempt counting, held atomic locks can't be detected; allow roll. */
static int detect_atomic_lock(void) { return 0; }
#endif /* CONFIG_PREEMPT_COUNT */

/*
 * Check whether the overall rollback has exceeded its time budget.
 *
 * The first caller records the start timestamp (jiffies) and gets "not
 * timed out"; later callers compare the elapsed time (ms) against
 * get_kexec_timeout().
 *
 * Returns true when the rollback has been running longer than the
 * configured timeout, false otherwise.
 *
 * NOTE(review): the read-then-write of roll_event_start is not atomic;
 * two CPUs panicking simultaneously could both take the init branch.
 * Harmless here (both write nearly identical jiffies values), but worth
 * confirming.
 */
static bool rolling_timeout(void)
{
	/* fix: this is a boolean flag — it was declared `int` */
	bool timeout = false;

	if (READ_ONCE(roll_event_start) == 0) {
		WRITE_ONCE(roll_event_start, jiffies);
	} else {
		unsigned long now, interval;

		now = jiffies;
		interval = jiffies64_to_msecs(now - READ_ONCE(roll_event_start));

		if (interval >= get_kexec_timeout())
			timeout = true;
	}

	return timeout;
}

bool try_own_printk_locks(void);
bool try_own_safe_printk_locks(void);

/*
 * Entry point called from panic()/die(): decide whether this CPU can
 * perform (or join) a kexec rollback instead of dying.
 *
 * Policy:
 * - 1st panic system-wide: pick a roll CPU and start the rollback.
 * - Repeated panic on the same CPU: give up, fall back to panic path.
 * - Panic on the CPU that is rolling back: give up.
 * - Panic on another CPU while rolling: park it in the timeout loop.
 */
void try_rollback(void)
{
	int cpu = smp_processor_id();
	bool can_roll;
	unsigned long flags;

	suppress_printk = 1;

	if (rolling_timeout())
		return;

	/* panic may happened when `logbuf_lock` or `safe_read_lock` is locked. */
	if (!try_own_printk_locks() || !try_own_safe_printk_locks())
		return;

	if (detect_atomic_lock() != 0)
		return;

	spin_lock_irqsave(&nested_lock, flags);
	nested += 1;
	can_roll = set_panic_cpu(cpu, nested);
	spin_unlock_irqrestore(&nested_lock, flags);

	if (nested == 0) {
		select_roll_cpu();
		first_panic_trampoline();
	} else if (!can_roll) {
		/*
		 * Fix: the original message was a string literal split across
		 * two source lines with no continuation, which does not
		 * compile; rejoined into a single literal.
		 */
		pr_emerg("Detect panic then panic in the same cpu #%d, step in the original procedure.\n",
				cpu);
	} else {
		/* wait roll_cpu ready */
		while (roll_cpu == -1)
			udelay(1);

		if (roll_cpu == cpu) {
			pr_emerg("Detect panic in rolling cpu #%d, give up roll\n",
					roll_cpu);
		} else {
			pr_emerg("Detect %d times panic in cpu #%d, try to continue roll.\n",
					nested + 1, cpu);
			second_panic_trampoline();
		}
	}
}

/*
 * Rollback entry used during early boot; only called from the
 * `IPI_KEXEC_ROLLBACK` irq, so it is safe to use without lock checks.
 * Restarts immediately when SMP isn't up or rollback isn't possible;
 * otherwise queues the kexec work on the selected roll CPU.
 */
void boot_try_rollback(void)
{
	static DECLARE_WORK(boot_kexec_worker, irq_kexec_work);

#ifdef CONFIG_EULEROS_ANGEL_CORE_MONITOR
	/*
	 * The angel core starts after `init_IRQ()`. The rollback can happen
	 * between `angel_start_monitor()` and `workqueue_init()`, just execute
	 * roll directly, device initialization begins after workqueue init.
	 * After workqueue works, it can use workqueue to execute roll.
	 */
	if (!get_angel_core_event())
		return;

	clear_angel_core_event();
#endif /* CONFIG_EULEROS_ANGEL_CORE_MONITOR */

	/* don't care about the condition before smp init  */
	if (!is_smp_init_finished()) {
		pr_emerg("smp not finish, need reboot....\n");
		safe_emergency_restart();
	} else if (!is_quick_rollback_enable() || !has_quick_rollback_ability()) {
		pr_emerg("don't have roll ability, restart!\n");
		safe_emergency_restart();
	}

	/*
	 * This routine is just called from angel core irq, it's safe to start
	 * workqueue, then return directly. `check_rolling_cpu()` just tag.
	 */
	select_roll_cpu();
	queue_work_on(roll_cpu, system_highpri_wq, &boot_kexec_worker);
}
