// SPDX-License-Identifier: GPL-2.0
/*
 * Detect if the cfs tasks are starved on a system
 *
 * started by Wenyu Liu, Copyright (C) 2022 Huawei, Inc.
 */
#include <linux/sched/isolation.h>
#include <linux/kthread.h>
#include <asm/irq_regs.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/smpboot.h>

#include "cfs_wdt.h"

static enum cpuhp_state cfs_wdt_online;

unsigned int cfs_wdt_mode = CFS_WDT_ENABLED;
static bool cfs_watchdog_initialized __read_mostly;
static unsigned int cfs_watchdog_thresh_ms = CFS_WD_THRESH_DEFAULT;
static unsigned int cfs_watchdog_period_ms = CFS_WD_SAMPLE_PERIODE_DEFAULT;
static u64 __read_mostly cfs_watchdog_sample_period;
/* cpumask for starved detector */
struct cpumask cfs_watchdog_cpumask __read_mostly;
static struct cpumask cfs_wdt_allowed_cpumask __read_mostly;
/* mutex for protecting detector reconfigure operation */
static DEFINE_MUTEX(cfs_watchdog_mutex);
/* per-cpu data for hrtimers */
static DEFINE_PER_CPU(struct hrtimer, cfs_watchdog_hrtimer);
static DEFINE_PER_CPU(unsigned long, cfs_watchdog_touch_ts);
static DEFINE_PER_CPU(unsigned long, cfs_watchdog_report_ts);
/* per-cpu data for cfs watchdog touch work */
static DEFINE_PER_CPU(struct work_struct, cfs_touch_work);
static DEFINE_PER_CPU(struct completion, cfs_detector_completion);

/* cfs_wdt log rate limit, lifted from the sys subsystem.
 *
 * This enforces a rate limit: not more than 5 warn messages
 * every 3 minutes by default.
 */
DEFINE_RATELIMIT_STATE(cfs_wd_rs, 3 * 60 * HZ, 5);

typedef void (*show_regs_fn)(struct pt_regs *);

static show_regs_fn show_regs_ptr;

static void cfs_watchdog_enable(unsigned int cpu);
static void cfs_watchdog_disable(unsigned int cpu);

static unsigned long get_timestamp(void)
{
	/* get timestamp with ms */
	return sched_clock() >> 20LL;  /* 2^20 ~= 10^6 */
}

static void update_cfs_report_ts(void)
{
	/* Open a new starvation-report window on this CPU. */
	unsigned long now = get_timestamp();

	__this_cpu_write(cfs_watchdog_report_ts, now);
}

static void update_cfs_touch_ts(void)
{
	/* Record that the touch path ran on this CPU ... */
	unsigned long ts = get_timestamp();

	__this_cpu_write(cfs_watchdog_touch_ts, ts);
	/* ... and restart the report window from the same moment. */
	update_cfs_report_ts();
}

/* Current starvation threshold, in milliseconds. */
static inline unsigned int get_cfs_starved_thresh(void)
{
	return cfs_watchdog_thresh_ms;
}

static void set_detect_period(void)
{
	/* Translate the sampling period from ms into ns for the hrtimer. */
	cfs_watchdog_sample_period = (u64)NSEC_PER_MSEC * cfs_watchdog_period_ms;
}

/*
 * Touch work body, executed by a kworker (a CFS task): the fact that it
 * runs at all proves CFS tasks can still be scheduled on this CPU, so
 * refresh the touch timestamp and signal the detector that the round-trip
 * completed.
 */
static void cfs_touch_work_fn(struct work_struct *work)
{
	update_cfs_touch_ts();
	complete(this_cpu_ptr(&cfs_detector_completion));
}

/*
 * Return a non-zero starvation duration (now - touch_ts, in ms) when the
 * current report window has exceeded the threshold; 0 otherwise.
 *
 * @touch_ts:  last time (ms) the touch work ran on this CPU
 * @period_ts: start (ms) of the current report window
 * @now:       current timestamp (ms)
 *
 * Detection is skipped entirely when the watchdog mode is off or the
 * threshold is 0.
 * NOTE(review): the unsigned long difference is returned as int; a duration
 * beyond INT_MAX ms would be truncated -- confirm acceptable.
 */
static int is_cfs_starved(unsigned long touch_ts,
			  unsigned long period_ts,
			  unsigned long now)
{
	if (cfs_wdt_mode && cfs_watchdog_thresh_ms) {
		if (time_after(now, period_ts + get_cfs_starved_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* Queue the touch work on the local CPU so it must be run by a CFS task. */
static void cfs_wdt_touch_work(void)
{
	queue_work_on(smp_processor_id(), system_wq,
		      this_cpu_ptr(&cfs_touch_work));
}

/*
 * Per-CPU hrtimer callback: re-arm the timer, kick a touch-work round-trip
 * when the previous one finished, and warn (rate limited) if the touch work
 * has not run within the starvation threshold.
 *
 * Fix: printk messages must be newline-terminated; without the trailing
 * "\n" consecutive watchdog reports can be merged or deferred in the log.
 */
static enum hrtimer_restart cfs_watchdog_timer_fn(struct hrtimer *hrtimer)
{
	int cpu = smp_processor_id();
	unsigned long touch_ts, period_ts, now;
	struct pt_regs *regs = get_irq_regs();
	int duration;

	if (!cfs_wdt_mode)
		return HRTIMER_NORESTART;

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(cfs_watchdog_sample_period));

	/*
	 * Only queue a new touch work once the previous round-trip
	 * completed, so at most one work item is in flight per CPU.
	 */
	if (completion_done(this_cpu_ptr(&cfs_detector_completion))) {
		reinit_completion(this_cpu_ptr(&cfs_detector_completion));
		cfs_wdt_touch_work();
	}

	now = get_timestamp();
	period_ts = __this_cpu_read(cfs_watchdog_report_ts);
	touch_ts = __this_cpu_read(cfs_watchdog_touch_ts);
	/* Check for a cfs starved. */
	duration = is_cfs_starved(touch_ts, period_ts, now);
	if (unlikely(duration)) {
		/* Start period for the next cfs_starved warning. */
		update_cfs_report_ts();

		if (!__ratelimit(&cfs_wd_rs)) {
			pr_debug("DEBUG: CFS watchdog - CPU#%d CFS task starved for %dms [%s: %d]\n",
				  cpu, duration, current->comm, task_pid_nr(current));
			return HRTIMER_RESTART;
		}

		pr_warn("WARN: CFS watchdog - CPU#%d CFS task starved for %dms [%s: %d]\n",
			 cpu, duration, current->comm, task_pid_nr(current));

		/* Prefer interrupted-context registers; fall back to a stack dump. */
		if (regs && show_regs_ptr)
			show_regs_ptr(regs);
		else
			dump_stack();
	}
	return HRTIMER_RESTART;
}

/*
 * Arm the starvation detector on the local CPU. Must run on @cpu itself
 * (enforced by the WARN_ON_ONCE) so the per-cpu accessors hit the right data.
 */
static void cfs_watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&cfs_watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&cfs_detector_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/* Start in the "round-trip done" state so the first tick queues work. */
	init_completion(done);
	complete(done);

	/*
	 * Fix: initialize the timestamps BEFORE arming the timer, so the
	 * first expiry cannot observe stale values left over from a previous
	 * enable/disable cycle and emit a bogus starvation report.
	 */
	update_cfs_touch_ts();

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer->function = cfs_watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(cfs_watchdog_sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

/*
 * Disarm the starvation detector on the local CPU. Must run on @cpu itself.
 *
 * After cancelling the hrtimer, wait for any touch work the timer already
 * queued to finish (the work completes cfs_detector_completion), so no
 * work item is left referencing per-cpu state after disable returns.
 */
static void cfs_watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&cfs_watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/* No new touch work can be queued once the timer is cancelled. */
	hrtimer_cancel(hrtimer);

	wait_for_completion(this_cpu_ptr(&cfs_detector_completion));
}

/* smp_call_on_cpu() thunk: disable the detector on the executing CPU. */
static int cfs_wdt_stop_fn(void *data)
{
	unsigned int this_cpu = smp_processor_id();

	cfs_watchdog_disable(this_cpu);
	return 0;
}

/*
 * Stop the detector on every CPU it is currently armed on and clear the
 * "armed" mask. Called from cfs_detector_reconfigure() under
 * cpus_read_lock() and cfs_watchdog_mutex.
 */
static void cfs_detector_stop_all(void)
{
	int cpu;

	/* Nothing to stop before cfs_detector_setup() has run once. */
	if (!cfs_watchdog_initialized)
		return;

	/*
	 * NOTE(review): the smp_call_on_cpu() return value is ignored; for a
	 * CPU that is offline the call fails silently -- confirm intended.
	 */
	for_each_cpu(cpu, &cfs_wdt_allowed_cpumask)
		smp_call_on_cpu(cpu, cfs_wdt_stop_fn, NULL, false);

	pr_info("CFS watchdog - stop starving detectors. [cfs_wdt cpumask: %*pb]\n",
		 cpumask_pr_args(&cfs_wdt_allowed_cpumask));

	cpumask_clear(&cfs_wdt_allowed_cpumask);
}

/* smp_call_on_cpu() thunk: enable the detector on the executing CPU. */
static int cfs_wdt_start_fn(void *data)
{
	unsigned int this_cpu = smp_processor_id();

	cfs_watchdog_enable(this_cpu);
	return 0;
}

/*
 * Arm the detector on every CPU in the configured cpumask and record that
 * set in cfs_wdt_allowed_cpumask. Called from cfs_detector_reconfigure()
 * under cpus_read_lock() and cfs_watchdog_mutex.
 * NOTE(review): the mask is not intersected with cpu_online_mask here;
 * offline CPUs are picked up later by the cpuhp online callback -- confirm.
 */
static void cfs_detector_start_all(void)
{
	int cpu;

	cpumask_copy(&cfs_wdt_allowed_cpumask, &cfs_watchdog_cpumask);

	for_each_cpu(cpu, &cfs_wdt_allowed_cpumask)
		smp_call_on_cpu(cpu, cfs_wdt_start_fn, NULL, false);

	pr_info("CFS watchdog - starving detectors start, [cfs_wdt cpumask: %*pb]\n",
		cpumask_pr_args(&cfs_wdt_allowed_cpumask));
}

/*
 * Apply the current mode/threshold/period/cpumask settings: tear down all
 * running detectors, recompute the sample period, and restart if enabled.
 */
static void cfs_detector_reconfigure(void)
{
	/* Serialize against CPU hotplug while detectors are restarted. */
	cpus_read_lock();

	cfs_detector_stop_all();
	set_detect_period();

	/* Restart only when the watchdog is enabled and has a threshold. */
	if (cfs_wdt_mode && cfs_watchdog_thresh_ms)
		cfs_detector_start_all();

	cpus_read_unlock();
}

static void cfs_touch_work_init(void)
{
	int cpu;
	struct work_struct *work;

	for_each_cpu(cpu, cpu_possible_mask) {
		work = per_cpu_ptr(&cfs_touch_work, cpu);
		INIT_WORK(work, cfs_touch_work_fn);
	}
}

/*
 * Boot-time setup: initialize the per-cpu work items, start the detectors
 * per the module parameters, and mark the watchdog initialized.
 */
static __init void cfs_detector_setup(void)
{
	/*
	 * If sysfs is off and cfs_watchdog got disabled on the
	 * command line, nothing to do here.
	 */
	if (!IS_ENABLED(CONFIG_SYSFS) && !(cfs_wdt_mode && cfs_watchdog_thresh_ms))
		return;

	mutex_lock(&cfs_watchdog_mutex);
	cfs_touch_work_init();
	cfs_detector_reconfigure();
	/* From here on, cfs_detector_stop_all() will actually stop timers. */
	cfs_watchdog_initialized = true;
	mutex_unlock(&cfs_watchdog_mutex);
}

#ifdef CONFIG_SYSFS
static struct kobject *cfs_watchdog_kobj;

define_one_rw(mode);
define_one_rw(thresh_ms);
define_one_rw(sample_period_ms);
define_one_rw(cpusmask);
define_one_rw(log_ratelimit);
define_one_rw(log_ratelimit_burst);

static struct attribute *cfs_watchdog_attrs[] = {
	&mode.attr,
	&thresh_ms.attr,
	&sample_period_ms.attr,
	&cpusmask.attr,
	&log_ratelimit.attr,
	&log_ratelimit_burst.attr,
	NULL
};
ATTRIBUTE_GROUPS(cfs_watchdog);

/* Sanitize the requested cpumask and apply the new sysfs settings. */
static void sys_cfs_watchdog_update(void)
{
	cpumask_and(&cfs_watchdog_cpumask, &cfs_watchdog_cpumask,
		    cpu_possible_mask);
	cfs_detector_reconfigure();
}

/* Show the current watchdog mode. Fix: cfs_wdt_mode is unsigned, use %u. */
static ssize_t mode_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", cfs_wdt_mode);
}

/*
 * Set the watchdog mode from sysfs. Values above CFS_WD_MODE_MAX_VAL are
 * rejected; the detectors are reconfigured only when the mode changes.
 */
static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t count)
{
	unsigned int val;

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;
	if (val > CFS_WD_MODE_MAX_VAL)
		return -EINVAL;

	mutex_lock(&cfs_watchdog_mutex);
	if (val != cfs_wdt_mode) {
		cfs_wdt_mode = val;
		sys_cfs_watchdog_update();
	}
	mutex_unlock(&cfs_watchdog_mutex);

	return count;
}

/* Show the starvation threshold in milliseconds. */
static ssize_t thresh_ms_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", cfs_watchdog_thresh_ms);
}

/*
 * Set the starvation threshold (ms) from sysfs. The sample period is kept
 * at no more than 1/5 of the threshold; when the new threshold forces a
 * smaller period, the period is shrunk and the detectors reconfigured.
 */
static ssize_t thresh_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t count)
{
	unsigned int thresh;

	if (kstrtouint(buf, 0, &thresh))
		return -EINVAL;
	if (thresh < CFS_WD_THRESH_MIN)
		return -EINVAL;

	mutex_lock(&cfs_watchdog_mutex);

	if (thresh == cfs_watchdog_thresh_ms)
		goto out;

	cfs_watchdog_thresh_ms = thresh;

	/*
	 * If the current sample period already satisfies "period <= thresh/5",
	 * the running watchdog does not need to be reset.
	 */
	if (thresh >= cfs_watchdog_period_ms * 5)
		goto out;

	/* Shrink the sample period to follow the new, smaller threshold. */
	cfs_watchdog_period_ms = thresh / 5;

	if (cfs_wdt_mode)
		sys_cfs_watchdog_update();
out:
	mutex_unlock(&cfs_watchdog_mutex);

	return count;
}

/* Show the sample period in ms. Fix: the value is unsigned, use %u. */
static ssize_t sample_period_ms_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", cfs_watchdog_period_ms);
}

/*
 * Set the sample period (ms) from sysfs; bounded below by
 * CFS_WD_SAMPLE_PERIODE_MIN and above by 1/5 of the threshold.
 *
 * Fix: the original assigned -EINVAL into the unsigned size_t @count and
 * returned that; use a signed ssize_t return variable instead of relying
 * on the unsigned->signed round-trip.
 */
static ssize_t sample_period_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t count)
{
	unsigned int new;
	ssize_t ret = count;

	if (kstrtouint(buf, 0, &new))
		return -EINVAL;

	mutex_lock(&cfs_watchdog_mutex);

	/* sample period max value is equals to 1/5 of the thresh */
	if (new < CFS_WD_SAMPLE_PERIODE_MIN || new > cfs_watchdog_thresh_ms / 5) {
		ret = -EINVAL;
		goto out;
	}

	if (new == cfs_watchdog_period_ms)
		goto out;

	cfs_watchdog_period_ms = new;

	if (cfs_wdt_mode)
		sys_cfs_watchdog_update();

out:
	mutex_unlock(&cfs_watchdog_mutex);
	return ret;
}

/* Show the configured detection cpumask in bitmap format. */
static ssize_t cpusmask_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%*pb\n", cpumask_pr_args(&cfs_watchdog_cpumask));
}

/*
 * Set the detection cpumask from sysfs (restricted to possible CPUs).
 *
 * Fix: avoid placing a full cpumask_t on the kernel stack -- with large
 * NR_CPUS that can exceed 1KiB; use cpumask_var_t with a heap allocation
 * (may now also return -ENOMEM).
 */
static ssize_t cpusmask_store(struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t count)
{
	cpumask_var_t mask;
	ssize_t ret = count;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	if (cpumask_parse(buf, mask)) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&cfs_watchdog_mutex);
	cpumask_and(&cfs_watchdog_cpumask, mask, cpu_possible_mask);
	if (cfs_wdt_mode)
		sys_cfs_watchdog_update();
	mutex_unlock(&cfs_watchdog_mutex);
out:
	free_cpumask_var(mask);
	return ret;
}

/* Show the warn-message rate-limit interval in seconds. */
static ssize_t log_ratelimit_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cfs_wd_rs.interval / HZ);
}

/*
 * Set the warn-message rate-limit interval in seconds.
 *
 * Fix: the original accepted negative values and values whose
 * multiplication by HZ overflows a signed int (undefined behavior);
 * reject both.
 */
static ssize_t log_ratelimit_store(struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t count)
{
	int interval;

	if (kstrtoint(buf, 0, &interval))
		return -EINVAL;

	if (interval < 0 || interval > INT_MAX / HZ)
		return -EINVAL;

	cfs_wd_rs.interval = interval * HZ;

	return count;
}

/* Show the warn-message rate-limit burst count. */
static ssize_t log_ratelimit_burst_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cfs_wd_rs.burst);
}

/*
 * Set the warn-message rate-limit burst count.
 * Fix: reject negative burst values, which are meaningless for a
 * ratelimit burst and were previously accepted.
 */
static ssize_t log_ratelimit_burst_store(struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t count)
{
	int burst;

	if (kstrtoint(buf, 0, &burst))
		return -EINVAL;

	if (burst < 0)
		return -EINVAL;

	cfs_wd_rs.burst = burst;

	return count;
}

/* Create /sys/kernel/cfs_watchdog and its attribute groups. */
static int __init cfs_watchdog_sysfs_init(void)
{
	int ret;

	cfs_watchdog_kobj = kobject_create_and_add("cfs_watchdog", kernel_kobj);
	if (!cfs_watchdog_kobj)
		return -ENOMEM;

	ret = sysfs_create_groups(cfs_watchdog_kobj, cfs_watchdog_groups);
	if (ret)
		/* Drop the reference taken by kobject_create_and_add(). */
		kobject_put(cfs_watchdog_kobj);

	return ret;
}

/*
 * Remove the sysfs attribute groups and release the kobject.
 *
 * Fix: kobject_del() only unlinks the kobject; the reference from
 * kobject_create_and_add() was never dropped, leaking the kobject.
 * kobject_put() both deletes and releases it.
 */
static void cfs_watchdog_sysfs_exit(void)
{
	sysfs_remove_groups(cfs_watchdog_kobj, cfs_watchdog_groups);
	kobject_put(cfs_watchdog_kobj);
}
#else
/* CONFIG_SYSFS disabled: watchdog runs with module-parameter settings only. */
static int __init cfs_watchdog_sysfs_init(void) { return 0; }
static void cfs_watchdog_sysfs_exit(void) { }
#endif /* CONFIG_SYSFS */

/*
 * Validate the mode/thresh module parameters supplied at load time and
 * derive the sampling period as 1/5 of the threshold.
 */
static int check_module_params(void)
{
	if (cfs_wdt_mode > CFS_WD_MODE_MAX_VAL ||
	    cfs_watchdog_thresh_ms < CFS_WD_THRESH_MIN)
		return -EINVAL;

	cfs_watchdog_period_ms = cfs_watchdog_thresh_ms / 5;
	return 0;
}

static int cfs_wdt_cpu_online(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &cfs_wdt_allowed_cpumask))
		cfs_watchdog_enable(cpu);
	return 0;
}

static int cfs_wdt_cpu_offline(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &cfs_wdt_allowed_cpumask))
		cfs_watchdog_disable(cpu);
	return 0;
}

/*
 * Module init: validate parameters, resolve show_regs, register the cpuhp
 * callbacks, start the detectors, and create the sysfs interface.
 *
 * Fixes: the sysfs-init failure path previously returned with the cpuhp
 * state still registered and the detectors still running (leaked forever,
 * since a failed init never gets a matching exit); also corrects the
 * "isolacpus" typo in the log message.
 */
static int __init cfs_watchdog_init(void)
{
	int err;
	int rc;

	err = check_module_params();
	if (err)
		return err;

	/* show_regs is not exported; resolve it dynamically (may be NULL). */
	show_regs_ptr = (show_regs_fn)kallsyms_lookup_name_wrap("show_regs");

	if (housekeeping_enabled(HK_FLAG_DOMAIN))
		pr_info("Disabling cfs watchdog on isolcpus cores by default\n");

	/* Default detection set: every housekeeping (non-isolated) CPU. */
	cpumask_copy(&cfs_watchdog_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));

	rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cfs_watchdog_timer:online",
				cfs_wdt_cpu_online, cfs_wdt_cpu_offline);
	if (rc < 0) {
		pr_err("CFS watchdog - init for hotplug failed!\n");
		return rc;
	}
	cfs_wdt_online = rc;

	cfs_detector_setup();

	err = cfs_watchdog_sysfs_init();
	if (err) {
		pr_err("Failed to init sysfs of cfs_watchdog, error:%d.\n", err);
		goto err_teardown;
	}

	/* Pin the module: cfs_wdt is not allowed to be unloaded. */
	__module_get(THIS_MODULE);
	return 0;

err_teardown:
	mutex_lock(&cfs_watchdog_mutex);
	cfs_wdt_mode = 0;
	cfs_detector_reconfigure();
	mutex_unlock(&cfs_watchdog_mutex);
	cpuhp_remove_state_nocalls(cfs_wdt_online);
	return err;
}

/*
 * Module exit: disable the watchdog, unregister the cpuhp state and tear
 * down sysfs.
 * NOTE(review): init takes an extra module reference via __module_get(),
 * so in practice this unload path should be unreachable -- confirm.
 */
static void cfs_watchdog_exit(void)
{
	mutex_lock(&cfs_watchdog_mutex);
	/* Force mode off so a reconfigure stops every running detector. */
	if (cfs_wdt_mode) {
		cfs_wdt_mode = 0;
		cfs_detector_reconfigure();
	}

	cpuhp_remove_state_nocalls(cfs_wdt_online);

	cfs_watchdog_sysfs_exit();
	mutex_unlock(&cfs_watchdog_mutex);
	pr_info("cfs_wdt exit!\n");
}
module_init(cfs_watchdog_init);
module_exit(cfs_watchdog_exit);
MODULE_LICENSE("GPL");

module_param_named(cfs_watchdog_mode, cfs_wdt_mode, uint, 0);
MODULE_PARM_DESC(cfs_watchdog_mode, "CFS watchdog mode to set");
module_param(cfs_watchdog_thresh_ms, uint, 0);
MODULE_PARM_DESC(cfs_watchdog_thresh_ms, "CFS starving detector thresh");

