// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
 *
 * RAS CPU correctable errors collector.
 *
 * Collects the count of CPU correctable errors.
 *
 * We need to isolate a CPU core when a large number of correctable errors
 * are reported on that CPU core too often. This is done by calling remove_cec_cpu()
 * when the CE count exceeds the threshold value within a short time period.
 *
 * The cpu ce collector maintains a sliding time window with equal time slots to
 * store the ce counts corresponding to each time slot in a circular buffer. The
 * index of the buffer increases in the periodically scheduled work.
 * The time period of the work function is the total time period / number of time slots.
 * When the new ce count for a cpu is added, the sum of the most recent ce counts
 * stored in the buffer would be checked whether it exceeded the ce threshold value,
 * if so, a flag would be set to offline the cpu, kick a dedicated work
 * function and the cpu would be offlined by the work function.
 *
 * The CE count threshold value and time period are configurable through the
 * cpu_cec interface provided in the RAS debugfs.
 *
 * CPU CEC interface in the /sys/kernel/debug/ras/cpu_cec/
 * @disable: Disable the CPU CEs collector.
 * @time_period: Time period, in seconds, for the CPU CE count threshold
 * @threshold: Threshold value for the CPU CEs to offline the CPU core.
 */

#include <linux/cpu.h>
#include <linux/cpu_cec.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ras.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "debugfs.h"

/* Time period for the CPU CEs count threshold check, is 24hrs by default. */

#define RAS_CPU_CEC_DEFAULT_TIME_PERIOD	(24 * 60 * 60)  /* 24 hrs */
#define RAS_CPU_CEC_MIN_TIME_PERIOD	(1 * 60 * 60)   /* 1h */
#define RAS_CPU_CEC_MAX_TIME_PERIOD	(30 * 24 * 60 * 60)    /* one month */

/* Threshold value of the CPU corrected errors for isolating the CPU. */
#define RAS_CPU_CE_THRESHOLD	5000
#define RAS_CPU_CE_THRESHOLD_MIN 1

/* Flags indicates a cpu core to offline and has been offlined
 * due to the cpu CEs exceed threshold.
 */
#define RAS_CEC_OFFLINE_CPU	BIT(0)
#define RAS_CEC_CPU_OFFLINED	BIT(1)

/* sub divisions of the sliding time window */
#define RAS_CPU_CEC_NUM_TIME_SLOTS	10

/**
 * struct cpu_cec_list - Per CPU corrected error collector storage
 * @work:	work item that offlines the CPU in process context.
 * @ces_count:	The number of correctable errors collected.
 * @flag:	CEC flag (RAS_CEC_OFFLINE_CPU / RAS_CEC_CPU_OFFLINED).
 * @buf_ce_count:	buffer to store the most recent ce counts in each
 *			time slots of the sliding time window.
 * @buf_index:	buffer index corresponding to the current time slot.
 * @cpu:	cpu logical index.
 */
static struct cpu_cec_list {
	struct work_struct work;
	u64 ces_count;
	u64 flag;
	u64 buf_ce_count[RAS_CPU_CEC_NUM_TIME_SLOTS];
	u32 buf_index;
	u32 cpu;
} *cpu_cec_list;

static DEFINE_SPINLOCK(cpu_cec_lock);

/* Disable the CPU correctable error collector, enabled by default */
static u64 cpu_cec_disable = 1;

/* Amount of errors after which we offline the CPU. */
static u64 cpu_ce_threshold = RAS_CPU_CE_THRESHOLD;

/* Time period for the CPU CE count threshold check. */
static struct delayed_work cpu_cec_work;
static u64 cpu_cec_time_period = RAS_CPU_CEC_DEFAULT_TIME_PERIOD;

/*
 * cpu_cec_mod_work - (re)schedule the sliding time window work.
 * @interval: total time period of the sliding window, in seconds.
 *
 * The work runs once per time slot, i.e. every
 * interval / RAS_CPU_CEC_NUM_TIME_SLOTS seconds.
 * Does nothing while the collector is disabled.
 */
static void cpu_cec_mod_work(unsigned long interval)
{
	unsigned long delay;

	if (cpu_cec_disable)
		return;
	delay = (interval / RAS_CPU_CEC_NUM_TIME_SLOTS) * HZ;
	/*
	 * mod_delayed_work() takes a relative timeout, so use the
	 * relative rounding helper; round_jiffies() expects an absolute
	 * jiffies value and would produce a bogus delay here.
	 */
	mod_delayed_work(system_wq, &cpu_cec_work, round_jiffies_relative(delay));
}

/*
 * Periodic work advancing the sliding time window by one time slot and
 * rescheduling itself for the next slot.
 */
static void cpu_cec_work_fn(struct work_struct *work)
{
	int cpu;
	unsigned long flags;
	struct cpu_cec_list *cpu_cec;

	if (cpu_cec_disable)
		return;

	for_each_present_cpu(cpu) {
		cpu_cec = &cpu_cec_list[cpu];
		/* Advance the buffer index to the next time slot and clear
		 * that slot's ce count for every present cpu (not just the
		 * online ones), because a cpu could be offlined elsewhere
		 * and later come back online.
		 */
		spin_lock_irqsave(&cpu_cec_lock, flags);
		cpu_cec->buf_index = (cpu_cec->buf_index + 1) % RAS_CPU_CEC_NUM_TIME_SLOTS;
		cpu_cec->buf_ce_count[cpu_cec->buf_index] = 0;
		spin_unlock_irqrestore(&cpu_cec_lock, flags);
	}

	cpu_cec_mod_work(cpu_cec_time_period);
}

static int remove_cec_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}

/*
 * Work function to offline the cpu: the offlining must be done in
 * process context, and a dedicated work item avoids delaying it.
 */
static void cpu_cec_offline_work_fn(struct work_struct *work)
{
	int rc, i;
	unsigned long flags;
	struct cpu_cec_list *cpu_cec;

	if (cpu_cec_disable)
		return;

	cpu_cec = container_of(work, struct cpu_cec_list, work);
	if (!(cpu_cec->flag & RAS_CEC_OFFLINE_CPU))
		return;

	/* fix typo in user-visible message: "edceeded" -> "exceeded" */
	pr_info("Start to offline CPU%d, threshold exceeded!\n", cpu_cec->cpu);
	rc = remove_cec_cpu(cpu_cec->cpu);
	if (!rc) {
		/*
		 * Mark the cpu offlined and reset its window so that a
		 * later online starts with a clean count (see
		 * cpu_cec_add_ce()).
		 */
		spin_lock_irqsave(&cpu_cec_lock, flags);
		cpu_cec->flag &= ~RAS_CEC_OFFLINE_CPU;
		cpu_cec->flag |= RAS_CEC_CPU_OFFLINED;
		cpu_cec->buf_index = 0;
		for (i = 0; i < RAS_CPU_CEC_NUM_TIME_SLOTS; i++)
			cpu_cec->buf_ce_count[i] = 0;
		spin_unlock_irqrestore(&cpu_cec_lock, flags);
	} else {
		pr_warn("Failed to offline CPU%d, error %d\n", cpu_cec->cpu, rc);
	}
}

static void cpu_cec_check_threshold(int cpu)
{
	int i;
	u64 sum_ce_counts = 0;
	struct cpu_cec_list *cpu_cec;

	cpu_cec = &cpu_cec_list[cpu];
	for (i = 0; i < RAS_CPU_CEC_NUM_TIME_SLOTS; i++)
		sum_ce_counts += cpu_cec->buf_ce_count[i];

	if (sum_ce_counts >= cpu_ce_threshold) {
		cpu_cec->flag |= RAS_CEC_OFFLINE_CPU;
		cpu_cec->cpu = cpu;

		/* kick the work function to offline the cpu */
		schedule_work(&cpu_cec->work);
	}
}

/*
 * cpu_cec_add_ce: add CPU correctable error count to the CPU
 * correctable error collector.
 * @cpu: CPU index.
 * @ce_count: CPU correctable error count.
 *
 * Returns 0 on success (or when the cpu is already marked for
 * offlining), -ENODEV when the collector is unavailable, disabled, or
 * the cpu is not online.
 */
int cpu_cec_add_ce(int cpu, u64 ce_count)
{
	unsigned long flags;
	struct cpu_cec_list *cpu_cec;

	if (!cpu_cec_list || !cpu_online(cpu) || cpu_cec_disable)
		return -ENODEV;

	spin_lock_irqsave(&cpu_cec_lock, flags);
	cpu_cec = &cpu_cec_list[cpu];
	/* Offlining already pending: don't accumulate further counts. */
	if (cpu_cec->flag & RAS_CEC_OFFLINE_CPU) {
		spin_unlock_irqrestore(&cpu_cec_lock, flags);
		return 0;
	}

	/* reset the flag and ce counts for the cpu back online, which previously offlined here */
	if (cpu_cec->flag & RAS_CEC_CPU_OFFLINED) {
		cpu_cec->ces_count = 0;
		cpu_cec->flag = 0;
	}

	cpu_cec->ces_count += ce_count;
	cpu_cec->buf_ce_count[cpu_cec->buf_index] += ce_count;
	spin_unlock_irqrestore(&cpu_cec_lock, flags);
	cpu_cec_check_threshold(cpu);

	return 0;
}

/* debugfs read helper: fetch the u64 value behind @data. */
static int u64_get(void *data, u64 *val)
{
	const u64 *src = data;

	*val = *src;

	return 0;
}

/*
 * debugfs write handler for "disable": accepts 0 (enable) or 1
 * (disable). Toggling resets all per-CPU collector state and, when
 * enabling, restarts the sliding window work.
 */
static int cpu_cec_disable_set(void *data, u64 val)
{
	int cpu, i;
	unsigned long flags;
	struct cpu_cec_list *cpu_cec;

	/* val is a u64, so "val < 0" is always false; only check the upper bound */
	if (val > 1) {
		pr_warn("disable should be 0 or 1!\n");
		return -EINVAL;
	}

	if (cpu_cec_disable == val)
		return 0;

	*(u64 *)data = val;
	cpu_cec_disable = val;

	/* Start from a clean slate on every enable/disable transition. */
	spin_lock_irqsave(&cpu_cec_lock, flags);
	for_each_present_cpu(cpu) {
		cpu_cec = &cpu_cec_list[cpu];
		cpu_cec->ces_count = 0;
		cpu_cec->buf_index = 0;
		cpu_cec->flag = 0;
		for (i = 0; i < RAS_CPU_CEC_NUM_TIME_SLOTS; i++)
			cpu_cec->buf_ce_count[i] = 0;
	}
	spin_unlock_irqrestore(&cpu_cec_lock, flags);
	/* no-op when disabling; restarts the window work when enabling */
	cpu_cec_mod_work(cpu_cec_time_period);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(cpu_cec_disable_ops, u64_get,
			 cpu_cec_disable_set, "%lld\n");

/*
 * debugfs write handler for "time_period": validates the requested
 * window length (seconds) and reschedules the window work with it.
 */
static int cpu_cec_time_period_set(void *data, u64 val)
{
	if (val < RAS_CPU_CEC_MIN_TIME_PERIOD ||
	    val > RAS_CPU_CEC_MAX_TIME_PERIOD) {
		pr_warn("time_period should be between %d ~ %d!\n",
			RAS_CPU_CEC_MIN_TIME_PERIOD, RAS_CPU_CEC_MAX_TIME_PERIOD);
		return -EINVAL;
	}

	*(u64 *)data = val;
	cpu_cec_time_period = val;

	cpu_cec_mod_work(cpu_cec_time_period);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(cpu_cec_time_period_ops, u64_get,
			 cpu_cec_time_period_set, "%lld\n");

/*
 * debugfs write handler for "threshold": validates and stores the CE
 * count above which a CPU is offlined.
 */
static int cpu_ce_threshold_set(void *data, u64 val)
{
	if (val < RAS_CPU_CE_THRESHOLD_MIN || val > RAS_CPU_CE_THRESHOLD) {
		pr_warn("threshold should be between %d ~ %d!\n",
			RAS_CPU_CE_THRESHOLD_MIN, RAS_CPU_CE_THRESHOLD);
		return -EINVAL;
	}

	*(u64 *)data = val;
	cpu_ce_threshold = val;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(cpu_ce_threshold_ops, u64_get,
			 cpu_ce_threshold_set, "%lld\n");

/*
 * Create the cpu_cec debugfs directory and its disable/time_period/
 * threshold control files under the RAS debugfs root.
 *
 * Returns 0 on success, -ENOMEM on failure (all nodes removed again).
 */
static int __init create_debugfs_cpu_cec_nodes(void)
{
	struct dentry *d, *off, *tp, *threshold;

	/*
	 * debugfs_create_dir()/debugfs_create_file() return ERR_PTR() on
	 * failure on recent kernels (NULL only on older ones), so a bare
	 * NULL check never detects an error; use IS_ERR_OR_NULL() to
	 * cover both conventions.
	 */
	d = debugfs_create_dir("cpu_cec", ras_debugfs_dir);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Error creating RAS CPU CEC debugfs node.\n");
		return -ENOMEM;
	}

	off = debugfs_create_file("disable", 0600, d, &cpu_cec_disable,
				  &cpu_cec_disable_ops);
	if (IS_ERR_OR_NULL(off)) {
		pr_warn("Error creating cpu_cec_disable debugfs node.\n");
		goto err;
	}

	tp = debugfs_create_file("time_period", 0600, d, &cpu_cec_time_period,
				 &cpu_cec_time_period_ops);
	if (IS_ERR_OR_NULL(tp)) {
		pr_warn("Error creating cpu_ce_time_period debugfs node.\n");
		goto err;
	}

	threshold = debugfs_create_file("threshold",
					0600, d,
					&cpu_ce_threshold,
					&cpu_ce_threshold_ops);
	if (IS_ERR_OR_NULL(threshold)) {
		pr_warn("Error creating cpu_ce_threshold debugfs node.\n");
		goto err;
	}

	return 0;
err:
	debugfs_remove_recursive(d);

	return -ENOMEM;
}

/*
 * cpu_cec_init - allocate per-CPU collector state, create the debugfs
 * interface, enable the collector and start the sliding window work.
 *
 * Returns 0 on success or -ENOMEM; on failure the collector remains
 * disabled. Note: initcalls must return int, not void, to match
 * initcall_t.
 */
static int __init cpu_cec_init(void)
{
	int cpu;
	unsigned long delay = (RAS_CPU_CEC_DEFAULT_TIME_PERIOD /
			       RAS_CPU_CEC_NUM_TIME_SLOTS) * HZ;

	/*
	 * CPU ids may be sparse, so size the array by the highest
	 * possible id (nr_cpu_ids), not by num_present_cpus(); the
	 * latter can be smaller than the largest present cpu index,
	 * making cpu_cec_list[cpu] an out-of-bounds access.
	 */
	cpu_cec_list = kcalloc(nr_cpu_ids, sizeof(*cpu_cec_list), GFP_KERNEL);
	if (!cpu_cec_list) {
		cpu_cec_disable = 1;
		return -ENOMEM;
	}

	if (create_debugfs_cpu_cec_nodes()) {
		kfree(cpu_cec_list);
		cpu_cec_list = NULL;
		cpu_cec_disable = 1;
		return -ENOMEM;
	}

	for_each_present_cpu(cpu)
		INIT_WORK(&cpu_cec_list[cpu].work, cpu_cec_offline_work_fn);

	INIT_DELAYED_WORK(&cpu_cec_work, cpu_cec_work_fn);
	/*
	 * Setup succeeded: enable the collector, as documented ("enabled
	 * by default"); the failure paths above explicitly keep it
	 * disabled. Users may still turn it off via the debugfs
	 * "disable" node.
	 */
	cpu_cec_disable = 0;
	/* delay is relative, so use the relative rounding helper */
	schedule_delayed_work(&cpu_cec_work, round_jiffies_relative(delay));

	pr_info("RAS CPU correctable errors collector initialized.\n");

	return 0;
}

late_initcall(cpu_cec_init);
