// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2023.
 * Description: irq: merge high-precision timer can reduce interrupts and context switches
 * Author: langfei
 * Create: 2023-12-25
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/sched/isolation.h>

#include "tick-sched.h"
#include "time_high_precision.h"

/*
 * One-way 0 -> 1 switch, flipped via the "correct_proc_stat" sysfs file
 * (see correct_proc_stat_store()). When set, the per-CPU period_divisor
 * value additionally gates hrtimer_norestart_enable()/period_divisor_valid().
 */
int correct_proc_stat __cacheline_aligned;
/*
 * Per-CPU divisor written via the per-CPU "period_divisor" sysfs file.
 * Range-checked to 0..10 in period_divisor_store(); 0 selects deadline
 * (one-shot) mode on x86_64, non-zero selects periodic mode.
 */
DEFINE_PER_CPU(int, period_divisor);
/*
 * jiffies_64 timestamp of the last accepted sysfs write per CPU; used by
 * period_divisor_store() to rate-limit writes to at most one per second.
 */
static DEFINE_PER_CPU(u64, last_write_jiffies);

/*
 * hrtimer_norestart_enable - may the tick hrtimer skip being restarted?
 * @ts: tick state of the current CPU
 *
 * Returns true only when the tick is already stopped; with proc-stat
 * correction active, the current CPU must additionally have a zero
 * period divisor.
 */
bool hrtimer_norestart_enable(struct tick_sched *ts)
{
	if (!unlikely(ts->tick_stopped))
		return false;

	/* Correction mode: only divisor-0 CPUs are allowed to skip restart. */
	if (correct_proc_stat)
		return *this_cpu_ptr(&period_divisor) == 0;

	return true;
}

/*
 * period_divisor_valid - check whether @cpu's divisor permits the merge path
 * @cpu: CPU to check
 *
 * Always true while proc-stat correction is off; otherwise true only for a
 * zero divisor on @cpu.
 */
bool period_divisor_valid(int cpu)
{
	if (!correct_proc_stat)
		return true;

	return per_cpu(period_divisor, cpu) == 0;
}

/*
 * period_divisor_store - sysfs write handler for a CPU's period divisor
 * @kobj: per-CPU directory kobject; its name is the CPU number
 * @attr: the period_divisor attribute
 * @buf:  user input, a decimal divisor in [0, 10]
 * @count: length of @buf
 *
 * Rate-limited to one accepted write per second per CPU. On x86_64 a zero
 * divisor switches the CPU's clockevent back to deadline (one-shot) mode,
 * any other value switches it to periodic mode.
 *
 * Returns @count on success, negative errno on failure.
 */
static ssize_t period_divisor_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	int divisor, cpu, rv;
	u64 last_jiffies;

	rv = kstrtoint(buf, 10, &divisor);
	if (rv < 0)
		return rv;
	if (divisor < 0 || divisor > 10) {
		pr_warn("invalid input of cpu %s's period divisor\n", kobj->name);
		return -EINVAL;
	}

	rv = kstrtoint(kobj->name, 10, &cpu);
	if (rv < 0)
		return rv;
	/* Validate the parsed CPU before any per_cpu() access. */
	if (cpu < 0 || cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Rate limit: reject writes arriving less than a second apart. */
	last_jiffies = per_cpu(last_write_jiffies, cpu);
	if (last_jiffies != 0 && jiffies_64 - last_jiffies < HZ)
		return -EBUSY;

	per_cpu(last_write_jiffies, cpu) = jiffies_64;

	if (per_cpu(period_divisor, cpu) == divisor)
		return count;

	/* Keep the CPU from going offline while we switch its timer mode. */
	cpu_hotplug_disable();

	if (!cpu_online(cpu)) {
		pr_warn("CPU %d is offline, cannot set CPU's period divisor\n", cpu);
		cpu_hotplug_enable();
		return -EBUSY;
	}

	per_cpu(period_divisor, cpu) = divisor;

#if defined(CONFIG_X86_64)
	/* 0 restores one-shot deadline mode; non-zero enables periodic mode. */
	if (divisor == 0)
		smp_call_function_single(cpu, change_to_deadline, NULL, 1);
	else
		smp_call_function_single(cpu, change_to_period, NULL, 1);
#endif

	cpu_hotplug_enable();

	return count;
}

/*
 * period_divisor_show - sysfs read handler for a CPU's period divisor
 * @kobj: per-CPU directory kobject; its name is the CPU number
 * @attr: the period_divisor attribute
 * @buf:  output page
 *
 * Returns the number of bytes written to @buf, or negative errno if the
 * directory name does not parse as a CPU number.
 */
static ssize_t period_divisor_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	int cpu;
	int err = kstrtoint(kobj->name, 10, &cpu);

	if (err < 0)
		return err;

	return sprintf(buf, "%d\n", per_cpu(period_divisor, cpu));
}

/* 0644 "period_divisor" file, instantiated once per housekeeping CPU. */
static struct kobj_attribute period_divisor_attr = __ATTR_RW(period_divisor);

/*
 * correct_proc_stat_store - sysfs write handler for the correction switch
 * @kobj: "merge_irq" directory kobject
 * @attr: the correct_proc_stat attribute
 * @buf:  user input; only "1" while the switch is still 0 is accepted
 * @count: length of @buf
 *
 * correct_proc_stat is a one-way switch: it can only change from 0 to 1,
 * so there is no concurrency problem. Every other write gets -EPERM.
 *
 * Returns @count on success, negative errno on failure.
 */
static ssize_t correct_proc_stat_store(struct kobject *kobj, struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	int rv, correct = 0;

	rv = kstrtoint(buf, 10, &correct);
	if (rv < 0)
		return rv;	/* propagate -EINVAL/-ERANGE, as period_divisor_store does */

	if (correct != 1 || correct_proc_stat != 0)
		return -EPERM;

	correct_proc_stat = 1;

	return count;
}

/*
 * correct_proc_stat_show - sysfs read handler for the correction switch
 * @kobj: "merge_irq" directory kobject
 * @attr: the correct_proc_stat attribute
 * @buf:  output page
 *
 * Returns the number of bytes written to @buf ("0\n" or "1\n").
 */
static ssize_t correct_proc_stat_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	int val = correct_proc_stat;

	return sprintf(buf, "%d\n", val);
}

/* 0644 "correct_proc_stat" file under /sys/kernel/merge_irq/. */
static struct kobj_attribute correct_proc_stat_attr = __ATTR_RW(correct_proc_stat);

/*
 * period_divisor_init - create the /sys/kernel/merge_irq hierarchy
 *
 * Builds "merge_irq" with a "correct_proc_stat" file, plus one numbered
 * subdirectory holding a "period_divisor" file per housekeeping CPU
 * (a CPU must be housekeeping for DOMAIN, TICK and TIMER).
 *
 * Returns 0 on success (including when high-resolution timers are off and
 * the feature is simply not exposed), negative errno on failure.
 */
static int __init period_divisor_init(void)
{
	int cpu, ret;
	struct kobject *merge_irq, *dir;
	char buf[NAME_MAX];

	/* The irq merge relies on high-resolution tick mode. */
	if (get_hrtimer_hres_enabled() == false) {
		pr_info("Cannot enable clock irq merge when hrtimer hres is disabled.\n");
		return 0;
	}

	merge_irq = kobject_create_and_add("merge_irq", kernel_kobj);
	if (!merge_irq)
		return -ENOMEM;

	ret = sysfs_create_file(merge_irq, &correct_proc_stat_attr.attr);
	if (ret) {
		kobject_put(merge_irq);
		return ret;
	}

	for_each_possible_cpu(cpu) {
		/* Only housekeeping CPUs get a tunable divisor. */
		if (!housekeeping_test_cpu(cpu, HK_FLAG_DOMAIN) ||
			!housekeeping_test_cpu(cpu, HK_FLAG_TICK) ||
			!housekeeping_test_cpu(cpu, HK_FLAG_TIMER))
			continue;
		snprintf(buf, sizeof(buf), "%d", cpu);
		dir = kobject_create_and_add(buf, merge_irq);
		if (!dir)
			return -ENOMEM;
		ret = sysfs_create_file(dir, &period_divisor_attr.attr);
		if (ret) {
			kobject_put(dir);
			return ret;
		}
		/*
		 * NOTE(review): failures inside this loop leave merge_irq and
		 * earlier per-CPU dirs registered - confirm whether a failed
		 * late_initcall here needs full teardown.
		 */
	}
	return 0;
}
late_initcall(period_divisor_init);
