// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 */
#include <linux/sched/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/slab.h>
#include <linux/cpuset.h>
#include <linux/euleros_fast_core_down.h>


#ifdef CONFIG_EULEROS_FAST_CORE_DOWN

extern int do_cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target);
extern int get_cpu_hotplug_state(void);

/*
 * Mark every cpu in @cpus inactive, then wait for one RCU grace period
 * so that all readers are guaranteed to observe the updated active mask
 * before any of these cpus is actually taken down.
 */
static inline void cpu_deactivate_mask(cpumask_t *cpus)
{
	int i;

	for_each_cpu(i, cpus)
		set_cpu_active(i, false);

	synchronize_rcu();
}

/*
 * Take @cpu offline if it is set in @cpus.
 *
 * On success, mirror the new state into the cpu device and emit a
 * KOBJ_OFFLINE uevent so userspace gets the same notification it would
 * from the regular sysfs "online" attribute path.
 *
 * Returns 0 if @cpu is not in @cpus or was downed successfully,
 * otherwise the error returned by do_cpu_down().
 */
static inline int down_cpu_if_set(int cpu, cpumask_t *cpus)
{
	int ret;
	struct device *dev = NULL;

	if (!cpumask_test_cpu(cpu, cpus))
		return 0;

	ret = do_cpu_down(cpu, 0, CPUHP_OFFLINE);
	if (ret != 0) {
		pr_info("down core %d failed errno %d\n", cpu, ret);
		return ret;
	}

	/*
	 * get_cpu_device() can return NULL (e.g. if the cpu device was
	 * never registered); the original code dereferenced it blindly.
	 */
	dev = get_cpu_device(cpu);
	if (dev) {
		dev->offline = true;
		kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
	}

	return 0;
}

/**
 * cpu_down_mask - fast core down for storage
 *
 * The original cpu_down() interface is slow when there are more than
 * 100 cores, so this interface speeds up the core-down process:
 * 1. The caller chooses the core-down order. In practice, TOP_DOWN
 *    order saves ~70% of the time compared to BOTTOM_UP.
 * 2. All cpus are deactivated up front so RCU is waited on once for
 *    the whole batch instead of once per cpu.
 *
 * @cpus: cores to take down (already-offline cpus are cleared from it)
 * @order: core down order (CORE_DOWN_BOTTOM_UP or CORE_DOWN_TOP_DOWN)
 *
 * Returns 0 if all requested cores went down successfully;
 * on error, returns a negative error number (the last failure seen).
 */
int cpu_down_mask(cpumask_t *cpus, int order)
{
	int cpu, ret;
	s64 cpu_down_start, cpu_down_end;
	int err = -EINVAL;

	cpu_down_start = ktime_to_ns(ktime_get());

	if (cpus == NULL)
		return err;

	if (order != CORE_DOWN_BOTTOM_UP && order != CORE_DOWN_TOP_DOWN)
		return err;

	cpu_maps_update_begin();

	/* Drop cpus that are already offline; downing them again would fail. */
	for_each_cpu(cpu, cpus) {
		if (!cpu_online(cpu))
			cpumask_clear_cpu(cpu, cpus);
	}

	if (cpumask_empty(cpus)) {
		err = 0;
		goto out;
	}

	/* Refuse to take down the first online (boot) cpu. */
	if (cpumask_first(cpus) == cpumask_first(cpu_online_mask))
		goto out;

	if (get_cpu_hotplug_state()) {
		err = -EBUSY;
		goto out;
	}

	err = 0;
	/* Single deactivate + synchronize_rcu() for the whole batch. */
	cpu_deactivate_mask(cpus);

	/*
	 * Bound the scan by nr_cpu_ids, not num_possible_cpus(): the
	 * latter is a *count* of possible cpus and undercounts the
	 * highest cpu id when the possible mask is sparse, silently
	 * skipping high-numbered cpus. nr_cpu_ids is the valid id bound.
	 */
	if (order == CORE_DOWN_BOTTOM_UP) {
		for (cpu = 0; cpu < (int)nr_cpu_ids; cpu++) {
			ret = down_cpu_if_set(cpu, cpus);
			if (ret != 0)
				err = ret;
		}
	} else {
		for (cpu = (int)nr_cpu_ids - 1; cpu >= 0; cpu--) {
			ret = down_cpu_if_set(cpu, cpus);
			if (ret != 0)
				err = ret;
		}
	}

	cpu_down_end = ktime_to_ns(ktime_get());
	pr_info("cpu down total time: %lld\n", cpu_down_end - cpu_down_start);
out:
	cpu_maps_update_done();

	return err;
}
EXPORT_SYMBOL(cpu_down_mask);
#endif /*CONFIG_EULEROS_FAST_CORE_DOWN*/

