// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Thu Aug 15 16:14:23 2023
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/override.h>
/*
 * Wrapper used to deliver a cross-CPU function call through the workqueue
 * machinery instead of IPIs: the work handler invokes func(info) on the
 * CPU the work item was queued on.
 */
struct smp_call_work {
	struct work_struct work;	/* must remain first: async path frees via the work pointer */
	smp_call_func_t func;		/* callback to run on the target CPU */
	void *info;			/* opaque argument handed to @func */
};

/*
 * Work handler: run the wrapped callback on this CPU.
 *
 * Preemption is disabled around the call to mirror the execution
 * environment of a real smp_call_function() callback as closely as a
 * workqueue context allows.
 */
static void __smp_call_work_func(struct work_struct *_work)
{
	struct smp_call_work *scw;

	scw = container_of(_work, struct smp_call_work, work);

	preempt_disable();
	scw->func(scw->info);
	preempt_enable();
}

static void __smp_call_work_async(struct work_struct *_work)
{
	__smp_call_work_func(_work);
	kfree(_work);
}

/*
 * Synchronous variant: queue @func(@info) on every CPU in @cpu_mask that
 * passes @cond_func (may be NULL), then wait for all of them to finish.
 *
 * Best effort: if the per-cpu scratch allocation fails, the call is
 * silently dropped, matching the async path's behavior on OOM.
 *
 * Fix vs. previous version: @cond_func was evaluated a second time in the
 * flush loop.  If its answer changed between queueing and flushing, a
 * queued work item could be skipped by flush_work() and free_percpu()
 * would free memory still owned by a pending/running work — use-after-free.
 * We now record exactly which CPUs were queued and flush precisely those.
 */
static void __smp_call_function_many(const struct cpumask *cpu_mask, smp_call_func_t func,
					void *info, smp_cond_func_t cond_func)
{
	int cpu;
	struct smp_call_work __percpu *works;
	struct cpumask queued = {0};	/* CPUs we actually queued work on */

	works = alloc_percpu_gfp(struct smp_call_work, GFP_ATOMIC);
	if (!works)
		return;

	for_each_cpu(cpu, cpu_mask) {
		struct smp_call_work *work = per_cpu_ptr(works, cpu);

		/* Evaluate the predicate exactly once per CPU, before any setup. */
		if (cond_func && !cond_func(cpu, info))
			continue;

		INIT_WORK(&work->work, __smp_call_work_func);
		work->func = func;
		work->info = info;
		queue_work_on(cpu, system_highpri_wq, &work->work);
		__cpumask_set_cpu(cpu, &queued);
	}

	/* Wait only for the work items that were really queued above. */
	for_each_cpu(cpu, &queued)
		flush_work(&per_cpu_ptr(works, cpu)->work);

	free_percpu(works);
}

/*
 * Asynchronous (fire-and-forget) variant: queue @func(@info) on every CPU
 * in @cpu_mask that passes @cond_func (may be NULL) and return without
 * waiting.  Each work item owns its own heap wrapper, freed by
 * __smp_call_work_async() after the callback runs.
 *
 * Best effort: a CPU whose wrapper cannot be allocated is silently skipped.
 */
static void __smp_call_function_many_async(const struct cpumask *cpu_mask, smp_call_func_t func,
						void *info, smp_cond_func_t cond_func)
{
	int cpu;

	for_each_cpu(cpu, cpu_mask) {
		struct smp_call_work *work;

		if (cond_func && !cond_func(cpu, info))
			continue;

		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			continue;

		INIT_WORK(&work->work, __smp_call_work_async);
		work->func = func;
		work->info = info;
		queue_work_on(cpu, system_highpri_wq, &work->work);
	}
}

/*
 * Override of smp_call_function_many_cond(): deliver @func(@info) to the
 * online CPUs in @mask (excluding the calling CPU) via the high-priority
 * workqueue rather than IPIs.  @wait selects the synchronous path, which
 * blocks until every callback has run; otherwise the call is
 * fire-and-forget.
 *
 * NOTE(review): smp_processor_id() is used without pinning — the sync path
 * must be able to sleep in flush_work(), so preemption cannot be held here.
 * A migration right after the read only costs us one redundant self-call
 * through the workqueue; it cannot deadlock.
 */
void __override
smp_call_function_many_cond(const struct cpumask *mask,
				 smp_call_func_t func, void *info,
				 bool wait, smp_cond_func_t cond_func)
{
	struct cpumask targets = {0};

	/* Restrict to online CPUs and never target ourselves. */
	cpumask_and(&targets, mask, cpu_online_mask);
	__cpumask_clear_cpu(smp_processor_id(), &targets);

	if (wait)
		__smp_call_function_many(&targets, func, info, cond_func);
	else
		__smp_call_function_many_async(&targets, func, info, cond_func);
}


/*
 * Override of smp_call_function_single(): run @func(@info) on @cpu.
 * The local CPU executes the callback directly with IRQs disabled;
 * a remote CPU is reached through the workqueue-based many-path
 * (@wait selects sync vs. fire-and-forget there).
 *
 * Always returns 0; remote delivery is best effort (see the many-paths).
 *
 * Fix vs. previous version: smp_processor_id() was called in a possibly
 * preemptible context (DEBUG_PREEMPT splat), and a migration between the
 * comparison and local_irq_save() could run @func on the wrong CPU.
 * get_cpu()/put_cpu() pin us across the check and the local call, matching
 * the upstream implementation.
 */
int __override smp_call_function_single(int cpu, smp_call_func_t func, void *info, int wait)
{
	int this_cpu = get_cpu();

	if (cpu == this_cpu) {
		unsigned long flags;

		/* Execute directly, IRQs off, like a self-targeted IPI. */
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		put_cpu();
		return 0;
	}

	/* Re-enable preemption before the remote path: it may sleep in flush_work(). */
	put_cpu();

	smp_call_function_many_cond(cpumask_of(cpu), func, info, wait, NULL);
	return 0;
}
