// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on kernel/sched/core.c
 *
 * Copyright (C) 1991-2002  Linus Torvalds
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Thu Aug 15 16:14:23 2023
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/kern_levels.h>
#include <linux/blkdev.h>
#include <linux/taskstats.h>
#include <linux/delayacct.h>
#include <linux/kernel_stat.h>
#include "../hm_kthread.h"
#include "liblinux/sched.h"
#include <linux/rcupdate_wait.h>

#include <uapi/linux/sched/types.h> /* for SCHED_RR */

/*
 * Block the current task until it is woken or @tv64 expires.
 *
 * The low 32 bits of tsk->state are used directly as a futex word:
 * try_to_wake_up() stores TASK_RUNNING and wakes the futex, and the
 * PAL futex wait below only sleeps while the word still equals the
 * @state snapshot taken here.
 *
 * @tv64: PAL futex timeout; LIBLINUX_PAL_FUTEX_MAX_TIMEOUT means
 *        "no timeout".
 */
static void __schedule_timeout(unsigned long long tv64)
{
	unsigned long flags = 0;
	unsigned long state;
	int *futex = NULL;
	int ret;
	struct task_struct *tsk = current;

	/* Report a quiescent point to RCU with interrupts disabled. */
	local_irq_disable();
	rcu_note_context_switch(false);
	local_irq_enable();

	/* This pairs with smp_mb() (B) in try_to_wake_up() that the wake_up thread does. */
	smp_mb();
	state = READ_ONCE(tsk->state);

	sched_submit_work(tsk);

	if (state == TASK_PARKED) {
		flags |= LIBLINUX_PAL_FUTEX_UNINTERRUPTIBLE;
	} else if (unlikely(state & ~TASK_NORMAL)) {
		/*
		 * Print the snapshot we actually validated: re-reading
		 * tsk->state here could race with a concurrent waker and
		 * report a value that never triggered this panic.
		 */
		panic("liblinux: task [%s] unknown state 0x%lx (%c)\n",
		      tsk->comm, state, task_state_to_char(tsk));
	}

	/* Futex word is the low 32 bits of ->state, endian-dependent. */
#if defined(__BIG_ENDIAN) && BITS_PER_LONG == 64
	futex = (int *)&tsk->state + 1;
#else
	futex = (int *)&tsk->state;
#endif

	if (state & TASK_UNINTERRUPTIBLE)
		flags |= LIBLINUX_PAL_FUTEX_UNINTERRUPTIBLE;

	if (state != TASK_RUNNING) {
		ret = liblinux_pal_futex_timedwait(futex, (int)state, tv64, flags);
		if (ret == -EINTR) {
			/* temporarily only set signal for actv task */
			if (!(tsk->flags & PF_KTHREAD))
				set_tsk_thread_flag(tsk, TIF_SIGPENDING);
		}
	} else {
		yield();
	}
	sched_update_worker(tsk);
}

/* Give up the CPU until a wakeup arrives; never times out on its own. */
void schedule(void)
{
	__schedule_timeout(LIBLINUX_PAL_FUTEX_MAX_TIMEOUT);
}
EXPORT_SYMBOL(schedule);

/* Voluntarily yield the CPU via the PAL; the task stays runnable. */
void __sched yield(void)
{
	liblinux_pal_thread_yield();
}
EXPORT_SYMBOL(yield);

/*
 * Sleep for up to @timeout jiffies while marked as waiting on block I/O.
 *
 * Flushes any pending block plug first, wraps the sleep in iowait and
 * block-I/O delay accounting, and restores the previous in_iowait flag
 * on return (so nested calls behave).
 *
 * Returns the remaining timeout as reported by schedule_timeout().
 */
long io_schedule_timeout(long timeout)
{
	int old_iowait = current->in_iowait;
#ifndef CONFIG_LIBLINUX
	struct rq *rq;
#endif
	long ret;

	current->in_iowait = 1;
	if (blk_needs_flush_plug(current))
		blk_schedule_flush_plug(current);

	delayacct_blkio_start();
#ifndef CONFIG_LIBLINUX
	/* Native kernel only: account this sleeper in the runqueue's nr_iowait. */
	rq = raw_rq();
	atomic_inc(&rq->nr_iowait);
#endif
	ret = schedule_timeout(timeout);
	current->in_iowait = old_iowait;
#ifndef CONFIG_LIBLINUX
	atomic_dec(&rq->nr_iowait);
#endif
	delayacct_blkio_end(current);

	return ret;
}
EXPORT_SYMBOL(io_schedule_timeout);

/* Sleep in iowait state with no timeout of its own. */
void __sched io_schedule(void)
{
	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(io_schedule);

/*
 * Wake a task sleeping in __schedule_timeout().
 *
 * Returns 1 if a futex wake was issued, 0 otherwise.
 *
 * NOTE(review): the state-mask parameter is named `unused` and is in
 * fact never read, so wake_up_state() callers get TASK_NORMAL wake
 * semantics regardless of the mask they pass — confirm this is intended.
 */
int try_to_wake_up(struct task_struct *p, unsigned int unused, int sync)
{
	int *futex = NULL;
	unsigned long state;

	/* CAUTION: for some testcase, this api is called with `p == NULL` */
	if (p == NULL) {
		pr_warn("%s: NULL task_struct detected\n", __func__);
		return 0;
	}

	/* This pairs with smp_store_mb() in set_current_state() that the waiting thread does. */
	smp_mb(); /* A */
	state = READ_ONCE(p->state);

	/* Futex word is the low 32 bits of ->state (see __schedule_timeout()). */
#if defined(__BIG_ENDIAN) && BITS_PER_LONG == 64
	futex = (int *)&p->state + 1;
#else
	futex = (int *)&p->state;
#endif

	if (state & TASK_NORMAL) {
		WRITE_ONCE(p->state, TASK_RUNNING);
		/*
		 * Pairs with the smp_mb() in __schedule_timeout().
		 * Make sure update to p->state is visible now.
		 */
		smp_mb(); /* B */
		liblinux_pal_futex_wake(futex);
		return 1;
	} else if (state == TASK_PARKED) {
		/*
		 * Parked tasks are woken without setting TASK_RUNNING here —
		 * presumably the unpark path resets ->state itself; verify.
		 */
		liblinux_pal_futex_wake(futex);
		return 1;
	} else if ((state != TASK_RUNNING) && (state != TASK_DEAD)) {
		pr_warn("%s: unexpected state detected: state=%lu\n", __func__, state);
		return 0;
	}

	return 0;
}

/*
 * Wake @p from any TASK_NORMAL sleep.
 * Returns 1 if a wake was issued, 0 otherwise.
 */
int wake_up_process(struct task_struct *p)
{
	int woken = try_to_wake_up(p, TASK_NORMAL, 0);

	return woken;
}
EXPORT_SYMBOL(wake_up_process);

/* Standard wait-queue callback: wake the task stored in the entry. */
int default_wake_function(wait_queue_entry_t *curr, unsigned int mode, int wake_flags,
			  void *key)
{
	struct task_struct *waiter = curr->private;

	return try_to_wake_up(waiter, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);

/*
 * Queue @task on the context-local wake list @head.
 *
 * Returns true if the task was newly queued (caller then owns getting a
 * task reference), false if it was already on some wake list.
 */
static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * its already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/* Queue @task for wakeup, pinning it until wake_up_q() drops the ref. */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		return;

	get_task_struct(task);
}

/*
 * Like wake_q_add(), but the caller passes its task reference in;
 * drop it if the task was already queued elsewhere.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		return;

	put_task_struct(task);
}

/* Wake every task queued on @head and drop the reference taken at add. */
void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node;

	for (node = head->first; node != WAKE_Q_TAIL; ) {
		struct task_struct *task = container_of(node, struct task_struct, wake_q);

		BUG_ON(!task);
		/* Advance before clearing ->next so the task can re-queue at once. */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/**
 * schedule_preempt_disabled - called with preemption disabled
 *
 * Returns with preemption disabled. Note: preempt_count must be 1
 */
void __sched schedule_preempt_disabled(void)
{
	/* Drop the count without triggering a reschedule, sleep, then re-take it. */
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}

/*
 * Wake @p if it sleeps in one of the @state states.
 *
 * NOTE(review): try_to_wake_up() ignores its state-mask argument (the
 * parameter is named `unused`), so @state currently has no effect here.
 */
int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}

/* Hotplug callback: mark @cpu active and restore its PAL scheduling level. */
int sched_cpu_activate(unsigned int cpu)
{
	set_cpu_active(cpu, true);

#ifdef CONFIG_LIBLINUX_HOTPLUG
	/* Level 0 re-enables normal scheduling on this cpu in the PAL. */
	liblinux_pal_process_set_slv(cpu, 0);
#endif
	return 0;
}

/* Hotplug callback: clear @cpu from the active mask and quiesce readers. */
int __weak sched_cpu_deactivate(unsigned int cpu)
{
	set_cpu_active(cpu, false);
	/*
	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
	 * users of this state to go away such that all new such users will
	 * observe it.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
	synchronize_rcu();

	return 0;
}

/* Hotplug callback: raise @cpu's PAL sched level to max as it goes down. */
int sched_cpu_dying(unsigned int cpu)
{
#ifdef CONFIG_LIBLINUX_HOTPLUG
	liblinux_pal_process_set_slv(cpu, PROCESS_SCHED_LEVEL_MAX);
#endif
	return 0;
}

static int _sched_setscheduler(struct task_struct *p, int policy, int prio)
{
	int ret = 0;

	if ((policy != SCHED_RR) && (policy != SCHED_FIFO)) {
		ret = liblinux_pal_thread_setscheduler(&task_thread_info(p)->thread_handle,
						       prio, policy);
	} else {
		if (strncmp(p->comm, "migration/", 10) == 0) {
			ret = liblinux_pal_thread_setscheduler(&task_thread_info(p)->thread_handle,
							       prio, policy);
		} else {
			pr_warn("liblinux: UN-IMPL: %s for SCHED_RR policy\n", __func__);
		}
	}

	return ret;
}

/*
 * Configure the per-cpu stop task: give it the maximum PAL sched level
 * and an RT policy.
 *
 * NOTE(review): priority 82 is a magic value with no visible origin in
 * this file — confirm it matches the PAL's expected stop-task priority.
 */
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
#ifdef CONFIG_LIBLINUX_HOTPLUG
	liblinux_pal_thread_set_slv(&task_thread_info(stop)->thread_handle, THREAD_SCHED_LEVEL_MAX);
	(void)_sched_setscheduler(stop, SCHED_RR, 82);
#endif
}

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
/* No nohz timer migration under liblinux: always target the current cpu. */
int get_nohz_timer_target(void)
{
	return smp_processor_id();
}
/* Stub: liblinux has no nohz cpus to kick. */
void wake_up_nohz_cpu(int cpu)
{
}
#endif

/* liblinux keeps no per-cpu iowait accounting; always report none. */
unsigned long nr_iowait_cpu(int cpu)
{
	(void)cpu;
	return 0UL;
}

/* No idle tracking under liblinux; never report a cpu as idle. */
int idle_cpu(int cpu)
{
	(void)cpu;
	return 0;
}

/* kernel/sched/core.c */
#ifdef CONFIG_SMP
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/sched.h>

#define __RETRY_CNT_MAX		20

/*
 * Default (weak) affinity filter: accept the requested mask unchanged.
 * A platform may override this to restrict which cpus are allowed.
 */
void __weak liblinux_get_cpus_allowed(struct cpumask *allowed_mask,
				      const struct cpumask *new_mask)
{
	cpumask_copy(allowed_mask, new_mask);
}

/*
 * Apply a new cpu affinity mask to @p via the PAL.
 *
 * Returns 0 on success or if the request is a no-op (same mask, empty
 * mask, or no overlap with online cpus); otherwise the PAL error code.
 *
 * NOTE(review): only the first word of the mask is passed to the PAL,
 * so cpus beyond BITS_PER_LONG are silently dropped — confirm NR_CPUS
 * fits in one word on all supported configs.
 *
 * NOTE(review): p->cpus_mask and nr_cpus_allowed are updated even when
 * all retries fail, leaving the bookkeeping out of sync with the PAL —
 * verify this is intended.
 */
int __set_cpus_allowed(struct task_struct *p, const struct cpumask *__new_mask)
{
	int i;
	int ret = 0;
	struct cpumask new_mask;

	liblinux_get_cpus_allowed(&new_mask, __new_mask);

	if (cpumask_equal(&p->cpus_mask, &new_mask) || cpumask_empty(&new_mask) ||
	    !cpumask_intersects(&new_mask, cpu_online_mask)) {
		return ret;
	}

	/*
	 * If config_liblinux_preempt is enabled, set affinity will fail when
	 * the thread is running with preempt disabled, so retry it more times.
	 * It can be fixed until LDK support wait_task_inactive.
	 */
	for (i = 0; i < __RETRY_CNT_MAX; i++) {
		ret = liblinux_pal_thread_setaffinity(
				&task_thread_info(p)->thread_handle,
				*((unsigned long *)cpumask_bits(&new_mask)));
		if (ret >= 0)
			break;
	}

	if (i == __RETRY_CNT_MAX) {
		pr_warn("set affinity 0x%lx for thread [%s] failed, ret=%d\n",
		      *cpumask_bits(&new_mask), p->comm, ret);
	} else if (i != 0) {
		pr_warn("set affinity 0x%lx for thread [%s] retry times=%d\n",
		       *cpumask_bits(&new_mask), p->comm, i);
	}

	/* TODO: check it
	task_thread_info(p)->cpu = cpumask_first(&new_mask);
	*/
	cpumask_copy(&p->cpus_mask, &new_mask);
	p->nr_cpus_allowed = cpumask_weight(&new_mask);

	return ret;
}

/* Affinity setter variant whose result is deliberately discarded. */
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *__new_mask)
{
	int ret = __set_cpus_allowed(p, __new_mask);

	(void)ret;
}

/* Public affinity setter; returns the PAL result from __set_cpus_allowed(). */
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed(p, new_mask);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);


/* Per-cpu statistics normally defined in kernel/sched/core.c. */
DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);

#endif

/* Unimplemented under liblinux; logs the request and does nothing. */
void sched_set_fifo(struct task_struct *p)
{
	pr_info("liblinux: UN-IMPL: %s: task=%s\n", __func__, p->comm);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);

/*
 * Enter iowait state and flush the block plug.
 * Returns the previous in_iowait flag as a token for io_schedule_finish().
 */
int io_schedule_prepare(void)
{
	int token = current->in_iowait;

	current->in_iowait = 1;
	blk_schedule_flush_plug(current);

	return token;
}

/* Restore the in_iowait flag saved by io_schedule_prepare(). */
void io_schedule_finish(int token)
{
	current->in_iowait = token;
}

/* Set @p's policy and priority; no permission checks in this port. */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	int prio = param->sched_priority;

	return _sched_setscheduler(p, policy, prio);
}

/* Identical to sched_setscheduler() here: this port performs no checks. */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	int prio = param->sched_priority;

	return _sched_setscheduler(p, policy, prio);
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change.  If it changes, i.e. @p might have woken up,
 * then return zero.  When we succeed in waiting for @p to be off its CPU,
 * we return a positive number.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	int inactive;
	unsigned long ret = 0;

	/* wait for task @p to be inactive */
	inactive = liblinux_pal_thread_inactive(&task_thread_info(p)->thread_handle);
	while (inactive == 0) {
		/*
		 * if task state of @p changed during waiting, we stop waiting
		 * Consider this case:
		 * 1. task A sets its state to PARKED, and does not call schedule yet
		 * 2. task B calls this function to wait for task A unschedule, then it yields
		 * 3. task A unscheduled, then task C wakes task A up, task A becomes RUNNABLE again
		 * 4. task B wakes up, it finds that it is waiting on a mismatched state, thus exits
		 *
		 * READ_ONCE matches the other ->state readers in this file and
		 * stops the compiler from caching the load across iterations.
		 */
		if (match_state && unlikely(READ_ONCE(p->state) != match_state))
			break;

		/* we use yield here instead of spin */
		yield();
		inactive = liblinux_pal_thread_inactive(&task_thread_info(p)->thread_handle);
	}

	/* check whether @p->state be in matching state */
	if (!match_state || READ_ONCE(p->state) == match_state)
		ret = 1;

	return ret;
}
