/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2024.
 * Description: RTOS Process Context
 * Create: 2021-08-15
 */
#include <linux/sched.h>
#include <../kernel/sched/sched.h>
#include <../kernel/audit.h>
#include <linux/task_struct_extend.h>
#include <linux/rtos_rtpc.h>

#define RECOVER_PREEMPT_COUNT (2 * PREEMPT_DISABLE_OFFSET)

#ifdef CONFIG_ARM64
/*
 * Force @tsk's preempt count to zero so it runs as fully preemptible
 * after the RTPC hand-over.  A task that is still inside an audited
 * syscall must never be handed over, hence the BUG_ON.
 */
void rtpc_correct_task_struct(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	struct audit_context *actx = tsk->audit_context;

	ti->preempt_count = 0;
	BUG_ON(actx && actx->in_syscall != 0);
}

/*
 * Counterpart of rtpc_correct_task_struct(): restore @tsk's preempt
 * count to RECOVER_PREEMPT_COUNT (two preempt-disable levels).  As with
 * the hand-over path, @tsk must not be inside an audited syscall.
 */
void rtpc_recover_task_struct(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	struct audit_context *actx = tsk->audit_context;

	ti->preempt_count = RECOVER_PREEMPT_COUNT;
	BUG_ON(actx && actx->in_syscall != 0);
}

/*
 * Restore a previously saved kernel-stack image.
 * @start_sp:     destination — start of the live stack region to restore
 * @save_kstack:  saved copy of the stack contents
 * @kstack_size:  number of bytes to copy back
 *
 * The two regions are assumed not to overlap (memcpy semantics).
 */
void rtpc_recover_task_stack(void *start_sp, const void *save_kstack,
	size_t kstack_size)
{
	memcpy(start_sp, save_kstack, kstack_size);
}

/*
 * Dequeue @prev from this CPU's runqueue as if it had voluntarily gone
 * to sleep, leaving it in the RTPC-private TASK_RTPC_UNWAKEABLE state.
 * Must run on the CPU whose runqueue owns @prev (uses smp_processor_id()).
 */
static void rtpc_deactivate_task(struct task_struct *prev)
{
	int cpu;
	struct rq *rq;

	cpu = smp_processor_id();
	rq = cpu_rq(cpu);

	raw_spin_lock(&rq->lock);
	/* Park @prev in the unwakeable state before taking it off the rq. */
	rtpc_set_task_state(prev, TASK_RTPC_UNWAKEABLE);
	deactivate_task(rq, prev, DEQUEUE_SLEEP);
	++prev->nvcsw; /* account this as a voluntary context switch */
	put_prev_task(rq, prev);
	BUG_ON(prev->on_rq);
	prev->on_cpu = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Wake @next out of the RTPC parking state and install it as the running
 * task on the current CPU's runqueue.  @prev is only needed for the perf
 * sched-out/sched-in notifications that emulate a regular context switch.
 * NOTE(review): this uses raw_smp_processor_id() while the deactivate
 * path uses smp_processor_id() — confirm the asymmetry is intentional
 * (preemption-state debug checks may differ at the two call points).
 */
static void rtpc_activate_task(struct task_struct *next, struct task_struct *prev)
{
	int cpu;
	struct rq *rq;

	cpu = raw_smp_processor_id();
	rq = cpu_rq(cpu);

	/* Pin @next to this CPU, then wake it from the parking state. */
	set_cpus_allowed_ptr(next, cpumask_of(cpu));
	wake_up_state(next, TASK_RTPC_UNWAKEABLE);

	/* The wakeup above must have enqueued @next. */
	BUG_ON(next->on_rq == 0);

	raw_spin_lock(&rq->lock);
	/* Suppress the clock update while we swap the current task. */
	rq->clock_update_flags = RQCF_ACT_SKIP;
	rtpc_set_next_task(rq, next);
	next->on_cpu = 1;
	rq->curr = next;
	/* Emulate a context switch for perf: out of @prev, into @next. */
	perf_event_task_sched_out(prev, next);
	perf_event_task_sched_in(prev, next);
	rq->clock_update_flags = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Perform an RTPC context hand-over on arm64: dequeue @prev, then wake
 * and install @next on the current CPU.  The order is load-bearing —
 * @prev must be fully off the runqueue before @next is installed.
 */
void rtpc_sched_switch(struct task_struct *prev, struct task_struct *next)
{
	rtpc_deactivate_task(prev);
	rtpc_activate_task(next, prev);
}

#else

static __always_inline
void rtpc_vtime_task_switch(struct task_struct *prev, struct task_struct *next, int cpu);

/*
 * Hand the CPU over from @prev to @next without going through schedule():
 * this replays, in order, the relevant pieces of try_to_wake_up(),
 * __schedule() and finish_task_switch() from kernel/sched/core.c.  All
 * tracing hooks are skipped because `current` (and therefore
 * smp_processor_id()) is still wrong at this point — see the arm32 note
 * below.
 */
void rtpc_notrace rtpc_sched_switch(struct task_struct *prev, struct task_struct *next)
{
	const int cpu = task_cpu(prev);
	struct rq_flags rf;
	struct rq *const rq = cpu_rq(cpu);
	int wake_en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
	struct thread_info *const prev_thread_info = task_thread_info(prev);
	struct thread_info *const next_thread_info = task_thread_info(next);

	/*
	 * Note:
	 * On arm32, doing fixup here
	 * current is server, and [raw_]smp_processor_id(); is server->cpu
	 * This is wrong
	 * task_cpu(prev) is correct cpu now
	 * We must be careful not to use [raw_]smp_processor_id() before setting server->cpu
	 */

	/*
	 * correcting task preempt count
	 * see comments in kernel/sched/core.c:finish_task_switch()
	 * note that raw_spin_lock() will run preempt_disable()
	 * so we can't change preempt count when locked
	 */
# ifdef CONFIG_RTOS_RTPC_RPC_DEBUG_BUILD
	BUG_ON(READ_ONCE(prev_thread_info->preempt_count) != 0);
	BUG_ON(READ_ONCE(next_thread_info->preempt_count) != RECOVER_PREEMPT_COUNT);
# endif
	WRITE_ONCE(prev_thread_info->preempt_count, RECOVER_PREEMPT_COUNT);
	WRITE_ONCE(next_thread_info->preempt_count, 0);

	/* Serialize against any concurrent wakeup attempt on @next. */
	raw_spin_lock(&next->pi_lock);
	smp_mb__after_spinlock();
	BUG_ON(READ_ONCE(next->state) != TASK_RTPC_UNWAKEABLE);
	/* trace_sched_waking will use smp_processor_id(), so skip */

	/* This is copy from try_to_wake_up:kernel/sched/core.c */
	smp_rmb();
	BUG_ON(READ_ONCE(next->on_rq));

	smp_acquire__after_ctrl_dep();

	WRITE_ONCE(next->state, TASK_WAKING);

	/* This is quoted from try_to_wake_up:kernel/sched/core.c */
	BUG_ON(smp_load_acquire(&next->on_cpu));

	/*
	 * already check:
	 * next->state
	 * next->on_rq
	 * next->on_cpu
	 */
	if (task_cpu(next) != cpu) {
		BUG_ON(next->in_iowait);

		wake_en_flags |= ENQUEUE_MIGRATED;

		/* here will read next->cpu, must do before set_task_cpu */
		psi_ttwu_dequeue(next);

		set_task_cpu(next, cpu);
	}

	BUG_ON(task_cpu(next) != cpu);

	rq_lock(rq, &rf);
	/* Promote RQCF_REQ_SKIP to RQCF_ACT_SKIP, as rq_pin_lock() does. */
	rq->clock_update_flags <<= 1;
	update_rq_clock(rq);

	/*
	 * It is safe to set next->cpus_mask here
	 * Because next->pi_lock is locked and next is not on rq
	 * Linux examples:
	 * try_to_wake_up -> select_task_rq ->
	 * select_fallback_rq -> do_set_cpus_allowed
	 *
	 * It is also safe to read prev->cpus_mask here
	 * because rq lock is hold and prev is on rq
	 */
	do_set_cpus_allowed(next, &prev->cpus_mask);

	/* Dequeue @prev as if it went to sleep (see rtpc_deactivate_task). */
	rtpc_set_task_state(prev, TASK_RTPC_UNWAKEABLE);
	deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
	BUG_ON(prev->in_iowait);
	put_prev_task(rq, prev);
	BUG_ON(prev->on_rq);

	activate_task(rq, next, wake_en_flags);
	WRITE_ONCE(next->state, TASK_RUNNING);
	/* skip trace_sched_wakeup(next); */
	raw_spin_unlock(&next->pi_lock);
	/* arm32's clear_preempt_need_resched() is empty, so skip */

	/* Transfer any pending reschedule request from @prev to @next. */
	if (test_and_clear_ti_thread_flag(prev_thread_info, TIF_NEED_RESCHED))
		set_ti_thread_flag(next_thread_info, TIF_NEED_RESCHED);

	rq->nr_switches++;
	RCU_INIT_POINTER(rq->curr, next);
	set_next_task(rq, next);
	++prev->nvcsw; /* ++*switch_count; */
	psi_sched_switch(prev, next, true);
	/* trace_sched_switch will check current, which is wrong on arm32, so skip */

	/* Refer to prepare_task_switch in kernel/sched/core.c */
	perf_event_task_sched_out(prev, next);
	WRITE_ONCE(next->on_cpu, 1);

	/*
	 * Note that, we do membarrier_switch_mm in fixup/ret_call_exception,
	 * however, the function of membarrier syscall is still abnormal for rtpc tasks
	 * We still need to add constraints to illustrate it.
	 */

	/* help compiler optimizes the code */
	/*
	 * NOTE(review): this tells the compiler that prev->active_mm ==
	 * next->mm can never be true.  If that invariant is ever violated
	 * the branch is undefined behavior, not a graceful fallthrough —
	 * confirm it holds for every rtpc caller.
	 */
	if (unlikely(prev->active_mm == next->mm))
		__builtin_unreachable();
	membarrier_switch_mm(rq, prev->active_mm, next->mm);

	/* Refer to finish_task_switch in kernel/sched/core.c */
	rtpc_vtime_task_switch(prev, next, cpu);
	perf_event_task_sched_in(prev, next);
	/* This is copy from finish_task:kernel/sched/core.c */
	smp_store_release(&prev->on_cpu, 0);

	rq_unlock(rq, &rf);

	/* skip ttwu_stat */
}

/*
 * Hand vtime accounting over from @prev to @next, mirroring the
 * CONFIG_VIRT_CPU_ACCOUNTING_GEN vtime_task_switch() path (see the
 * config check at the bottom of this file).
 *
 * __always_inline does not grow the image: there is exactly one call
 * site, in rtpc_sched_switch().
 */
static __always_inline
void rtpc_vtime_task_switch(struct task_struct *prev, struct task_struct *next, const int cpu)
{
	struct vtime *vt;
	u64 elapsed;
	unsigned long long now;

	if (!vtime_accounting_enabled_this_cpu())
		return;

	now = sched_clock();

	/*
	 * Close @prev's accounting window: charge the elapsed time as
	 * system time, flushing it to the accounting core once at least
	 * a full tick has accumulated.
	 */
	vt = &prev->vtime;
	elapsed = now - vt->starttime;
	vt->stime += elapsed;
	vt->starttime += elapsed;
	if (vt->stime >= TICK_NSEC) {
		account_system_index_time(prev, vt->stime, CPUTIME_SYSTEM);
		vt->stime = 0;
	}
	vt->state = VTIME_INACTIVE;
	vt->cpu = -1;

	/* Open a fresh system-time window for @next on this CPU. */
	vt = &next->vtime;
	vt->state = VTIME_SYS;
	vt->starttime = now;
	vt->cpu = cpu;
}
#endif

// rtpc_vtime_task_switch is only correct with CONFIG_VIRT_CPU_ACCOUNTING_GEN
#if !defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN) || defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
#error "CONFIG check failed for rtpc"
#endif
