/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024.
 * Description: RTOS Process Context
 * Create: 2023-08-15
 */
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/vtime.h>
#include <linux/kernel_stat.h>
#include <linux/sched/debug.h>
#include <trace/events/sched.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/rtos_rtpc.h>

/* Pointer to the callee-saved register block kept in a task's thread struct. */
#define thread_saved_context(tsk)	\
	((struct cpu_context *)(&tsk->thread.cpu_context))

/* Task most recently published for this CPU via rtpc_set_current_prepare(). */
DEFINE_PER_CPU(struct task_struct *, rtpc_curr_task);
/* Stack pointer published alongside rtpc_curr_task. */
DEFINE_PER_CPU(void *, rtpc_curr_sp);
/* True only while rtpc_context_switch_user() runs switch_mm_irqs_off();
 * checked there to skip branch-predictor hardening on the fast path. */
DEFINE_PER_CPU(bool, rtpc_mm_fast_switch);

/*
 * Publish the task that is about to run on this CPU together with its
 * stack pointer, into the rtpc_curr_task / rtpc_curr_sp per-CPU slots.
 *
 * NOTE(review): presumably these slots are read from exception/switch
 * paths; the write order (task first, then sp) is kept as-is — confirm
 * whether any reader depends on it.
 */
void rtpc_set_current_prepare(struct task_struct *task, void *sp)
{
	__this_cpu_write(rtpc_curr_task, task);
	__this_cpu_write(rtpc_curr_sp, sp);
}

/*
 * Switch the user-visible CPU state from @prev to @next: address space
 * (mm), FP/SIMD registers, and TLS registers.
 *
 * rtpc_mm_fast_switch is raised strictly around switch_mm_irqs_off() so
 * that the mm-switch path can detect it and skip branch-predictor
 * hardening for this fast switch; it must be cleared again before the
 * remaining thread-state switches run.
 */
void rtpc_context_switch_user(struct task_struct *prev, struct task_struct *next)
{
	// Set rtpc_mm_fast_switch to skip bp hardening in fast switch
	raw_cpu_write(rtpc_mm_fast_switch, true);
	// In this function, it will check rtpc_mm_fast_switch flag
	switch_mm_irqs_off(prev->active_mm, next->mm, next);
	raw_cpu_write(rtpc_mm_fast_switch, false);

	rtpc_fpsimd_thread_switch(next);
	rtpc_tls_thread_switch(next);
}

/*
 * Hand vtime accounting over from @prev to @next at switch time.
 *
 * Charges the elapsed interval since @prev's starttime to its system
 * time, flushing to the cputime accounting core once at least one tick
 * worth (TICK_NSEC) has accumulated, then marks @prev inactive and
 * starts a fresh VTIME_SYS interval for @next on this CPU.
 *
 * NOTE(review): unlike the generic kernel vtime_task_switch() path,
 * no write_seqcount_begin/end protects these vtime updates here —
 * confirm that no concurrent reader samples prev/next vtime state.
 */
static void rtpc_vtime_task_switch(struct task_struct *prev, struct task_struct *next)
{
	struct vtime *vtime;
	u64 delta;

	/* Nothing to do unless full vtime accounting is active on this CPU. */
	if (!vtime_accounting_enabled_this_cpu())
		return;

	/* Close prev's interval: accumulate, flush per-tick, deactivate. */
	vtime = &prev->vtime;
	delta = sched_clock() - vtime->starttime;
	vtime->stime += delta;
	vtime->starttime += delta;
	if (vtime->stime >= TICK_NSEC) {
		account_system_index_time(prev, vtime->stime, CPUTIME_SYSTEM);
		vtime->stime = 0;
	}
	vtime->state = VTIME_INACTIVE;
	vtime->cpu = -1;

	/* Open next's interval in system mode on the current CPU. */
	vtime = &next->vtime;
	vtime->state = VTIME_SYS;
	vtime->starttime = sched_clock();
	vtime->cpu = smp_processor_id();
}

/*
 * Switch the kernel-side per-thread state to @next: hardware
 * breakpoints, CONTEXTIDR_EL1, the per-CPU entry-task pointer, and
 * vtime accounting (which also closes @prev's accounting interval).
 */
void rtpc_context_switch_kernel(struct task_struct *prev, struct task_struct *next)
{
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	rtpc_entry_task_switch(next);
	rtpc_vtime_task_switch(prev, next);
}

/*
 * Full context switch from @prev to @next: the user-state half
 * (mm/FPSIMD/TLS) runs before the kernel-state half
 * (breakpoints/CONTEXTIDR/entry task/vtime).
 */
void rtpc_context_switch(struct task_struct *prev, struct task_struct *next)
{
	rtpc_context_switch_user(prev, next);
	rtpc_context_switch_kernel(prev, next);
}

/*
 * Dump diagnostic information for an exception taken by @tsk.
 *
 * @is_el0_sync: nonzero when the exception is a synchronous EL0 trap
 *               (value is printed verbatim, not interpreted here).
 * @tsk:         task whose saved pt_regs are shown.
 *
 * Fix: the original overwrote the raw ESR_EL1 value with just its
 * exception-class field (ESR_ELx_EC) and printed that under the label
 * "esr_el1", discarding the IL/ISS bits needed to diagnose the fault.
 * Print the full syndrome register and the decoded EC separately.
 */
void rtpc_show_exception_info(unsigned long is_el0_sync, struct task_struct *tsk)
{
	unsigned long esr_el1;
	unsigned long far_el1;
	struct pt_regs *regs;

	esr_el1 = read_sysreg(esr_el1);
	far_el1 = read_sysreg(far_el1);
	regs = task_pt_regs(tsk);

	pr_info("---------------------- [exception info start] ---------------------\n");
	pr_info("task = %s/%d\n", tsk->comm, tsk->pid);
	pr_info("is_el0_sync = %lu, esr_el1 = 0x%lx (EC = 0x%lx), far_el1 = 0x%lx\n",
			is_el0_sync, esr_el1,
			(unsigned long)ESR_ELx_EC(esr_el1), far_el1);
	show_regs(regs);
	pr_info("----------------------- [exception info end] ----------------------\n");
}

/*
 * Allocate a zero-initialized struct cpu_context.
 *
 * Uses GFP_ATOMIC since callers may be in non-sleepable context
 * (e.g. on a switch path). Returns NULL on allocation failure;
 * the caller owns the buffer and must kfree() it.
 *
 * Fix: kcalloc(1, size, ...) for a single object is an anti-pattern;
 * kzalloc() is the idiomatic single-object zeroing allocator.
 */
void *rtpc_alloc_cpu_context(void)
{
	return kzalloc(sizeof(struct cpu_context), GFP_ATOMIC);
}

/*
 * Copy @tsk's saved callee-saved register context into the
 * caller-provided buffer @save_context, which must hold at least
 * sizeof(struct cpu_context) bytes.
 */
void rtpc_save_cpu_context(struct task_struct *tsk, void *save_context)
{
	memcpy(save_context, thread_saved_context(tsk), sizeof(struct cpu_context));
}

/*
 * Restore @tsk's saved callee-saved register context from the buffer
 * previously filled by rtpc_save_cpu_context(). @save_context must
 * hold at least sizeof(struct cpu_context) bytes.
 */
void rtpc_recover_cpu_context(struct task_struct *tsk, const void *save_context)
{
	memcpy(thread_saved_context(tsk), save_context, sizeof(struct cpu_context));
}
