/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024.
 * Description: RTOS Process Context asm header
 * Create: 2024-07-13
 */
#include <linux/compiler.h>
#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/percpu-defs.h>
#include <linux/rtos_rtpc_rpc.h>

#include <asm/vfp.h>

#include "switch.h"

/*
 * Per-CPU flag consumed outside this file — presumably by the assembly
 * mm-switch path in arch/arm/mm/proc-v7.S; confirm against that file.
 */
DEFINE_PER_CPU(bool, rtpc_mm_fast_switch);
/* Static key gating the VFP save path; defaults to off. */
DEFINE_STATIC_KEY_FALSE(rtpc_vfp_enabled);
/* Owner table maintained by the core ARM VFP code (arch/arm/vfp). */
extern union vfp_state *vfp_current_hw_state[NR_CPUS];

/*
 * Save the live VFP hardware context into the current thread's vfp_state
 * and then disable the VFP unit, but only if FPEXC.EN shows the unit is
 * currently enabled.
 *
 * @need_save_d8_d15: when true, also store d8-d15 into the saved state.
 *	NOTE(review): callers that skip d8-d15 presumably rely on those
 *	registers being dead or preserved elsewhere on that path — confirm
 *	against the asm entry code that pairs with this save.
 */
static __always_inline void vfp_save(const bool need_save_d8_d15)
{
	struct thread_info *const current_thread = current_thread_info();
	u32 fpexc;
	u32 fpscr;
	u32 fpinst;
	u32 fpinst2;
	union vfp_state *vfp_state;

	/* Read FPEXC to learn whether the VFP unit is enabled. */
	__asm__ (
			".fpu	vfpv2\n\t"
			"vmrs	%0, FPEXC"
			: "=r"(fpexc)
			:
			: "cc");

	if (unlikely(fpexc & FPEXC_EN)) {
		/* The per-CPU owner slot tells us whose state is in hardware. */
		vfp_state = vfp_current_hw_state[current_thread->cpu];
#ifdef CONFIG_RTOS_RTPC_RPC_DEBUG_BUILD
		/* Debug builds insist the hardware state belongs to us. */
		BUG_ON(vfp_state != &current_thread_info()->vfpstate);
#endif

		vfp_state->hard.fpexc = fpexc;

		if (need_save_d8_d15) {
			__asm__ volatile (
					"vstmia %0, {d8-d15}"
					:
					: "r"(&vfp_state->hard.fpregs[8])
					: "cc", "memory");
		}

		__asm__ volatile (
				"vmrs   %0, fpscr"
				: "=r"(fpscr)
				:
				: "cc");
		vfp_state->hard.fpscr = fpscr;

		/*
		 * FPEXC.EX set means extra exception state is live; FPINST
		 * (and FPINST2 when FPEXC.FP2V is set) must be preserved too.
		 */
		if (fpexc & FPEXC_EX) {
			__asm__ volatile (
					"vmrs	%0, fpinst"
					: "=r"(fpinst)
					:
					: "cc");
			vfp_state->hard.fpinst = fpinst;
			if (fpexc & FPEXC_FP2V) {
				__asm__ volatile (
						"vmrs	%0, fpinst2"
						: "=r"(fpinst2)
						:
						: "cc");
				vfp_state->hard.fpinst2 = fpinst2;
			}
		}

		/*
		 * we must save vfp regs first then disable vfp
		 * add "m"(*vfp_state) to input
		 * to prevent compiler reorder
		 */
		asm (
				".fpu	vfpv2\n\t"
				"vmsr	FPEXC, %0"
				:
				: "r"(fpexc & ~FPEXC_EN), "m"(*vfp_state)
				: "cc");
	}
}

/*
 * Fast-path RPC call entry (called from assembly). Switches to the server
 * task, saves the VFP context when the static key is enabled, and returns
 * the server's pt_regs pointer packed in the low 32 bits with the
 * service_info pointer in the high 32 bits.
 */
asmlinkage __aligned(64) rtpc_notrace
unsigned long long rtpc_do_call_fast(struct task_struct *const client_task,
		struct rpc_service_info *const service_info, void *unused_var,
		struct task_struct *const server_task)
{
	unsigned long long packed;

	/* Perform the client -> server fast switch first. */
	do_call_fast(client_task, server_task, service_info);

	/* Save (and disable) live VFP state, including d8-d15. */
	if (static_branch_unlikely(&rtpc_vfp_enabled))
		vfp_save(true);

	/* Pack: high word = service_info, low word = server pt_regs. */
	packed = ((unsigned long long)(uintptr_t)service_info) << 32;
	packed |= (unsigned long long)(uintptr_t)task_pt_regs(server_task);
	return packed;
}

/*
 * Fast-path RPC return entry (called from assembly). Switches back from
 * the server to the client and returns the call result in the low 32 bits
 * with the service_info pointer packed in the high 32 bits.
 */
asmlinkage __aligned(64) rtpc_notrace
unsigned long long rtpc_ret_call_fast(struct task_struct *const server_task,
		struct rpc_service_info *const service_info,
		struct task_struct *const client_task)
{
	const long rc = ret_call_fast(client_task, server_task, service_info);
	unsigned long long packed;

	/* Pack: high word = service_info, low word = truncated return code. */
	packed = ((unsigned long long)(uintptr_t)service_info) << 32;
	packed |= (unsigned long long)(u32)rc;
	return packed;
}

/*
 * Fixup entry (called from assembly) used when the normal RPC path cannot
 * complete. Looks up the calling client and, if one exists, runs the fixup.
 */
asmlinkage __aligned(64) rtpc_notrace
void rtpc_do_fixup(struct task_struct *const server_task,
		struct rpc_service_info *const service_info, const size_t reason)
{
	/* We must use read once, because the service_info is not locked */
	struct task_struct *const caller = READ_ONCE(service_info->calling_client_task);

	/*
	 * A NULL caller means the server was never initialised or has
	 * already exited; there is nothing to fix up in that case.
	 */
	if (likely(caller))
		do_fixup(caller, server_task, service_info, reason);
}

/*
 * Exception-path RPC return entry (called from assembly). Unwinds the call
 * back to the client, saves the VFP context (without d8-d15) when the
 * static key is enabled, and returns the client's pt_regs pointer in the
 * low 32 bits with service_info packed in the high 32 bits.
 */
asmlinkage __aligned(64) rtpc_notrace
unsigned long long rtpc_ret_call_exception(struct task_struct *const server_task,
		struct rpc_service_info *const service_info, struct task_struct *const client_task)
{
	unsigned long long packed;

	ret_call_exception(client_task, server_task, service_info);

	/* Save (and disable) live VFP state; d8-d15 are skipped here. */
	if (static_branch_unlikely(&rtpc_vfp_enabled))
		vfp_save(false);

	/* Pack: high word = service_info, low word = client pt_regs. */
	packed = ((unsigned long long)(uintptr_t)service_info) << 32;
	packed |= (unsigned long long)(uintptr_t)task_pt_regs(client_task);
	return packed;
}


// CONFIG_CPU_V7M changes the implementation of the system call entry:
// vector_swi in arch/arm/kernel/entry-common.S
#ifdef CONFIG_CPU_V7M
#error "CONFIG check failed"
#endif

// CONFIG_THUMB2_KERNEL changes both the kernel entry and the context
// switch code:
// vector_swi in arch/arm/kernel/entry-common.S
// __switch_to in arch/arm/kernel/entry-armv.S
// usr_entry in arch/arm/kernel/entry-armv.S
#ifdef CONFIG_THUMB2_KERNEL
#error "CONFIG check failed"
#endif

// CONFIG_CONTEXT_TRACKING affects the kernel entry path by changing the
// definition of the TRACE() macro in:
// vector_swi in arch/arm/kernel/entry-common.S
// usr_entry in arch/arm/kernel/entry-armv.S
#ifndef CONFIG_CONTEXT_TRACKING
#error "CONFIG check failed"
#endif

// CONFIG_FRAME_POINTER affects the kernel entry path by changing the
// definition of the zero_fp macro in:
// vector_swi in arch/arm/kernel/entry-common.S
// usr_entry in arch/arm/kernel/entry-armv.S
#ifdef CONFIG_FRAME_POINTER
#error "CONFIG check failed"
#endif

// CONFIG_ALIGNMENT_TRAP affects the kernel entry path:
// vector_swi in arch/arm/kernel/entry-common.S
// usr_entry in arch/arm/kernel/entry-armv.S
#ifndef CONFIG_ALIGNMENT_TRAP
#error "CONFIG check failed"
#endif

// CONFIG_TRACE_IRQFLAGS affects the kernel entry path by changing the
// implementation of asm_trace_hardirqs_on in:
// vector_swi in arch/arm/kernel/entry-common.S
#ifdef CONFIG_TRACE_IRQFLAGS
#error "CONFIG check failed"
#endif

// These options change the syscall ABI; with the current configuration,
// r7 carries the syscall number.
#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
#error "CONFIG check failed"
#endif

// CONFIG_CPU_SW_DOMAIN_PAN affects the kernel entry path by changing the
// implementation of uaccess_disable in:
// vector_swi in arch/arm/kernel/entry-common.S
// usr_entry in arch/arm/kernel/entry-armv.S
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
#error "CONFIG check failed"
#endif

// These options select the branch-predictor-hardening implementation used
// during mm switching:
// CONFIG_CPU_V7 compiles:
// arch/arm/mm/proc-v7.S
// arch/arm/mm/proc-v7-bugs.S
// CONFIG_CPU_V6 || CONFIG_CPU_V6K compiles:
// arch/arm/mm/proc-v6.S
// CONFIG_CPU_V7M compiles:
// arch/arm/mm/proc-v7m.S
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || \
	defined(CONFIG_CPU_V7M) || !defined(CONFIG_CPU_V7)
#error "CONFIG check failed"
#endif

// CONFIG_CPU_USE_DOMAINS affects the context switch path:
// __switch_to in arch/arm/kernel/entry-armv.S
#ifdef CONFIG_CPU_USE_DOMAINS
#error "CONFIG check failed"
#endif

// CONFIG_PID_IN_CONTEXTIDR affects the context switch path: when enabled,
// contextidr_notifier (arch/arm/mm/context.c) runs from
// __switch_to (arch/arm/kernel/entry-armv.S).
#ifdef CONFIG_PID_IN_CONTEXTIDR
#error "CONFIG check failed"
#endif

// These options affect the context switch path: when enabled,
// iwmmxt_task_switch (arch/arm/kernel/iwmmxt.S) runs from
// __switch_to (arch/arm/kernel/entry-armv.S).
#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
#error "CONFIG check failed"
#endif

// CONFIG_ARM_THUMBEE affects the context switch path: when enabled,
// thumbee_notifier (arch/arm/kernel/thumbee.c) runs from
// __switch_to (arch/arm/kernel/entry-armv.S).
#ifdef CONFIG_ARM_THUMBEE
#error "CONFIG check failed"
#endif

// These options affect the context switch path: when enabled,
// dsp_do (arch/arm/kernel/xscale-cp0.c) runs from
// __switch_to (arch/arm/kernel/entry-armv.S).
#if defined(CONFIG_CPU_XSCALE) || defined(CONFIG_CPU_XSC3) || defined(CONFIG_CPU_MOHAWK)
#error "CONFIG check failed"
#endif

// CONFIG_ARCH_EP93XX affects the context switch path: when enabled,
// crunch_do (arch/arm/mach-ep93xx/crunch.c) runs from
// __switch_to (arch/arm/kernel/entry-armv.S).
#ifdef CONFIG_ARCH_EP93XX
#error "CONFIG check failed"
#endif

// CONFIG_CPU_V6 changes how per-CPU variables are read; rtpc must match
// the way rtpc_mm_fast_switch is read in:
// arch/arm/mm/proc-v7.S
#ifdef CONFIG_CPU_V6
#error "CONFIG check failed"
#endif

// CONFIG_AS_VFP_VMRS_FPINST changes how the FPEXC/FPINST registers are
// read; this file must match that convention.
#ifndef CONFIG_AS_VFP_VMRS_FPINST
#error "CONFIG check failed"
#endif

// When CONFIG_DEBUG_SPINLOCK is enabled, raw_spin_lock calls
// smp_processor_id(), which may be wrong while a fixup is still pending.
#ifdef CONFIG_DEBUG_SPINLOCK
#error "CONFIG check failed"
#endif

// These options affect the context switch path
// (__switch_to -> switch_tls in arch/arm/kernel/entry-armv.S);
// arch/arm/include/asm/rtos_rtpc.h must match.
#if defined(CONFIG_TLS_REG_EMUL) || defined(CONFIG_CPU_V6) || !defined(CONFIG_CPU_32v6K)
#error "CONFIG check failed"
#endif
