/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/sys/__assert.h>
#include <tfm_ioctl_api.h>
#include <zephyr/logging/log.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* Printing helpers: compile to nothing when neither printk nor the log
 * subsystem is enabled, so fault paths carry no string/formatting cost.
 */
#if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
#define PR_EXC(...) LOG_ERR(__VA_ARGS__)
/* Snapshot a fault address register into a local before checking its
 * VALID bit; expands to nothing when no output backend exists.
 */
#define STORE_xFAR(reg_var, reg) uint32_t reg_var = (uint32_t)reg
#else
#define PR_EXC(...)
#define STORE_xFAR(reg_var, reg)
#endif /* CONFIG_PRINTK || CONFIG_LOG */

/* Detailed per-condition fault info is emitted only at dump level 2. */
#if (CONFIG_FAULT_DUMP == 2)
#define PR_FAULT_INFO(...) PR_EXC(__VA_ARGS__)
#else
#define PR_FAULT_INFO(...)
#endif

#if defined(CONFIG_ARMV8_M_SE)
/* CMSIS core_cm33 header only define these when targeting secure state,
 * i.e __ARM_FEATURE_CMSE==3
 */

/* Secure Fault Status Register Definitions */
#define SAU_SFSR_LSERR_Pos      7U                             /*!< SAU SFSR: LSERR Position */
#define SAU_SFSR_LSERR_Msk     (1UL << SAU_SFSR_LSERR_Pos)     /*!< SAU SFSR: LSERR Mask */

#define SAU_SFSR_SFARVALID_Pos  6U                             /*!< SAU SFSR: SFARVALID Position */
#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */

#define SAU_SFSR_LSPERR_Pos     5U                             /*!< SAU SFSR: LSPERR Position */
#define SAU_SFSR_LSPERR_Msk    (1UL << SAU_SFSR_LSPERR_Pos)    /*!< SAU SFSR: LSPERR Mask */

#define SAU_SFSR_INVTRAN_Pos    4U                             /*!< SAU SFSR: INVTRAN Position */
#define SAU_SFSR_INVTRAN_Msk   (1UL << SAU_SFSR_INVTRAN_Pos)   /*!< SAU SFSR: INVTRAN Mask */

#define SAU_SFSR_AUVIOL_Pos     3U                             /*!< SAU SFSR: AUVIOL Position */
#define SAU_SFSR_AUVIOL_Msk    (1UL << SAU_SFSR_AUVIOL_Pos)    /*!< SAU SFSR: AUVIOL Mask */

#define SAU_SFSR_INVER_Pos      2U                             /*!< SAU SFSR: INVER Position */
#define SAU_SFSR_INVER_Msk     (1UL << SAU_SFSR_INVER_Pos)     /*!< SAU SFSR: INVER Mask */

#define SAU_SFSR_INVIS_Pos      1U                             /*!< SAU SFSR: INVIS Position */
#define SAU_SFSR_INVIS_Msk     (1UL << SAU_SFSR_INVIS_Pos)     /*!< SAU SFSR: INVIS Mask */

#define SAU_SFSR_INVEP_Pos      0U                             /*!< SAU SFSR: INVEP Position */
#define SAU_SFSR_INVEP_Msk     (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */

#endif /* CONFIG_ARMV8_M_SE */

void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);

static struct tfm_ns_fault_service_handler_context g_context;

#if (CONFIG_FAULT_DUMP == 1)
/* Dump level 1: print a one-shot summary of the fault status registers.
 * The register values come from the TF-M callback context (g_context),
 * not from the hardware registers, which are not readable from the
 * non-secure side.  The esf parameter is unused in this variant.
 */
static void fault_show(const struct arch_esf *esf, int fault)
{
	uint32_t cfsr = g_context.status.cfsr;
	uint32_t sfsr = g_context.status.sfsr;

	PR_EXC("Fault! EXC #%d", fault);

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* Split CFSR into its MemManage/BusFault/UsageFault sub-registers. */
	PR_EXC("MMFSR: 0x%x, BFSR: 0x%x, UFSR: 0x%x",
		(uint32_t)((cfsr & SCB_CFSR_MEMFAULTSR_Msk) >> SCB_CFSR_MEMFAULTSR_Pos),
		(uint32_t)((cfsr & SCB_CFSR_BUSFAULTSR_Msk) >> SCB_CFSR_BUSFAULTSR_Pos),
		(uint32_t)((cfsr & SCB_CFSR_USGFAULTSR_Msk) >> SCB_CFSR_USGFAULTSR_Pos));
	PR_EXC("SFSR: 0x%x", sfsr);
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
}
#else
/* For Dump level 2, detailed information is generated by the
 * fault handling functions for individual fault conditions, so this
 * function is left empty.
 *
 * For Dump level 0, no information needs to be generated.
 */
static void fault_show(const struct arch_esf *esf, int fault)
{
	(void)esf;
	(void)fault;
}
#endif /* FAULT_DUMP == 1 */

/* Decode a BusFault from the CFSR snapshot captured by the TF-M
 * callback service and map it to a Zephyr K_ERR_ARM_BUS_* reason code.
 *
 * Several status bits can be set simultaneously (they are sticky), so
 * the checks run in sequence and the last matching condition wins.
 * Returns the selected reason code (K_ERR_ARM_BUS_GENERIC if no known
 * bit is set).
 */
static int bus_fault(struct arch_esf *esf)
{
	uint32_t reason = K_ERR_ARM_BUS_GENERIC;
	uint32_t cfsr = g_context.status.cfsr;

	PR_FAULT_INFO("***** BUS FAULT *****");

	if (cfsr & SCB_CFSR_STKERR_Msk) {
		reason = K_ERR_ARM_BUS_STACKING;
		PR_FAULT_INFO("  Stacking error");
	}
	if (cfsr & SCB_CFSR_UNSTKERR_Msk) {
		reason = K_ERR_ARM_BUS_UNSTACKING;
		PR_FAULT_INFO("  Unstacking error");
	}
	if (cfsr & SCB_CFSR_PRECISERR_Msk) {
		reason = K_ERR_ARM_BUS_PRECISE_DATA_BUS;
		PR_FAULT_INFO("  Precise data bus error");
		/* In a fault handler, to determine the true faulting address:
		 * 1. Read and save the BFAR value.
		 * 2. Read the BFARVALID bit in the BFSR.
		 * The BFAR address is valid only if this bit is 1.
		 *
		 * Software must follow this sequence because another
		 * higher priority exception might change the BFAR value.
		 */
		STORE_xFAR(bfar, SCB->BFAR);

		if ((cfsr & SCB_CFSR_BFARVALID_Msk) != 0) {
			PR_EXC("  BFAR Address: 0x%x", bfar);
		}
	}
	if (cfsr & SCB_CFSR_IMPRECISERR_Msk) {
		reason = K_ERR_ARM_BUS_IMPRECISE_DATA_BUS;
		PR_FAULT_INFO("  Imprecise data bus error");
	}
	/* NOTE: the closing brace of this if-statement is supplied by one
	 * of the two preprocessor branches below, so the LSPERR check is
	 * chained as an "else if" only when the FP extension is enabled.
	 */
	if ((cfsr & SCB_CFSR_IBUSERR_Msk) != 0) {
		reason = K_ERR_ARM_BUS_INSTRUCTION_BUS;
		PR_FAULT_INFO("  Instruction bus error");
#if !defined(CONFIG_ARMV7_M_ARMV8_M_FP)
	}
#else
	} else if (cfsr & SCB_CFSR_LSPERR_Msk) {
		reason = K_ERR_ARM_BUS_FP_LAZY_STATE_PRESERVATION;
		PR_FAULT_INFO("  Floating-point lazy state preservation error");
	} else {
		;
	}
#endif /* !defined(CONFIG_ARMV7_M_ARMV8_M_FP) */

	return reason;
}

/* Decode a SecureFault from the SFSR snapshot captured by the TF-M
 * callback service and map it to a Zephyr K_ERR_ARM_SECURE_* reason.
 *
 * The SFSR bits are sticky and can accumulate across faults; only the
 * highest-priority set bit is reported, checked in the same order as
 * the architectural fault priorities below.
 */
static uint32_t secure_fault(const struct arch_esf *esf)
{
	uint32_t sfsr = g_context.status.sfsr;

	PR_FAULT_INFO("***** SECURE FAULT *****");

	/* Capture SFAR first; it is only meaningful when SFARVALID is set. */
	STORE_xFAR(sfar, g_context.status.sfar);
	if ((sfsr & SAU_SFSR_SFARVALID_Msk) != 0) {
		PR_EXC("  Address: 0x%x", sfar);
	}

	if ((sfsr & SAU_SFSR_INVEP_Msk) != 0) {
		PR_FAULT_INFO("  Invalid entry point");
		return K_ERR_ARM_SECURE_ENTRY_POINT;
	}
	if ((sfsr & SAU_SFSR_INVIS_Msk) != 0) {
		PR_FAULT_INFO("  Invalid integrity signature");
		return K_ERR_ARM_SECURE_INTEGRITY_SIGNATURE;
	}
	if ((sfsr & SAU_SFSR_INVER_Msk) != 0) {
		PR_FAULT_INFO("  Invalid exception return");
		return K_ERR_ARM_SECURE_EXCEPTION_RETURN;
	}
	if ((sfsr & SAU_SFSR_AUVIOL_Msk) != 0) {
		PR_FAULT_INFO("  Attribution unit violation");
		return K_ERR_ARM_SECURE_ATTRIBUTION_UNIT;
	}
	if ((sfsr & SAU_SFSR_INVTRAN_Msk) != 0) {
		PR_FAULT_INFO("  Invalid transition");
		return K_ERR_ARM_SECURE_TRANSITION;
	}
	if ((sfsr & SAU_SFSR_LSPERR_Msk) != 0) {
		PR_FAULT_INFO("  Lazy state preservation");
		return K_ERR_ARM_SECURE_LAZY_STATE_PRESERVATION;
	}
	if ((sfsr & SAU_SFSR_LSERR_Msk) != 0) {
		PR_FAULT_INFO("  Lazy state error");
		return K_ERR_ARM_SECURE_LAZY_STATE_ERROR;
	}

	return K_ERR_ARM_SECURE_GENERIC;
}

/* Report which SPU access-violation events were latched in the TF-M
 * callback context.  Multiple events may be reported in one fault.
 * Always returns K_ERR_ARM_MEM_DATA_ACCESS.
 */
static uint32_t spu_fault(void)
{
	const uint32_t ev = g_context.status.spu_events;

	PR_FAULT_INFO("***** SPU INTERRUPT *****");

	if ((ev & TFM_SPU_EVENT_FLASHACCERR) != 0) {
		PR_FAULT_INFO("  Security violation for the flash memory space");
	}
	if ((ev & TFM_SPU_EVENT_RAMACCERR) != 0) {
		PR_FAULT_INFO("  Security violation for the RAM memory space");
	}
	if ((ev & TFM_SPU_EVENT_PERIPHACCERR) != 0) {
		PR_FAULT_INFO("  Security violation on one or several peripherals.");
	}

	return K_ERR_ARM_MEM_DATA_ACCESS;
}

/* Log a fault that maps to no known handler: exception numbers below 16
 * are architecturally reserved, anything above is a spurious IRQ
 * (reported as fault - 16, the external interrupt line number).
 */
static void reserved_exception(const struct arch_esf *esf, int fault)
{
	ARG_UNUSED(esf);

	const char *kind = (fault < 16) ? "Reserved Exception ("
					: "Spurious interrupt (IRQ ";

	PR_FAULT_INFO("***** %s %d) *****", kind, fault - 16);
}


/* Dispatch on the active exception number (VECTACTIVE from the TF-M
 * context) to the matching decoder and return the fatal-error reason.
 * Also emits the dump-level-1 summary via fault_show().
 */
static uint32_t fault_handle(struct arch_esf *esf, int fault)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	switch (fault) {
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* HardFault is raised for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	case 5: /* BusFault exception number */
		reason = bus_fault(esf);
		break;
	/* Secure Faults are forwarded to non-secure. */
	case 7: /* SecureFault exception number */
		reason = secure_fault(esf);
		break;
	case (16 + 3): /* external IRQ 3: the SPU interrupt line */
		reason = spu_fault();
		break;
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	default:
		reserved_exception(esf, fault);
		break;
	}

	fault_show(esf, fault);

	return reason;
}

/* True when the fault was taken while already in Handler mode (i.e. a
 * nested exception).  EXC_RETURN_MODE is set for a return to Thread
 * mode.  Mirrors the check done by get_esf() in the regular fault path,
 * whose remaining handling is not needed here.
 */
static inline bool is_nested_exc(uint32_t exc_return)
{
	return (exc_return & EXC_RETURN_MODE) == 0U;
}

/* Non-secure counterpart of z_arm_fault: the hardware fault status
 * registers are not accessible here, so the snapshot delivered through
 * the TF-M callback context is decoded instead.  Recoverable-fault
 * handling is intentionally omitted (none of these faults is considered
 * recoverable), and the exception stack frame has already been resolved
 * by the non-secure callback service.  Does not return on the normal
 * path: ends in z_arm_fatal_error().
 */
void arm_fault(uint32_t exc_return, struct arch_esf *esf)
{
	const int fault = g_context.status.vectactive;
	const bool nested_exc = is_nested_exc(exc_return);
	uint32_t reason = fault_handle(esf, fault);

	/* Make the reported xPSR consistent with the fault origin: a
	 * nested fault must carry a non-zero exception number, while a
	 * thread-mode fault must carry none.
	 */
	if (nested_exc) {
		if ((esf->basic.xpsr & IPSR_ISR_Msk) == 0) {
			esf->basic.xpsr |= IPSR_ISR_Msk;
		}
	} else {
		esf->basic.xpsr &= ~(IPSR_ISR_Msk);
	}

	/* Optionally collapse arch-specific reasons into the generic one. */
	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
		reason = K_ERR_CPU_EXCEPTION;
	}

	z_arm_fatal_error(reason, esf);
}

/* Entry point invoked by TF-M's non-secure fault service after it has
 * populated g_context.  Rebuilds a Zephyr exception stack frame and an
 * EXC_RETURN value that look as if the fault had been taken and handled
 * by non-secure code, then hands off to arm_fault().
 */
void tfm_ns_fault_handler_callback(void)
{
	struct arch_esf esf;
	uint32_t exc_return;
	CONTROL_Type ctrl_ns;

	__ASSERT(g_context.valid, "Non-secure callback without valid context");

	/* Rebuild the basic (hardware-stacked) frame from the snapshot. */
	esf.basic.r0 = g_context.frame.r0;
	esf.basic.r1 = g_context.frame.r1;
	esf.basic.r2 = g_context.frame.r2;
	esf.basic.r3 = g_context.frame.r3;
	esf.basic.r12 = g_context.frame.r12;
	esf.basic.lr = g_context.frame.lr;
	esf.basic.pc = g_context.frame.pc;
	esf.basic.xpsr = g_context.frame.xpsr;

	/* Provide the z_arm_fatal_error function an EXC_RETURN that looks like
	 * the fault is triggered and handled by NS handlers.
	 */
	exc_return = g_context.status.exc_return;
	/* Adjust EXC_RETURN value to emulate NS exception entry */
	exc_return &= ~EXC_RETURN_ES;

	/* Update SPSEL to reflect correct CONTROL_NS.SPSEL setting */
	exc_return &= ~(EXC_RETURN_SPSEL);
	ctrl_ns.w = g_context.status.control;
	if (ctrl_ns.b.SPSEL) {
		exc_return |= EXC_RETURN_SPSEL;
	}

#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	/* Also reconstruct the callee-saved registers and stack pointers
	 * for the extended crash report.  callee_regs lives on this stack
	 * frame; arm_fault() ends in z_arm_fatal_error(), so the pointer
	 * stays valid for the duration of the fault report.
	 */
	_callee_saved_t callee_regs;

	esf.extra_info.callee = &callee_regs;
	esf.extra_info.msp = g_context.status.msp;

	callee_regs.psp = g_context.status.psp;

	callee_regs.v1 = g_context.registers.r4;
	callee_regs.v2 = g_context.registers.r5;
	callee_regs.v3 = g_context.registers.r6;
	callee_regs.v4 = g_context.registers.r7;
	callee_regs.v5 = g_context.registers.r8;
	callee_regs.v6 = g_context.registers.r9;
	callee_regs.v7 = g_context.registers.r10;
	callee_regs.v8 = g_context.registers.r11;

	esf.extra_info.exc_return = exc_return;
#endif

	arm_fault(exc_return, &esf);
}

/* Expose the captured fault context, or NULL when no fault information
 * has been recorded yet.
 */
struct tfm_ns_fault_service_handler_context *tfm_ns_fault_get_context(void)
{
	return g_context.valid ? &g_context : NULL;
}

/* Boot-time hook: register tfm_ns_fault_handler_callback() and the
 * context buffer with TF-M's non-secure fault service.  Returns the
 * service's error code (0 on success).
 */
static int ns_fault_init(void)
{
	int err = tfm_platform_ns_fault_set_handler(&g_context, &tfm_ns_fault_handler_callback);

	if (err) {
		LOG_ERR("TF-M non-secure callback initialization failed, error: %d", err);
	}

	return err;
}

SYS_INIT(ns_fault_init, PRE_KERNEL_1, 0);
