// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Huawei Technologies
 *
 * Author: huangzhenqiang <huangzhenqiang2@huawei.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * File: hkip.c
 *		main file of HKIP
 *	1. set up the dynamic switch for HKIP.
 *	2. initialise the ro-pool of HKIP, and remove the write permission.
 *	3. set up the genpool of HKIP.
 *	4. register the kernel message to ATF.
 *
 */
#ifdef CONFIG_EULEROS_HKIP_SERVER
#define pr_fmt(fmt) "HKIP: " fmt

#include <linux/genalloc.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/seq_file.h>
#include <linux/arm-smccc.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/arm_sdei.h>
#include <linux/kprobes.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <asm/sdei.h>
#include <asm-generic/sections.h>
#include <asm-generic/pgalloc.h>
#include <asm/thread_info.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>
#endif
#include <asm/pgalloc.h>
#include <linux/security.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/processor.h>

#include <linux/hkip.h>

#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)

#define HKIP_VERSION 0xbf00ff00 /* Current version */
#define HKIP_UUID 0xbf00ff01 /* Service UUID */
#define HKIP_TEXT_SETUP 0xbf00ff02 /* Linux TEXT segment setup */

#define HKIP_REGS_SETUP 0xbf00ff04 /* Linux Sys regs init */

#define HKIP_VALID_REPLY 0xdeadbeef

#define ARM_SDEI_SYSREGS_EVENT 1003
#define ARM_SDEI_CODEHASH_EVENT 1004
#define ARM_SDEI_PGD_EVENT 1005
#define ARM_SDEI_MODULE_EVENT 1006

#define __hash_start _text /* Start of the hashed buffer */
#define __hash_end _etext /* End of the hashed buffer */
#define SWAPPER_SIZE	   \
	PAGE_SIZE // ((uintptr_t)swapper_pg_end - (uintptr_t)swapper_pg_dir)

void remap_swapper_pg_dir(void);
int hkip_scan_init(void);

/* Physical addresses of the reserved block of memory for pgtables */
atomic64_t ro_mem_block = ATOMIC64_INIT(0);
phys_addr_t ro_mem_block_end, ro_mem_block_start;
static phys_addr_t ro_mem_block_vmlinuz;
bool hkip_early_alloc = true;

/* HKIP enabled at boot */
bool hkip_status;
EXPORT_SYMBOL(hkip_status);
atomic64_t hkip_dumpstack = ATOMIC64_INIT(1);
EXPORT_SYMBOL(hkip_dumpstack);
enum hkip_safety_level_type hkip_safety_level;
/* HKIP enabled at ATF */
static bool hkip_atf_status = true;
static bool pgd_remapped;
static bool hkip_module_start;

static int sdei_event;
static struct work_struct sdei_wq;
static struct task_struct *kthread;

/*
 * Parse the "hkip_enabled=" kernel command-line option and decide
 * whether HKIP runs, and at which safety level.
 *
 * Returns 0 on success, -1 when ATF lacks HKIP support or the
 * argument is not a recognised level.
 */
static int __init hkip_enabled_setup(char *arg)
{
	u32 reply = invoke_smc_fn(HKIP_UUID, 0, 0, 0);

	pr_info("ATF answered with 0x%x\n", reply);
	if (reply != HKIP_VALID_REPLY) {
		/* Without secure-world support the feature is unusable. */
		pr_err("HKIP is not supported in ATF\n");
		hkip_atf_status = false;

		return -1;
	}

	pr_info("parse hkip cmdline: %s\n", arg);
	/* A missing argument defaults to the normal level. */
	if (arg == NULL || !strcmp(arg, "normal")) {
		hkip_safety_level = HKIP_NORMAL_LEVEL;
		pr_info("HKIP is set to the normal level.\n");
	} else if (!strcmp(arg, "strict")) {
		hkip_safety_level = HKIP_STRICT_LEVEL;
		pr_info("HKIP is set to the strict level.\n");
	} else {
		hkip_safety_level = HKIP_CLOSE_LEVEL;
		hkip_status = false;
		pr_err("hkip parameter is wrong.\n");
		return -1;
	}

#ifdef CONFIG_KASAN
	/* HKIP is never enabled on KASAN builds. */
	hkip_status = false;
	pr_info("Running with KASAN, HKIP will not work\n");
#else
	switch (hkip_safety_level) {
	case HKIP_NORMAL_LEVEL:
	case HKIP_STRICT_LEVEL:
		hkip_status = true;
		pr_info("HKIP is enabled by the start-up parameter\n");
		break;
	default:
		break;
	}
#endif

	return 0;
}
early_param("hkip_enabled", hkip_enabled_setup);

/* True when @pgtable lies inside the reserved read-only pgtable pool. */
static inline bool is_ropgtable(phys_addr_t pgtable)
{
	return (pgtable < ro_mem_block_end) && (pgtable >= ro_mem_block_start);
}

bool hkip_ro_pool(void *ptr)
{
	phys_addr_t phys_ptr = 0;

	if (__is_lm_address((uintptr_t)ptr)) {
		phys_ptr = __pa(ptr);
		if (phys_ptr >= __pa_symbol(__start_rodata) &&
			phys_ptr <= __pa_symbol(__inittext_begin))
			return true;
	} else {
		if ((uintptr_t)ptr >= (uintptr_t)&__start_rodata &&
			(uintptr_t)ptr <= (uintptr_t)&__inittext_begin)
			return true;
	}
	if (!pgd_remapped)
		return false;

	return is_ropgtable(__pa_nodebug(ptr));
}
EXPORT_SYMBOL(hkip_ro_pool);

/*
 * Ask the secure world to patch one instruction (INSN is always 32
 * bits long) at @addr.
 *
 * Return: 0 on success, -EFAULT (after a WARN) when ATF rejects it.
 */
int hkip_insn_write(uintptr_t addr, uint32_t insn)
{
	u32 err = invoke_smc_fn(HKIP_INSN_WRITE, addr, insn, 0);

	if (!err)
		return 0;

	pr_err("HKIP_INSN_WRITE write err! addr: %pK, insn: %x\n",
	       (void *)addr, insn);
	WARN_ON(err);
	return -EFAULT;
}

/*
 * Report a module load/unload transition to ATF and keep the
 * hkip_dumpstack counter in step with in-flight module loads.
 * No-op until hkip_enable() has switched module supervision on.
 */
void hkip_module_status(int load)
{
	u32 err;

	if (!hkip_enabled() || !hkip_module_start)
		return;

	err = invoke_smc_fn(HKIP_MODULE_STATUS, load, 0, 0);
	BUG_ON(err);

	switch (load) {
	case HKIP_ALLOW_MODULE_LOAD:
		atomic64_inc(&hkip_dumpstack);
		break;
	case HKIP_END_MODULE_LOAD:
		atomic64_dec(&hkip_dumpstack);
		break;
	default:
		break;
	}
}

static struct gen_pool *hkip_ro_genpool;
static int hkip_ropool_init(void)
{
	int err;
	phys_addr_t start = atomic64_xchg(&ro_mem_block, ro_mem_block_end);

	ro_mem_block_vmlinuz = start;

	if (!hkip_status)
		return 0;

	hkip_ro_genpool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!hkip_ro_genpool) {
		pr_err("HKIP create gen pool failed!\n");
		return -1;
	}

	err = gen_pool_add(hkip_ro_genpool, (uintptr_t)__va(start),
			   (size_t)(ro_mem_block_end - start), NUMA_NO_NODE);
	if (err) {
		pr_err("RO POOL initialized failed, %d\n", err);
		return -1;
	}

	pr_info("ro genpool initialized successfully %pK %pK\n",
		(void *)gen_pool_avail(hkip_ro_genpool), (void *)__va(start));
	pr_info("RO POOL initialized successfully %pK %pK %pK\n",
		(void *)ro_mem_block_start, (void *)ro_mem_block_end,
		(void *)start);

	return 0;
}
early_initcall(hkip_ropool_init);

/* Have the secure world zero a freshly allocated pgd page for us. */
static void __zero_alloc_pgd(phys_addr_t pgd)
{
	u32 err = invoke_smc_fn(HKIP_PGD_ALLOC, (uintptr_t)pgd, 0, 0);

	BUG_ON(err);
}

uintptr_t hkip_ropool_pgtable_alloc(void)
{
	uintptr_t pgd;

	if (!hkip_ro_genpool) {
		/*
		 * still in __meminit so return pgtables like
		 * early pgtable alloc.
		 */
		if (!slab_is_available()) {
			void *pgd;
			phys_addr_t phys;

			phys = atomic64_fetch_add(PAGE_SIZE, &ro_mem_block);
			pgd = __va(phys);

			if (!pgd_remapped)
				clear_page((void *)pgd);
			else
				__zero_alloc_pgd(__pa(pgd));

			return (uintptr_t)pgd;
		}

		BUG_ON(hkip_ropool_init());
	}

	pgd = gen_pool_alloc(hkip_ro_genpool, PAGE_SIZE);
	BUG_ON(!pgd);

	if (!pgd_remapped)
		clear_page((void *)pgd);
	else
		__zero_alloc_pgd(__pa(pgd));

	return pgd;
}

/* True for pool pages handed out before the genpool existed. */
static inline bool is_vmlinux_ropgtable(phys_addr_t pgtable)
{
	return (pgtable < ro_mem_block_vmlinuz) &&
	       (pgtable >= ro_mem_block_start);
}

/*
 * Return a page-table page to the HKIP read-only pool.  Pages that
 * were allocated before the genpool existed (early boot / vmlinux
 * page tables) are never given back.
 */
void hkip_ropool_pgtable_free(uintptr_t pgd)
{
	/*
	 * Fix: validate the pointer *before* translating it with
	 * __pa_nodebug() — the original checked it only after use.
	 */
	BUG_ON(!pgd);

	if (!hkip_ro_genpool) {
		WARN(1, "Free called before init");

		return;
	}

	if (is_vmlinux_ropgtable(__pa_nodebug(pgd))) {
		pr_err("the init memory can not be free!\n");

		return;
	}

	gen_pool_free(hkip_ro_genpool, pgd, PAGE_SIZE);
}

/*
 * Pass the kernel page-table locations and the fixmap range to ATF
 * (HKIP_REGS_SETUP) so it can set up system-register protection.
 *
 * Return: 0 on success, the secure world's error code otherwise.
 */
static int __init hkip_initialize_sys_reg(uint64_t swapper_pg,
					  uint64_t swapper_size,
					  uint64_t tramp_pg,
					  uint64_t tramp_size)
{
	struct arm_smccc_res res;

	arm_smccc_smc(HKIP_REGS_SETUP, swapper_pg, swapper_size, tramp_pg,
			  tramp_size, FIXADDR_START, FIXADDR_TOP, 0, &res);
	/* Fix: "regsters" -> "registers" in both log messages. */
	if (res.a0 == 0)
		pr_info("system registers setup success!\n");
	else
		pr_warn("Failed to setup system registers protection!\n");

	return res.a0;
}

/*
 * Late-boot entry point: log the kernel layout and current TTBR1,
 * then ask ATF to set up system-register protection.
 */
static int __init hkip_init(void)
{
	u64 text_start = __phys_addr_symbol((uintptr_t)_text);
	u64 text_end = __phys_addr_symbol((uintptr_t)__init_begin);
	u64 swapper_phys = __phys_addr_symbol((uintptr_t)swapper_pg_dir);
	u64 ttbr1;

	if (!hkip_status)
		return 0;

	asm volatile("mrs %0, ttbr1_el1" : "=r"(ttbr1)::"memory");

	pr_warn("TTBR1 at %pK", (void *)ttbr1);
	pr_info("hkip initializing...");
	pr_warn("initial kernel code protect %pK -> %pK", (void *)text_start,
		(void *)text_end);
	pr_warn("swapper at %pK", (void *)swapper_phys);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	/* KPTI builds also hand over the trampoline page table. */
	return hkip_initialize_sys_reg((uintptr_t)swapper_pg_dir, SWAPPER_SIZE,
				       (uintptr_t)tramp_pg_dir, PAGE_SIZE);
#else
	return hkip_initialize_sys_reg((uintptr_t)swapper_pg_dir, SWAPPER_SIZE,
				       0, 0);
#endif
}
module_init(hkip_init);

static int hkip_report_alarm;

/*
 * SDEI callback: runs in SDEI (NMI-like) context, so just latch the
 * event id and raise a flag for the polling kthread; anything heavier
 * is deferred to sdei_process_event().
 */
static int sdei_hash_callback(u32 event, struct pt_regs *regs, void *arg)
{
	/*
	 * Not aiming for accuracy, because whichever event we catch
	 * it's going to be fatal.
	 *
	 * Fix: WRITE_ONCE() — both variables are read concurrently by
	 * hkip_schedule_alarm() on another CPU; avoid torn/elided
	 * accesses.
	 */
	WRITE_ONCE(sdei_event, event);
	WRITE_ONCE(hkip_report_alarm, 1);

	return 0;
}
NOKPROBE_SYMBOL(sdei_hash_callback);

/*
 * Kthread body: poll the alarm flag raised by sdei_hash_callback()
 * once a second and kick the deferred-report workqueue when it fires.
 */
int hkip_schedule_alarm(void *unused)
{
	while (!kthread_should_stop()) {
		/* Fix: READ_ONCE/WRITE_ONCE — the flag is written from
		 * SDEI (NMI-like) context on another CPU. */
		if (READ_ONCE(hkip_report_alarm) == 1) {
			schedule_work(&sdei_wq);
			WRITE_ONCE(hkip_report_alarm, 0);
		}
		msleep(1000);
	}

	return 0;
}

/*
 * Walk init_mm down to the PTE that maps @addr and ask the secure
 * world (HKIP_PTE_WRITE) to rewrite it with PTE_WRITE toggled off and
 * PTE_RDONLY set, i.e. make the page read-only to the kernel.
 *
 * Silently does nothing if any level of the walk is not a table
 * mapping; the do/while(0) only serves as a structured break target.
 */
static void __remap_ro_pgtable(uintptr_t addr)
{
	pgd_t *pgd;
	p4d_t *p4dp;

	pr_debug("pgd = %pK\n", (void *)init_mm.pgd);
	pgd = pgd_offset(&init_mm, addr);
	pr_debug("[%pK] *pgd=%016llx", (void *)addr, pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;
		pteval_t entry;
		p4d_t p4d;

		/* Fake a local p4d so pud_offset() can be used. */
		p4dp = &p4d;

		/* Raw descriptor check: bit 1 set means "table" on
		 * arm64 — bail out for anything else. */
		if (!(pgd_val(*pgd) & 2))
			break;

		p4d.pgd = *pgd;
		pud = pud_offset(p4dp, addr);
		pr_debug(", *pud=%016llx", pud_val(*pud));
		if (pud_none(*pud) || pud_bad(*pud))
			break;
		BUG_ON(!pud_table(*pud));

		pmd = pmd_offset(pud, addr);
		pr_debug(", *pmd=%016llx", pmd_val(*pmd));
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;
		BUG_ON(!pmd_table(*pmd));

		pte = pte_offset_map(pmd, addr);
		/* NOTE(review): the XOR assumes PTE_WRITE is currently
		 * set in the entry — confirm pool pages are always
		 * mapped writable before this runs. */
		entry = pte_val(*pte);
		entry ^= PTE_WRITE;
		entry |= PTE_RDONLY;

		/* Only the secure world may install the new PTE. */
		BUG_ON(invoke_smc_fn(HKIP_PTE_WRITE, (uintptr_t)pte, entry, 0));
		pr_debug(", *pte(%pK)=%016llx\n", (void *)pte, pte_val(*pte));
		pte_unmap(pte);

		return;
	} while (0);
}

static BLOCKING_NOTIFIER_HEAD(hkip_warning_chain);

/* Invoke the HKIP warning notifier chain with event @val and message @v. */
static int call_hkip_notifiers(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&hkip_warning_chain, val, v);
}

/* Subscribe @nb to HKIP warning notifications. */
int register_hkip_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&hkip_warning_chain, nb);
}
EXPORT_SYMBOL(register_hkip_notifier);

/* Unsubscribe @nb from HKIP warning notifications. */
int unregister_hkip_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&hkip_warning_chain, nb);
}
EXPORT_SYMBOL(unregister_hkip_notifier);

/*
 * Workqueue handler for SDEI alarms: run the warning notifier chain
 * and log the event.  Module-load events are logged at debug level;
 * everything else is an error.
 */
static void sdei_process_event(struct work_struct *work)
{
	char *msg;

	pr_warn("%u: SDEI work %d\n", smp_processor_id(), sdei_event);

	switch (sdei_event) {
	case ARM_SDEI_SYSREGS_EVENT:
		msg = "[HKIP WARN]: System registers have been changed!\n";
		call_hkip_notifiers(ARM_SDEI_SYSREGS_EVENT, msg);
		pr_err("%s", msg);
		break;

#ifndef CONFIG_EULEROS_HKIP_NON_FATAL
	case ARM_SDEI_CODEHASH_EVENT:
		msg = "[HKIP WARN]: Kernel code/RO data has been changed!\n";
		call_hkip_notifiers(ARM_SDEI_CODEHASH_EVENT, msg);
		pr_err("%s", msg);
		break;
#endif

	case ARM_SDEI_PGD_EVENT:
		msg = "[HKIP WARN]: Kernel has a invalid mapping in page tables\n";
		call_hkip_notifiers(ARM_SDEI_PGD_EVENT, msg);
		pr_err("%s", msg);
		break;

	case ARM_SDEI_MODULE_EVENT:
		msg = "[HKIP WARN]: New kernel module loaded\n";
		call_hkip_notifiers(ARM_SDEI_MODULE_EVENT, msg);
		pr_debug("%s", msg);
		break;

	default:
		pr_err("Unknown event! id: %d\n", sdei_event);
		break;
	}
}

static int hkip_enable_sdei(void)
{
	int ret;

	ret = sdei_event_enable(ARM_SDEI_SYSREGS_EVENT);
	if (ret) {
		pr_err("SDEI enable sysregs check for HKIP on cpu%d failed, ret = %d\n",
			   smp_processor_id(), ret);

		return ret;
	}

#ifndef CONFIG_EULEROS_HKIP_NON_FATAL
	ret = sdei_event_enable(ARM_SDEI_CODEHASH_EVENT);
	if (ret) {
		pr_err("SDEI enable code hash for HKIP on cpu%d failed, ret = %d\n",
			   smp_processor_id(), ret);

		return ret;
	}
#endif

	ret = sdei_event_enable(ARM_SDEI_PGD_EVENT);
	if (ret) {
		pr_err("SDEI enable pgd scan for HKIP on cpu%d failed, ret = %d\n",
			   smp_processor_id(), ret);

		return ret;
	}

#ifndef CONFIG_EULEROS_HKIP_NON_FATAL
	ret = sdei_event_enable(ARM_SDEI_MODULE_EVENT);
	if (ret) {
		pr_err("SDEI enable module supervision for HKIP on cpu%d failed, ret = %d\n",
			   smp_processor_id(), ret);

		return ret;
	}
#endif

	return 0;
}

static int hkip_register_sdei(void)
{
	int ret;

	INIT_WORK(&sdei_wq, sdei_process_event);

	ret = sdei_event_register(ARM_SDEI_SYSREGS_EVENT, sdei_hash_callback,
				  NULL);
	if (ret) {
		pr_err("SDEI register callback of sysregs check failed, ret = %d\n",
			   ret);

		return ret;
	}
	pr_info("SDEI register callback of sysregs check successfully\n");

#ifndef CONFIG_EULEROS_HKIP_NON_FATAL
	ret = sdei_event_register(ARM_SDEI_CODEHASH_EVENT, sdei_hash_callback,
				  NULL);
	if (ret) {
		pr_err("SDEI register callback of code hash failed, ret = %d\n",
			   ret);

		return ret;
	}
	pr_info("SDEI register callback of code hash successfully\n");
#endif

	ret = sdei_event_register(ARM_SDEI_PGD_EVENT, sdei_hash_callback, NULL);
	if (ret) {
		pr_err("SDEI register callback of pgd scan failed, ret = %d\n",
			   ret);

		return ret;
	}
	pr_info("SDEI register callback of pgd scan successfully\n");

#ifndef CONFIG_EULEROS_HKIP_NON_FATAL
	ret = sdei_event_register(ARM_SDEI_MODULE_EVENT, sdei_hash_callback,
				  NULL);
	if (ret) {
		pr_err("SDEI register callback of module supervision failed, ret = %d\n",
			   ret);

		return ret;
	}
	pr_info("SDEI register callback of module supervision successfully\n");
#endif

	ret = hkip_enable_sdei();

	return ret;
}

/* securityfs "status" read hook: report whether HKIP is enabled. */
static int get_hkip_status(void *data, u64 *val)
{
	*val = hkip_status;
	pr_info("get hkip status from sys\n");

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_hkip_status, get_hkip_status, NULL, "%llu\n");

/*
 * Create /sys/kernel/security/hkip/status (mode 0400).
 *
 * Return: the "hkip" directory dentry on success, NULL on failure.
 */
static struct dentry *hkip_init_secfs_status(void)
{
	struct dentry *dir = securityfs_create_dir("hkip", NULL);

	/*
	 * Fix: securityfs_create_dir()/securityfs_create_file() signal
	 * failure with an ERR_PTR, not NULL, so the original NULL
	 * comparisons could never detect an error.
	 */
	if (IS_ERR_OR_NULL(dir))
		return NULL;

	pr_info("hkip dir is created successfully\n");
	if (IS_ERR_OR_NULL(securityfs_create_file("status", 0400, dir, NULL,
						  &fops_hkip_status))) {
		pr_err("hkip/status init failed\n");
		securityfs_remove(dir);
		return NULL;
	}

	return dir;
}

/* Set up the HKIP securityfs entries; -ENOMEM when creation fails. */
static int hkip_secfs_init(void)
{
	pr_info("*** setup the securityfs for hkip status ***\n");

	if (hkip_init_secfs_status() != NULL)
		return 0;

	pr_err("securityfs init failed\n");
	return -ENOMEM;
}

/*
 * Start the kthread that polls for SDEI alarms.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int hkip_alarm_init(void)
{
	pr_info("this is hkip schedule alarm.\n");

	kthread = kthread_run(hkip_schedule_alarm, NULL, "hkipalarmthread");
	if (IS_ERR(kthread)) {
		int err = PTR_ERR(kthread);

		/* Fix: don't leave an ERR_PTR in the global for later
		 * users to dereference, and return the real errno
		 * instead of a bare -1. */
		kthread = NULL;
		pr_err("[HKIP] create hkip kthread failed.\n");
		return err;
	}
	pr_info("trigger hkip schedule alarm succeed.\n");

	return 0;
}

/*
 * Final HKIP bring-up, run once boot-time page-table construction is
 * done:
 *  - register/enable the SDEI alarm events,
 *  - remap the reserved pgtable pool (and optionally swapper) RO,
 *  - scan existing tables and register the kernel text hash with ATF.
 *
 * Always returns 0; does nothing unless both the kernel and ATF sides
 * of HKIP are enabled.
 */
int hkip_enable(void)
{
	phys_addr_t pgd;
	struct arm_smccc_res res;
	u32 ret;

	u64 kernel_start = __phys_addr_symbol((uintptr_t)__hash_start);
	u64 kernel_end = __phys_addr_symbol((uintptr_t)__hash_end);
	u64 swapper_pg = __phys_addr_symbol((uintptr_t)swapper_pg_dir);

	if (!hkip_status || !hkip_atf_status)
		return 0;

	/* NOTE(review): return value ignored — confirm a failed SDEI
	 * setup is meant to be non-fatal here. */
	hkip_register_sdei();

	/*
	 * Must be set before the remap loop: once pages are read-only,
	 * hkip_ropool_pgtable_alloc() must zero them via the secure
	 * world instead of clear_page().
	 */
	pgd_remapped = true;
	for (pgd = ro_mem_block_start; pgd < ro_mem_block_end; pgd += PAGE_SIZE)
		__remap_ro_pgtable((uintptr_t)__va(pgd));

#ifndef CONFIG_EULEROS_HKIP_NON_FATAL
	remap_swapper_pg_dir();
#endif

	/* Scan the tables and set PXNtable bits everywhere */
	hkip_scan_init();

	/* hkip_module_status() may report transitions from now on. */
	hkip_module_start = true;
	atomic64_set(&hkip_dumpstack, 0);
	/* Hand the text hash range and the RO pool bounds to ATF. */
	arm_smccc_smc(HKIP_TEXT_SETUP, kernel_start, kernel_end,
			  (uintptr_t)__hash_start, ro_mem_block_start,
			  ro_mem_block_end, 0, 0, &res);
	if (res.a0 == 0)
		pr_info("Kernel hash %lx-%lx\n", res.a1, res.a2);
	else
		pr_warn("HASH kernel answered: %ld", res.a0);

	ret = invoke_smc_fn(HKIP_SCAN_PGD, swapper_pg, (uintptr_t)__hash_start,
				(uintptr_t)__hash_end);

	hkip_secfs_init();
	hkip_alarm_init();

	pr_info("SPD scanned with 0x%x %d\n", ret, sdei_event);
	pr_info("HKIP is FULLY enabled\n");

	return 0;
}

/*
 * Early (memblock-time) page-table page allocator.  With HKIP active
 * the page is carved from the reserved read-only pool, which is
 * allocated from memblock on first use; otherwise a plain memblock
 * page is returned.
 */
phys_addr_t __init hkip_early_pgtable_alloc(void)
{
	phys_addr_t phys;

	if (!hkip_enabled() || !hkip_early_alloc) {
		phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!phys)
			panic("Failed to allocate page table page\n");
		return phys;
	}

	if (!ro_mem_block_start) {
		/* We require it to be aligned to 2MB - section size */
		ro_mem_block_start = memblock_phys_alloc(HKIP_RO_POOL_SIZE,
							 HKIP_RO_POOL_ALIGN);
		BUG_ON(!ro_mem_block_start);
		atomic64_set(&ro_mem_block, ro_mem_block_start);
		ro_mem_block_end = ro_mem_block_start + HKIP_RO_POOL_SIZE;
	}

	if (atomic64_read(&ro_mem_block) >= ro_mem_block_end) {
		pr_warn("ro-pool is full\n");
		BUG();
	}

	return atomic64_fetch_add(PAGE_SIZE, &ro_mem_block);
}

/* Cap the DMA physical limit at 4GiB when HKIP is active. */
void __init hkip_reserve_crashkernel_limit(void)
{
	if (!hkip_enabled())
		return;

	arm64_dma_phys_limit = SZ_4G;
	/* NOTE(review): the value logged is CRASH_ADDR_LOW_MAX rather
	 * than the limit just set — confirm they coincide. */
	pr_info("[HKIP] dma phys limit = %pK\n", (void *)CRASH_ADDR_LOW_MAX);
}

/*
 * HKIP-aware variant of __pte_alloc_kernel(): PTE pages backing the
 * module vmalloc range come from the read-only pool, everything else
 * from the normal kernel PTE allocator.  The populate-under-lock /
 * free-if-raced shape mirrors mm's __pte_alloc_kernel().
 *
 * Return: 0 on success, -ENOMEM if the PTE page cannot be allocated.
 */
int __pte_alloc_kernel_hkip(pmd_t *pmd, unsigned long address)
{
	pte_t *new;

	if (hkip_enabled() && MODULE_VMALLOC_ADDR(address)) {
		new = (pte_t *)hkip_ropool_pgtable_alloc();
		pr_debug("HKIP: Alloc PTE ro pool modules %pK %pK\n",
			(void *)address, (void *)new);
	} else {
		new = pte_alloc_one_kernel(&init_mm);
	}

	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	/* NOTE(review): when we lose the race, an ro-pool page is
	 * released via pte_free_kernel(), not hkip_ropool_pgtable_free()
	 * — confirm this is intentional. */
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

#endif /* CONFIG_EULEROS_HKIP_SERVER */
