// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
 *  Provide angel core method to monitor kernel start.
 */
#ifdef CONFIG_EULEROS_ANGEL_CORE_MONITOR
#include <linux/mm.h>
#include <linux/memblock.h>
#include <asm/mmu_context.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/crash_core.h>
#include <asm/angel_core.h>
#include <asm/arch_gicv3.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/smp_plat.h>
#include <asm/arch_timer.h>
#include <asm/smp.h>
#include <asm/nmi.h>

/* Physical base address of the angel core section (set at early boot). */
unsigned long angel_section_phys;
/* NOTE(review): never written in this file -- confirm who sets it. */
unsigned long angel_section_size;
/* Pool of memblock-reserved pages used to build transition page tables. */
struct angel_page_pool angel_page_pool;
/* Byte length of the angel core section, excluding RES_START_OFFSET. */
unsigned long angel_section_len;
/* Monitor timeout in milliseconds; bounded by angel_timeout_store(). */
static long angel_timeout = DEFAULT_ANGEL_MONITOR_TIMEOUT_MS;

/*
 * Busy-wait (up to @timeout_us microseconds) for the angel core to
 * announce itself by setting section->comm.  A section without a
 * recorded cpu means no angel core was ever started, so there is
 * nothing to wait for.
 */
void wait_for_angel_core(unsigned int timeout_us)
{
	struct angel_core_section *section;

	if (!angel_core_section_phys())
		return;

	section = angel_core_section_virt();
	if (!section || !section->cpu)
		return;

	while (!section->comm && timeout_us--)
		udelay(1);
}

/*
 * Hand out one page from the reserved pool.
 *
 * Returns the page's kernel virtual address, or 0 when the pool is
 * unconfigured or exhausted.  Pages are never returned to the pool.
 */
static unsigned long alloc_one_page_reserved(struct angel_page_pool *pool)
{
	unsigned long aligned;

	/* An unconfigured pool hands out nothing. */
	if (!pool->start || !pool->end)
		return 0;

	aligned = PAGE_ALIGN(pool->start);
	/* Refuse when less than one whole page remains. */
	if (aligned + PAGE_SIZE > pool->end)
		return 0;

	/* Consume the page and translate its physical address. */
	pool->start = aligned + PAGE_SIZE;
	return (unsigned long)__va(aligned);
}

/*
 * Install a single 4K PTE mapping vaddr -> paddr in the page table
 * rooted at @pgd, allocating any missing intermediate tables from the
 * reserved page pool.
 *
 * Fixes vs. the original:
 *  - the p4d allocation was followed by "if (!pud)", testing an
 *    uninitialized pointer (UB) and missing the real OOM condition;
 *  - freshly allocated table pages are now zeroed before being linked,
 *    so stale pool memory cannot be misread as present entries
 *    (only the top-level pgd was cleared by the caller before).
 *
 * Returns 0 on success, -ENOMEM when the pool is exhausted.
 */
static int do_mapping_4k_pooling(unsigned long vaddr, unsigned long paddr,
				pgd_t *pgd, struct angel_page_pool *pool)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		p4d = (p4d_t *)alloc_one_page_reserved(pool);
		if (!p4d)	/* was "!pud": uninitialized pointer */
			return -ENOMEM;
		clear_page(p4d);
		set_pgd(pgd, __pgd(__pa(p4d) | PUD_TYPE_TABLE));
	}
	p4d = p4d_offset(pgd, vaddr);
	if (!p4d_present(*p4d)) {
		pud = (pud_t *)alloc_one_page_reserved(pool);
		if (!pud)
			return -ENOMEM;
		clear_page(pud);
		set_p4d(p4d, __p4d(__pa(pud) | PUD_TYPE_TABLE));
	}
	pud = pud_offset(p4d, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)alloc_one_page_reserved(pool);
		if (!pmd)
			return -ENOMEM;
		clear_page(pmd);
		set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)alloc_one_page_reserved(pool);
		if (!pte)
			return -ENOMEM;
		clear_page(pte);
		set_pmd(pmd, __pmd(__pa(pte) | PMD_TYPE_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
}
/*
 * Allocate a fresh top-level page table from the reserved pool and map
 * one page vaddr -> paddr into it.  The table's physical address is
 * returned through @pgtable.
 *
 * NOTE(review): only a single page is mapped; the @size argument is
 * currently unused -- confirm callers never pass more than PAGE_SIZE.
 *
 * Fix vs. the original: the error path called free_page() on the pgd
 * page, but that page came from the memblock-reserved pool, not the
 * buddy allocator -- freeing it would hand a reserved page to the page
 * allocator while the pool may hand it out again later.  The page is
 * simply left consumed (the pool has no free operation).
 *
 * Returns 0 on success, -ENOMEM when the pool runs dry.
 */
int build_transition_pgtable(unsigned long vaddr,
			unsigned long paddr,
			unsigned long size,
			unsigned long *pgtable)
{
	pgd_t *pgd;
	int ret;
	struct angel_page_pool *pool = &angel_page_pool;

	pgd = (pgd_t *)alloc_one_page_reserved(pool);
	if (pgd == NULL)
		return -ENOMEM;
	clear_page(pgd);

	ret = do_mapping_4k_pooling(vaddr, paddr, pgd, pool);
	if (ret)
		return ret;

	*pgtable = (unsigned long)__pa(pgd);
	return 0;
}

#ifndef CONFIG_KASAN
/*
 * Switch this CPU's TTBR0 to @pgd.  The sequence is order-critical:
 * park TTBR0 on the reserved (empty) table, flush stale TLB entries,
 * shrink T0SZ for the idmap range, then install the new table via
 * cpu_switch_mm().  Mirrors the kernel's cpu_install_idmap() pattern.
 */
static inline void cpu_install_pgd(pgd_t *pgd)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(pgd, &init_mm);
}

/* Wipe the top-level table entry that covers @vaddr in @pgd. */
static void clear_pgd_entry(unsigned long vaddr, pgd_t *pgd)
{
	pgd_t *entry = pgd + pgd_index(vaddr);

	set_pgd(entry, __pgd(0));
}

/*
 * Copy real_angel_loop() into the section's text page by writing
 * through a temporary identity mapping of that page's physical address.
 *
 * The temporary pgd is a snapshot of init_mm's table with the entry
 * covering the section's text vaddr cleared, plus an identity mapping
 * of the text page -- so the write goes to the physical page, not
 * through any existing kernel alias.
 *
 * NOTE(review): on do_mapping_4k_pooling() failure the pgd page stays
 * consumed in the reserved pool (the pool has no free operation) and
 * the live translation regime is untouched, since cpu_install_pgd()
 * has not run yet -- confirm this leak is acceptable.
 */
static int safe_code_install(struct angel_core_section *section,
			struct angel_core_section *section_phys)
{
	pgd_t *pgd;
	int ret;
	struct angel_page_pool *pool = &angel_page_pool;
	unsigned long paddr = angel_core_text_addr(section_phys);

	pgd = (pgd_t *)alloc_one_page_reserved(pool);
	if (pgd == NULL)
		return -ENOMEM;

	/* Snapshot init_mm's top-level table under its lock. */
	spin_lock(&init_mm.page_table_lock);
	memcpy((void *)pgd, (void *)init_mm.pgd, PAGE_SIZE);
	spin_unlock(&init_mm.page_table_lock);
	/* Drop the entry for the text vaddr so only the idmap covers it. */
	clear_pgd_entry(angel_core_text_addr(section), pgd);

	ret = do_mapping_4k_pooling(paddr, paddr, pgd, pool);
	if (ret)
		return ret;

	/* Write the monitor loop through the identity mapping. */
	cpu_install_pgd(pgd);
	memset((void *)paddr, 0, PAGE_SIZE);
	memcpy((void *)paddr, real_angel_loop, PAGE_SIZE);
	/* Flush to PoC so the parked core sees the code with MMU off/idmap. */
	__flush_dcache_area((void *)paddr, PAGE_SIZE);
	cpu_uninstall_idmap();

	return 0;
}
#else
/*
 * KASAN build: the text page is written directly through its kernel
 * virtual address, no temporary identity mapping needed.
 * (@section_phys is unused in this variant but kept for signature
 * parity with the non-KASAN implementation.)
 */
static int safe_code_install(struct angel_core_section *section,
			struct angel_core_section *section_phys)
{
	void *text = (void *)angel_core_text_addr(section);

	memset(text, 0, PAGE_SIZE);
	memcpy(text, real_angel_loop, PAGE_SIZE);
	__flush_dcache_area(text, PAGE_SIZE);
	return 0;
}
#endif

/*
 * Initialize the angel core section for @cpu: zero the section (except
 * the text page, which safe_code_install() rewrites), fill in the
 * bookkeeping fields, install the monitor code, and build the
 * transition page table the parked core will run under.
 *
 * NOTE(review): the memset pair assumes angel_section_len >=
 * 2 * PAGE_SIZE and that the text page sits one page into the section
 * -- confirm against the section layout definitions.
 *
 * Returns 0 on success or a negative errno from the mapping helpers.
 */
int build_angel_core_section(unsigned int cpu,
			struct angel_core_section *section,
			struct angel_core_section *section_phys)
{
	int ret;
	unsigned long vaddr;
	unsigned int tsc_rate;

	vaddr = angel_core_text_addr(section);
	/* Zero the header region up to the text page... */
	memset((void *)section, 0, vaddr - (unsigned long)section);
	/* ...and everything after the text page (stack pages etc.). */
	memset((void *)(vaddr + PAGE_SIZE), 0, angel_section_len - 2 * PAGE_SIZE);
	section->boot_core = cpu_logical_map(0);
	section->phys = (unsigned long)section_phys;
	section->cpu = cpu;
	section->start = (unsigned long)section_phys;
	section->end = section->start + PAGE_SIZE;
	section->flag = 0;
	/* Stack grows down from the top of the dedicated stack page. */
	section->stack = (unsigned long)section_phys +
		ANGEL_CORE_STACK_PAGE_OFFSET * PAGE_SIZE;
	section->comm = 0;
	section->retire = 0;
	section->nmi_id = get_angel_rollback_ipi_id();
	/*
	 * angel_timeout value range: [500, 10000],
	 * tsc_khz is u32, angel_timeout is u64,
	 * angel_timeout * tsc_khz never overflow.
	 * Converts the millisecond timeout into counter ticks using the
	 * architected timer frequency.
	 */
	tsc_rate = arch_timer_get_cntfrq();
	section->timeout = angel_timeout * tsc_rate / 1000;
	/* NOTE(review): %ld assumes section->timeout is long -- verify. */
	pr_debug("Angel core timeout : %ld\n", section->timeout);

	/* Copy real_angel_loop() into the section's text page. */
	ret = safe_code_install(section, section_phys);
	if (ret)
		return ret;

	/* Map real_angel_loop's kernel vaddr onto the section text page. */
	ret = build_transition_pgtable((unsigned long)real_angel_loop,
				angel_core_text_addr(section_phys),
				PAGE_SIZE,
				&section->pgd);
	if (ret)
		return ret;

	/* Map the section header page into the transition table. */
	ret = do_mapping_4k_pooling((unsigned long)section,
			(unsigned long)section_phys,
			(pgd_t *)__va(section->pgd),
			&angel_page_pool);
	if (ret)
		return ret;

	/* Map the pages at offsets 2 and 3 (stack area) as well. */
	ret = do_mapping_4k_pooling((unsigned long)section + 2 * PAGE_SIZE,
			(unsigned long)section_phys + 2 * PAGE_SIZE,
			(pgd_t *)__va(section->pgd),
			&angel_page_pool);
	if (ret)
		return ret;

	ret = do_mapping_4k_pooling((unsigned long)section + 3 * PAGE_SIZE,
			(unsigned long)section_phys + 3 * PAGE_SIZE,
			(pgd_t *)__va(section->pgd),
			&angel_page_pool);
	if (ret)
		return ret;

	return 0;
}

/*
 * Helpers to turn an MPIDR affinity value into the fields of the GICv3
 * ICC_SGI1R_EL1 register (SGI generation), mirroring the field layout
 * used by the irq-gic-v3 driver.
 */
#define MPIDR_RS(mpidr)					(((mpidr) & 0xF0UL) >> 4)
#define MPIDR_TO_SGI_RS(mpidr)			(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
		(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		 << ICC_SGI1R_AFFINITY_## level ##_SHIFT)


/*
 * Assemble an ICC_SGI1R_EL1 value from the cluster affinities, range
 * selector, SGI number and target list, and write it to raise the SGI.
 * The write targets the CPUs in @tlist within @cluster_id.
 */
static inline void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)		|
			MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
			irq << ICC_SGI1R_SGI_ID_SHIFT			|
			MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
			MPIDR_TO_SGI_RS(cluster_id)				|
			tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	gic_write_sgi1r(val);
}

/*
 * Raise software-generated interrupt @irq on the CPU identified by the
 * MPIDR value @physicalcpu.  Only SGIs 0-15 exist; anything else is
 * rejected with a warning.
 */
static inline void angel_send_IPI(unsigned long long physicalcpu, unsigned int irq)
{
	unsigned long long cluster = MPIDR_TO_SGI_CLUSTER_ID(physicalcpu);
	unsigned short target = 1U << (physicalcpu & 0xf);

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	wmb();

	gic_send_sgi(cluster, target, irq);

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

/*
 * Create an artificial address dependency on @val (eor produces 0, the
 * dummy load targets sp + 0) so the compiler/CPU cannot hoist later
 * memory accesses above the counter read that produced @val.  Same
 * trick as the kernel's arch_timer ordering macro.
 */
#define arch_counter_enforce_ordering(val) do {				\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)

/*
 * Read the virtual counter (CNTVCT_EL0).  The leading isb() keeps the
 * read from being speculated early; arch_counter_enforce_ordering()
 * keeps subsequent accesses from moving before it.
 */
static inline unsigned long long arm64_rdtsc(void)
{
	unsigned long long cnt;

	isb();
	cnt = read_sysreg(cntvct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}

/*
 * Replace TTBR1 with @pgdp by running idmap_cpu_replace_ttbr1 from its
 * physical address while the identity map is installed -- the swap
 * cannot run from a virtual mapping that is itself being replaced.
 * NOTE(review): no caller is visible in this chunk -- confirm usage.
 */
static inline void cpu_set_ttbr1(pgd_t *pgdp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;
	phys_addr_t pgd_phys = virt_to_phys(pgdp);

	/* Call the helper via its physical address under the idmap. */
	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
	cpu_install_idmap();
	replace_phys(pgd_phys);
	isb();
}

/*
 * The watchdog loop executed by the parked angel core.
 *
 * NOTE(review): this function's text is copied wholesale (one page, see
 * safe_code_install()) into the reserved section and executed from that
 * copy, so it must remain self-contained, fit in PAGE_SIZE, and avoid
 * calls into kernel text -- do not restructure casually.
 *
 * @pgd:   transition page table root (unused inside the loop body).
 * @vaddr: address of the angel_core_section as seen by this core.
 * @paddr: physical address of the monitor text (unused here).
 *
 * State machine:
 *   STATE_STANDBY   - advertise readiness via section->comm, wait for
 *                     the boot side to raise ANGEL_WAKEUP.
 *   STATE_OBSERVING - watch for ANGEL_TOUCHED heartbeats; if none
 *                     arrives within section->timeout counter ticks,
 *                     request a rollback NMI on the boot core and halt.
 *   STATE_HALT      - park forever in a yield loop.
 */
asmlinkage unsigned long
real_angel_loop(unsigned long pgd, unsigned long vaddr, unsigned long paddr)
{
	unsigned long long tsc;
	unsigned long long pre_tsc;
	unsigned long long threshold;
	struct angel_core_section *section = (struct angel_core_section *)vaddr;
	enum angel_state state = STATE_STANDBY;

	/* Without a section there is nothing to monitor; just yield once. */
	if (!section) {
		asm volatile("yield":::"memory");
		return 0;
	}

	threshold = section->timeout;
	section->flag = 0;

	pre_tsc = arm64_rdtsc();
	while (1) {
		switch (state) {
		case STATE_STANDBY:
			/* Tell the boot core we are alive and listening. */
			section->comm = 1;
			if (section->flag == ANGEL_WAKEUP) {
				state = STATE_OBSERVING;
				pre_tsc = arm64_rdtsc();
			}
			break;
		case STATE_OBSERVING:
			/* Boot side asked us to stand down. */
			if (section->retire) {
				state = STATE_HALT;
				break;
			}
			/* Heartbeat received: restart the timeout window. */
			if (section->flag == ANGEL_TOUCHED) {
				section->flag = ANGEL_UNTOUCHED;
				pre_tsc = arm64_rdtsc();
				break;
			}

			/* Timeout expired: trigger the rollback NMI. */
			tsc = arm64_rdtsc();
			if ((tsc - pre_tsc) > threshold) {
				section->comm = ANGEL_COMM_ROLLBACK;
				section->event = 1;
				angel_send_IPI(section->boot_core, section->nmi_id);
				state = STATE_HALT;
			}
			break;
		case STATE_HALT:
			asm volatile("yield":::"memory");
			break;
		default:
			break;
		}
	}
	/* Not reached: the state machine never leaves the while(1) loop. */
	return 0;
}

/*
 * Entry point run on the core being parked as the angel core.  Installs
 * the identity map and jumps into angel_loop via its *physical* address
 * (the MMU context no longer covers kernel text after the switch).
 *
 * Never returns: angel_loop spins forever, and a core that does not
 * match the designated angel cpu falls through to unreachable().
 * NOTE(review): section is dereferenced without a NULL check -- it is
 * presumably non-NULL whenever section_phys is; confirm.
 */
int wake_angel_core(unsigned int cpu)
{
	struct angel_core_section *section;
	struct angel_core_section *section_phys = angel_core_section_phys();
	typeof(angel_loop) *angel_loops;

	if (section_phys) {
		section = angel_core_section_virt();
		angel_loops = (void *)__pa_symbol(angel_loop);

		if (section->cpu && section->cpu == cpu) {
			cpu_install_idmap();
			angel_loops(section->pgd, (unsigned long)section_phys,
					angel_core_text_addr(section_phys));
		}
	}

	unreachable();
	return 0;
}

/*
 * Record the reserved memory window for the angel core section.  The
 * first RES_START_OFFSET bytes of the region are skipped.
 * NOTE(review): no guard against len < RES_START_OFFSET (length would
 * wrap) -- confirm callers always pass a large-enough region.
 */
void __init set_angel_mem_area(unsigned long start, unsigned long len)
{
	angel_section_phys = start + RES_START_OFFSET;
	angel_section_len = len - RES_START_OFFSET;
}

/*
 * Configure the reserved page pool inside the angel memory region.
 * The additions are guarded against unsigned wrap-around; on overflow
 * the corresponding pool bound is simply left at zero (pool disabled).
 */
void __init init_angel_monitor_core(unsigned long start)
{
	unsigned long pool_start = start + PAGE_SIZE * ANGEL_CORE_POOL_START_PAGE_OFFSET;
	unsigned long pool_end = start + PAGE_SIZE * ANGEL_CORE_POOL_END_PAGE_OFFSET;

	if (pool_start > start)
		angel_page_pool.start = pool_start;
	if (pool_end > start)
		angel_page_pool.end = pool_end;
}

/*
 * Kick the parked angel core into its OBSERVING state by raising
 * ANGEL_WAKEUP, and register the section's event word with the core
 * framework.  Returns 0 when no angel core is configured or on
 * success, -EFAULT when the section cannot be resolved.
 */
int angel_start_monitor(void)
{
	struct angel_core_section *section;

	/* Nothing to do when no angel core was designated. */
	if (!get_angel_core())
		return 0;

	if (!angel_core_section_phys())
		return -EFAULT;

	section = angel_core_section_virt();
	if (!section)
		return -EFAULT;

	section->flag = ANGEL_WAKEUP;
	if (section->cpu) {
		set_angel_core(section->cpu);
		set_last_angel_timeout(section->timeout);
		/* Make the wakeup visible before anyone acts on it. */
		dsb(sy);
	}
	set_angel_core_event(&section->event);
	return 0;
}

/*
 * True when the angel section exists and records a usable cpu id
 * (nonzero and not the INVALID_ANGEL_CORE sentinel).
 */
bool is_angel_core_section_valid(void)
{
	struct angel_core_section *section;

	if (!angel_core_section_phys())
		return false;

	section = angel_core_section_virt();
	return section->cpu && section->cpu != INVALID_ANGEL_CORE;
}

/*
 * Thin guard around build_angel_core_section(): rejects a NULL section
 * with -EFAULT, otherwise forwards all arguments unchanged.
 */
int boot_angel_core(unsigned int cpu, struct angel_core_section *section,
			struct angel_core_section *section_phys)
{
	if (!section)
		return -EFAULT;
	return build_angel_core_section(cpu, section, section_phys);
}

/* Serializes the store handlers that configure the angel core/timeout. */
static DEFINE_MUTEX(angel_core_proc_lock);

/*
 * Report the currently designated angel core, but only when the section
 * agrees with the framework's record; otherwise INVALID_ANGEL_CORE.
 */
unsigned int angel_core_show(void)
{
	struct angel_core_section *section;
	unsigned int current_core;

	if (!angel_core_section_phys())
		return INVALID_ANGEL_CORE;

	section = angel_core_section_virt();
	current_core = get_angel_core();
	if (section && section->cpu == current_core)
		return current_core;

	return INVALID_ANGEL_CORE;
}

/*
 * Report the configured timeout in milliseconds.  angel_timeout is
 * bounded to [MIN, MAX]_ANGEL_MONITOR_TIMEOUT_MS by the store handler,
 * so the narrowing conversion is safe.
 */
unsigned int angel_timeout_show(void)
{
	return (unsigned int)angel_timeout;
}

/*
 * Store handler: designate @angel_core as the monitor core and boot it.
 *
 * Preconditions: the kexec extended resource must be ready and both the
 * angel section and the cpu-park area must exist.  The chosen core must
 * be nonzero and online, and no angel core may already be active.
 *
 * Fixes vs. the original:
 *  - on boot_angel_core() failure, "section->cpu = 0" dereferenced
 *    section even when it is NULL (boot_angel_core returns -EFAULT for
 *    a NULL section, which landed exactly on that path);
 *  - "retore"/"has not ready" typo in the warning message.
 *
 * Returns 0 on success, -EPERM/-EFAULT or the boot error otherwise.
 */
int angel_core_store(unsigned int angel_core)
{
	int err;
	struct angel_core_section *section;
	struct angel_core_section *section_phys = angel_core_section_phys();

	if (!is_kexec_extend_res_ready()) {
		pr_warn("Kexec restore res is not ready, can't set angel core.\n");
		return -EPERM;
	}

	/* Under the arm64 architecture, the slave core cannot be running individually,
	 * so the anglecore function must be used together with the smp cpu park function */
	if (!section_phys || !park_info.start_v) {
		pr_warn("Angel core or cpu park section is null.\n");
		return -EPERM;
	}

	mutex_lock(&angel_core_proc_lock);
	section = angel_core_section_virt();
	/* A section without a recorded cpu means no live angel core. */
	if (section && !section->cpu)
		set_angel_core(INVALID_ANGEL_CORE);
	if (has_angel_core()) {
		mutex_unlock(&angel_core_proc_lock);
		pr_warn("Angel_core exist.\n");
		return -EFAULT;
	}

	if (!angel_core || !cpu_online(angel_core)) {
		mutex_unlock(&angel_core_proc_lock);
		pr_warn("Core %u is invalid\n", angel_core);
		return -EPERM;
	}

	set_angel_core(angel_core);
	err = boot_angel_core(angel_core, section, section_phys);
	if (err) {
		/* Roll back the designation; section may be NULL here. */
		set_angel_core(INVALID_ANGEL_CORE);
		if (section)
			section->cpu = 0;
	}
	mutex_unlock(&angel_core_proc_lock);
	return err;
}

/*
 * Store handler for the monitor timeout (milliseconds).  The value can
 * only change while no angel core is active, and must lie in
 * [MIN_ANGEL_MONITOR_TIMEOUT_MS, MAX_ANGEL_MONITOR_TIMEOUT_MS].
 * Returns 0 on success, -EPERM or -EINVAL otherwise.
 */
int angel_timeout_store(unsigned int timeout)
{
	struct angel_core_section *section;

	if (!angel_core_section_phys()) {
		pr_warn("Angel core section is null.\n");
		return -EPERM;
	}

	/* Drop a stale designation before testing for a live core. */
	mutex_lock(&angel_core_proc_lock);
	section = angel_core_section_virt();
	if (section && !section->cpu)
		set_angel_core(INVALID_ANGEL_CORE);
	mutex_unlock(&angel_core_proc_lock);

	if (has_angel_core()) {
		pr_warn("The angel core has been set, timeout value cannot change.\n");
		return -EPERM;
	}

	if (timeout < MIN_ANGEL_MONITOR_TIMEOUT_MS ||
	    timeout > MAX_ANGEL_MONITOR_TIMEOUT_MS) {
		pr_warn("Angel core timeout set is out of range.\n");
		return -EINVAL;
	}

	angel_timeout = timeout;
	return 0;
}

/* Ops table wiring the angel core show/store handlers into the kup
 * monitor framework. */
struct kup_monitor_operations angel_core_monitor_ops = {
	.kup_monitor_core_show = angel_core_show,
	.kup_monitor_core_store = angel_core_store,
	.kup_monitor_timeout_show = angel_timeout_show,
	.kup_monitor_timeout_store = angel_timeout_store,
};

/* Accessor for the singleton angel core monitor ops table. */
struct kup_monitor_operations *get_angel_core_monitor_ops(void)
{
	return &angel_core_monitor_ops;
}

#endif /* CONFIG_EULEROS_ANGEL_CORE_MONITOR */
