// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
 *  Provide angel core method to monitor kernel start.
 */

#ifdef CONFIG_EULEROS_ANGEL_CORE_MONITOR
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/crash_core.h>
#include <asm/debugreg.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/msr.h>
#include <asm/angel_core.h>

unsigned long angel_section_phys;
unsigned long angel_section_size;
struct angel_page_pool angel_page_pool;
unsigned long angel_section_len;
static long angel_timeout = DEFAULT_ANGEL_MONITOR_TIMEOUT_MS;

/*
 * Busy-wait (in 1us steps, up to @timeout_us) until the angel core has
 * signalled readiness through section->comm.  Returns immediately when
 * no section exists or no angel core cpu is recorded.
 */
void wait_for_angel_core(unsigned int timeout_us)
{
	struct angel_core_section *section_phys = angel_core_section_phys();
	struct angel_core_section *section;

	if (!section_phys)
		return;

	section = angel_core_section_virt();
	if (!section || !section->cpu)
		return;

	/* Poll until the angel loop writes section->comm or we time out. */
	while (!section->comm && timeout_us--)
		udelay(1);
}

/*
 * Bump-allocate one page-aligned page from the reserved pool and return
 * its kernel virtual address, or 0 when the pool is unset or exhausted.
 * There is no corresponding free operation.
 */
static unsigned long alloc_one_page_reserved(struct angel_page_pool *pool)
{
	unsigned long page;

	if (!pool->start || !pool->end)
		return 0;

	page = PAGE_ALIGN(pool->start);
	if (pool->end < page + PAGE_SIZE)
		return 0;

	pool->start = page + PAGE_SIZE;
	return (unsigned long)__va(page);
}

/*
 * Map one 4K page (@vaddr -> @paddr) into the page table rooted at
 * @pgd, allocating any missing intermediate tables from the reserved
 * @pool.  The leaf PTE is installed with PAGE_KERNEL_EXEC_NOENC.
 *
 * Fix: newly allocated intermediate tables are now cleared before being
 * linked in.  Pool pages come from raw reserved memory and are not
 * guaranteed to be zeroed -- build_transition_pgtable() clear_page()s
 * its own root page for exactly this reason -- so linking an uncleared
 * page could expose garbage as apparently-valid translations.
 *
 * Returns 0 on success, -ENOMEM when the pool is exhausted.  Tables
 * allocated before a failure are not reclaimed (the pool is a bump
 * allocator with no free operation).
 */
static int do_mapping_4k_pooling(unsigned long vaddr, unsigned long paddr,
				pgd_t *pgd, struct angel_page_pool *pool)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int result = -ENOMEM;

	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		p4d = (p4d_t *)alloc_one_page_reserved(pool);
		if (!p4d)
			goto err;
		clear_page(p4d);
		set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
	}
	p4d = p4d_offset(pgd, vaddr);
	if (!p4d_present(*p4d)) {
		pud = (pud_t *)alloc_one_page_reserved(pool);
		if (!pud)
			goto err;
		clear_page(pud);
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(p4d, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)alloc_one_page_reserved(pool);
		if (!pmd)
			goto err;
		clear_page(pmd);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)alloc_one_page_reserved(pool);
		if (!pte)
			goto err;
		clear_page(pte);
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC));
	return 0;
err:
	return result;
}
/*
 * Build a minimal transition page table mapping @vaddr -> @paddr and
 * return its physical root through *@pgtable.
 *
 * @size is currently unused: exactly one 4K page is mapped.
 *
 * Fix: the old error path handed the root page to free_page(), but
 * that page came from alloc_one_page_reserved() -- memblock-reserved
 * memory the buddy allocator never owned.  Freeing it there would
 * inject a foreign page into the page allocator.  Pool pages are
 * simply leaked on error (the pool is a bump allocator with no free).
 *
 * Returns 0 on success, -ENOMEM when the pool is exhausted or the
 * mapping could not be built.
 */
int build_transition_pgtable(unsigned long vaddr,
			unsigned long paddr,
			unsigned long size,
			unsigned long *pgtable)
{
	pgd_t *pgd;
	int ret;
	struct angel_page_pool *pool = &angel_page_pool;

	pgd = (pgd_t *)alloc_one_page_reserved(pool);
	if (pgd == NULL)
		return -ENOMEM;
	/* Pool memory is not guaranteed zeroed; start from an empty PGD. */
	clear_page(pgd);

	ret = do_mapping_4k_pooling(vaddr, paddr, pgd, pool);
	if (ret)
		return ret;

	*pgtable = (unsigned long)__pa(pgd);
	return 0;
}

#ifndef CONFIG_KASAN
/* Zap the top-level PGD entry covering @vaddr in the given table. */
static void clear_pgd_entry(unsigned long vaddr, pgd_t *pgd)
{
	pgd += pgd_index(vaddr);
	set_pgd(pgd, __pgd(0));
}

/*
 * Copy the angel trampoline (real_angel_loop) into the reserved section
 * at its physical address, using a temporary page table so the copy is
 * done through an identity mapping of the destination:
 *
 *  1. clone init_mm's PGD into a pool page,
 *  2. clear the entry covering the section's normal virtual alias,
 *  3. identity-map the trampoline's destination physical page,
 *  4. briefly switch CR3 to the clone, memcpy one page, switch back.
 *
 * Returns 0 on success, -ENOMEM when the pool is exhausted (the pool
 * page cannot be reclaimed and is leaked on that path).
 */
static int safe_code_install(struct angel_core_section *section,
			struct angel_core_section *section_phys)
{
	pgd_t *pgd;
	int ret;
	struct angel_page_pool *pool = &angel_page_pool;
	unsigned long paddr = angel_core_text_addr(section_phys);
	unsigned long orig_cr3 = __read_cr3();

	pgd = (pgd_t *)alloc_one_page_reserved(pool);
	if (pgd == NULL)
		return -ENOMEM;
	/* Start from a full copy of the kernel's page tables. */
	memcpy((void *)pgd, (void *)init_mm.pgd, PAGE_SIZE);
	clear_pgd_entry(angel_core_text_addr(section), pgd);

	ret = do_mapping_4k_pooling(paddr, paddr, pgd, pool);
	if (ret)
		return ret;
	/*
	 * NOTE(review): CR3 is swapped without disabling interrupts --
	 * presumably safe at this point in bring-up; confirm no IRQ can
	 * arrive between the two write_cr3() calls.
	 */
	spin_lock(&init_mm.page_table_lock);
	write_cr3((unsigned long)__pa(pgd));
	memcpy((void *)paddr, real_angel_loop, PAGE_SIZE);
	write_cr3(orig_cr3);
	spin_unlock(&init_mm.page_table_lock);
	return 0;
}
#else
/*
 * KASAN builds: the section is reachable and writable through its
 * normal virtual alias, so the trampoline is copied directly without
 * any temporary page table.  @section_phys is unused here.
 */
static int safe_code_install(struct angel_core_section *section,
			struct angel_core_section *section_phys)
{
	void *dst = (void *)angel_core_text_addr(section);

	memcpy(dst, real_angel_loop, PAGE_SIZE);
	return 0;
}
#endif

/*
 * Populate the angel core section for @cpu, install the trampoline
 * code, and build the private page tables the angel loop will run
 * under.  Called with the section's virtual (@section) and physical
 * (@section_phys) addresses.
 *
 * Returns 0 on success or a negative errno from the mapping helpers.
 */
int build_angel_core_section(unsigned int cpu,
			struct angel_core_section *section,
			struct angel_core_section *section_phys)
{
	int ret;

	/* Start from a clean section, then fill in every field the
	 * angel loop and the NMI handler read. */
	memset((void *)section, 0, angel_section_len);
	/* NMI target is present-cpu 0's APIC id (the boot core). */
	section->apicid = apic->cpu_present_to_apicid(0);
	section->boot_core = 0;
	section->phys = (unsigned long)section_phys;
	section->cpu = cpu;
	section->start = (unsigned long)section_phys;
	section->end = section->start + PAGE_SIZE;
	section->flag = 0;
	section->stack = (unsigned long)section +
		ANGEL_CORE_STACK_PAGE_OFFSET * PAGE_SIZE;
	section->comm = 0;
	section->retire = 0;
	/*
	 * angel_timeout value range: [500, 10000],
	 * tsc_khz is u32, angel_timeout is u64,
	 * angel_timeout * tsc_khz never overflow.
	 */
	section->timeout = angel_timeout * tsc_khz;

	if (x2apic_mode) {
		section->x2apic = 1;
		pr_info("x2apic detected, use x2apic in angel core");
	} else {
		section->x2apic = 0;
		pr_info("x2apic disabled, use flat in angel core");
	}
	/* Copy real_angel_loop into the section's text page. */
	ret = safe_code_install(section, section_phys);
	if (ret)
		return ret;
	/* Map the trampoline's kernel vaddr onto the installed copy. */
	ret = build_transition_pgtable((unsigned long)real_angel_loop,
				angel_core_text_addr(section_phys),
				PAGE_SIZE,
				&section->pgd);
	if (ret)
		return ret;
	/* Map the section header itself (page 0) into the angel PGD. */
	ret = do_mapping_4k_pooling((unsigned long)section,
			(unsigned long)section_phys,
			(pgd_t *)__va(section->pgd),
			&angel_page_pool);
	if (ret)
		return ret;

	/*
	 * Map pages 2 and 3 (stack area).  Page 1 is not mapped here --
	 * presumably it is the text page already covered by the
	 * transition mapping above; confirm against the section layout.
	 */
	ret = do_mapping_4k_pooling((unsigned long)section + 2 * PAGE_SIZE,
			(unsigned long)section_phys + 2 * PAGE_SIZE,
			(pgd_t *)__va(section->pgd),
			&angel_page_pool);
	if (ret)
		return ret;

	ret = do_mapping_4k_pooling((unsigned long)section + 3 * PAGE_SIZE,
			(unsigned long)section_phys + 3 * PAGE_SIZE,
			(pgd_t *)__va(section->pgd),
			&angel_page_pool);
	if (ret)
		return ret;

	/* Flat (MMIO) APIC mode needs the APIC window mapped too. */
	if (!x2apic_mode) {
		ret = do_mapping_4k_pooling((unsigned long)APIC_BASE,
				(unsigned long)mp_lapic_addr,
				(pgd_t *)__va(section->pgd),
				&angel_page_pool);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * WRMSR with an exception-table fixup: if the write faults, execution
 * resumes at label 2 (via ex_handler_wrmsr_unsafe) instead of oopsing.
 * Presumably used so a bad MSR write inside the angel loop cannot kill
 * the monitor -- confirm against the handler's semantics.
 */
static __always_inline void x2apic_write_msr(unsigned int msr, u32 low, u32 high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}

/* Split a 64-bit value into EDX:EAX halves and issue the safe WRMSR. */
static __always_inline void x2apic_wrmsrl(unsigned int msr, u64 val)
{
	u32 lo = (u32)val;
	u32 hi = (u32)(val >> 32);

	x2apic_write_msr(msr, lo, hi);
}

/*
 * Write the x2APIC ICR MSR: destination APIC id in the high 32 bits,
 * command word in the low 32 bits (x2APIC exposes a single 64-bit ICR).
 */
static __always_inline void x2apic_icr_write(u32 low, u32 id)
{
	x2apic_wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}

/*
 * Read a memory-mapped local-APIC register.
 *
 * Fix: the access must go through a volatile pointer.  Callers poll
 * this in tight loops (flat_safe_apic_wait_icr_idle() waits for
 * APIC_ICR_BUSY to clear), and without volatile the compiler is free
 * to hoist the load out of the loop, spinning forever on a stale
 * value.  This matches the kernel's native_apic_mem_read().
 */
static __always_inline u32 flat_apic_read(u32 reg)
{
	return *((volatile u32 *)(APIC_BASE + reg));
}
/*
 * Write a memory-mapped local-APIC register.  The alternative swaps
 * the plain MOV for XCHG on CPUs with the 11AP erratum (X86_BUG_11AP),
 * mirroring the kernel's native_apic_mem_write().
 */
static __always_inline void flat_apic_write(u32 reg, u32 v)
{
	u32 *addr = (u32 *)(APIC_BASE + reg);

	alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
		       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
		       ASM_OUTPUT2("0" (v), "m" (*addr)));
}

/*
 * Program a flat-mode IPI: destination APIC id into ICR2 first, then
 * the command word into ICR (the ICR write triggers the send).
 */
static __always_inline void flat_apic_icr_write(u32 low, u32 id)
{
	flat_apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
	flat_apic_write(APIC_ICR, low);
}

/*
 * Spin for @loops iterations of a dec/jnz loop.  Self-contained so it
 * does not rely on the kernel's delay infrastructure; the .align 16
 * directives keep the loop bodies cache-line friendly.
 */
static __always_inline void ___delay(unsigned long loops)
{
	asm volatile(
		"test %0,%0\n"
		"jz 3f\n"
		"jmp 1f\n"

		".align 16\n"
		"1: jmp 2f\n"

		".align 16\n"
		"2: dec %0\n"
		" jnz 2b\n"
		"3: dec %0\n"

		: /* we don't need output */
		: "a" (loops)
	);
}

/*
 * Busy-wait in the style of arch/x86 __const_udelay(): scale @xloops
 * by 4, multiply by a fixed loops-per-jiffy estimate ((1<<12) * HZ/4)
 * with `mull`, and spin for the high half of the 64-bit product (EDX).
 *
 * Fix: the original assigned `xloops = 4`, discarding the requested
 * delay entirely so every call spun for the same fixed period.  The
 * upstream __const_udelay() pattern is `xloops *= 4`.
 */
static __always_inline void ___udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		: "=d" (xloops), "=&a" (d0)
		: "1" (xloops), "0"
		((1<<12) * (HZ/4)));

	___delay(++xloops);
}

/*
 * Poll APIC_ICR until the BUSY bit clears, retrying up to 1000 times
 * with ~100 units of local delay between reads.  Returns the last
 * observed BUSY state (0 means the ICR went idle).
 */
static __always_inline u32 flat_safe_apic_wait_icr_idle(void)
{
	u32 busy;
	int tries = 0;

	do {
		busy = flat_apic_read(APIC_ICR) & APIC_ICR_BUSY;
		if (!busy)
			return busy;
		___udelay(100);
	} while (tries++ < 1000);

	return busy;
}

#define	SET_APIC_DEST_FIELD(x) ((x) << 24)
/*
 * Assemble the low ICR word: shortcut/destination-mode bits plus the
 * delivery mode -- NMI delivery for NMI_VECTOR, fixed delivery with
 * the vector number otherwise.
 */
static __always_inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
					 unsigned int dest)
{
	unsigned int icr = shortcut | dest;

	if (vector == NMI_VECTOR)
		icr |= APIC_DM_NMI;
	else
		icr |= APIC_DM_FIXED | vector;

	return icr;
}

static __always_inline
void __default_send_IPI_dest_field(unsigned int apicid, int vector, unsigned int dest)
{
	unsigned long cfg;

	flat_safe_apic_wait_icr_idle();
	flat_apic_write(0x310, 0);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	flat_apic_write(APIC_ICR, cfg);
}

/*
 * Send @vector to the single CPU identified by @apicid.  @cpumask is
 * accepted for interface symmetry with the kernel's IPI helpers but is
 * unused; @dm carries the destination-mode bits.
 */
static __always_inline void flat_send_IPI_mask(const struct cpumask *cpumask, int vector, u32 dm,
						unsigned int apicid)
{
	__default_send_IPI_dest_field(apicid, vector, dm);
}

/*
 * Fire an NMI IPI at the boot CPU from the angel core: x2APIC MSR
 * write when @x2apic is set, otherwise the flat MMIO path.  Always
 * returns 0.
 */
static __always_inline int kick_bsp_nmi(int apicid, int boot_core, u32 dm, unsigned long x2apic)
{
	if (!x2apic) {
		flat_send_IPI_mask(cpumask_of(boot_core), NMI_VECTOR, dm, apicid);
		return 0;
	}

	/* x2APIC ICR writes require no icr-idle wait. */
	x2apic_icr_write(APIC_DM_NMI | dm, apicid);
	return 0;
}

/*
 * The watchdog loop that runs forever on the isolated angel core,
 * under its own page table (@pgd).  @vaddr/@paddr are the section's
 * virtual and physical addresses.
 *
 * State machine:
 *   STANDBY   - advertise readiness (comm = 1); wait for ANGEL_WAKEUP.
 *   OBSERVING - watch section->flag; each ANGEL_TOUCHED resets the TSC
 *               baseline.  If no touch arrives within section->timeout
 *               TSC ticks, request a rollback and NMI the boot CPU.
 *   HALT      - park the core with hlt.
 *
 * Never returns; the trailing `return 0` is unreachable (the while(1)
 * has no exit) and exists only to satisfy the signature.
 */
unsigned long real_angel_loop(unsigned long pgd,
		unsigned long vaddr, unsigned long paddr)
{
	unsigned long long tsc;
	unsigned long long pre_tsc;
	unsigned long long threshold;
	struct angel_core_section *section;
	enum angel_state state;

	section = (struct angel_core_section *)vaddr;
	state = STATE_STANDBY;
	if (!section) {
		/* No section to monitor: park the core. */
		asm volatile("hlt":::"memory");
		return 0;
	}
	threshold = section->timeout;
	/* Switch onto the private page table built for this core. */
#ifdef CONFIG_X86_32
	load_cr3(pgd);
#else
	write_cr3(pgd);
#endif
	section->flag = 0;
	pre_tsc = rdtsc();
	while (1) {
		switch (state) {
		case STATE_STANDBY:
			section->comm = 1;
			if (section->flag == ANGEL_WAKEUP) {
				state = STATE_OBSERVING;
				pre_tsc = rdtsc();
			}
			break;
		case STATE_OBSERVING:
			if (section->retire) {
				state = STATE_HALT;
				break;
			}
			if (section->flag == ANGEL_TOUCHED) {
				/* Heartbeat seen: reset the deadline. */
				section->flag = ANGEL_UNTOUCHED;
				pre_tsc = rdtsc();
				break;
			}
			tsc = rdtsc();
			if ((tsc - pre_tsc) > threshold) {
				/* Timed out: signal rollback, then NMI
				 * the boot CPU and stop monitoring. */
				section->comm = ANGEL_COMM_ROLLBACK;
				section->cpu = 0;
				section->event = 1;
				/* locked RMW on the stack = full memory
				 * barrier before the NMI is sent. */
				asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc");
				kick_bsp_nmi((int)section->apicid, (int)section->boot_core,
						APIC_DEST_PHYSICAL, section->x2apic);
				state = STATE_HALT;
			}
			break;
		case STATE_HALT:
			asm volatile("hlt":::"memory");
			break;
		default:
			break;
		}
	}
	return 0;
}

/*
 * Entry point for @cpu to become the angel core: disable hardware
 * breakpoints and, if this cpu is the one recorded in the section,
 * jump into angel_loop() (which never returns).  Declared to return
 * int for its caller's sake, but control never reaches the return.
 *
 * Fix: guard the result of angel_core_section_virt() -- every other
 * caller in this file NULL-checks it before dereferencing.
 */
int wake_angel_core(unsigned int cpu)
{
	struct angel_core_section *section;
	struct angel_core_section *section_phys = angel_core_section_phys();

	hw_breakpoint_disable();
	if (section_phys) {
		section = angel_core_section_virt();
		if (section && section->cpu && section->cpu == cpu)
			angel_loop(section->pgd, (unsigned long)section,
					(unsigned long)section_phys);
	}

	unreachable();
	return 0;
}

/*
 * Record the physical location (@start offset by RES_START_OFFSET) and
 * length of the reserved angel section.  NOTE(review): the global
 * angel_section_size is never written here or anywhere visible in this
 * file -- confirm whether it is set elsewhere or dead.
 */
void __init set_angel_mem_area(unsigned long start, unsigned long len)
{
	angel_section_phys = start + RES_START_OFFSET;
	angel_section_len = len;
}

/*
 * Derive the reserved page pool's [start, end) bounds from fixed page
 * offsets within the reserved region at @start.  The "> start"
 * comparisons guard against unsigned wraparound of the offset sums.
 */
void __init init_angel_monitor_core(unsigned long start)
{
	unsigned long pool_start = start + PAGE_SIZE * ANGEL_CORE_POOL_START_PAGE_OFFSET;
	unsigned long pool_end = start + PAGE_SIZE * ANGEL_CORE_POOL_END_PAGE_OFFSET;

	if (pool_start > start)
		angel_page_pool.start = pool_start;
	if (pool_end > start)
		angel_page_pool.end = pool_end;
}

/*
 * Kick the configured angel core into OBSERVING mode by raising
 * ANGEL_WAKEUP, and publish its timeout/event pointer to the rest of
 * the kernel.  Returns 0 when no core is configured or on success,
 * -EFAULT when the section is missing.
 */
int angel_start_monitor(void)
{
	struct angel_core_section *section;
	struct angel_core_section *section_phys = angel_core_section_phys();

	if (!get_angel_core())
		return 0;

	if (!section_phys)
		return -EFAULT;

	section = angel_core_section_virt();
	if (!section)
		return -EFAULT;

	section->flag = ANGEL_WAKEUP;
	if (section->cpu) {
		set_angel_core(section->cpu);
		set_last_angel_timeout(section->timeout);
	}
	set_angel_core_event(&(section->event));
	return 0;
}

/*
 * True when an angel section exists and records a usable core id.
 *
 * Fix: NULL-check the virtual mapping before dereferencing it -- the
 * original read section->cpu unconditionally, unlike the rest of this
 * file which always guards angel_core_section_virt()'s result.
 */
bool is_angel_core_section_valid(void)
{
	struct angel_core_section *section;
	struct angel_core_section *section_phys = angel_core_section_phys();

	if (!section_phys)
		return false;

	section = angel_core_section_virt();
	if (!section)
		return false;

	return section->cpu && section->cpu != INVALID_ANGEL_CORE;
}

/* Validate @section, then delegate to build_angel_core_section(). */
int boot_angel_core(unsigned int cpu, struct angel_core_section *section,
			struct angel_core_section *section_phys)
{
	if (!section)
		return -EFAULT;

	return build_angel_core_section(cpu, section, section_phys);
}

static DEFINE_MUTEX(angel_core_proc_lock);

/*
 * Report the configured angel core id, or INVALID_ANGEL_CORE when no
 * section exists or the recorded id disagrees with the section.
 */
unsigned int angel_core_show(void)
{
	struct angel_core_section *section;
	unsigned int core;

	if (!angel_core_section_phys())
		return INVALID_ANGEL_CORE;

	section = angel_core_section_virt();
	core = get_angel_core();
	if (!section || section->cpu != core)
		return INVALID_ANGEL_CORE;

	return core;
}

/*
 * Current watchdog timeout in milliseconds.  angel_timeout is a long
 * but angel_timeout_store() clamps it to
 * [MIN_ANGEL_MONITOR_TIMEOUT_MS, MAX_ANGEL_MONITOR_TIMEOUT_MS], so the
 * narrowing conversion is safe.
 */
unsigned int angel_timeout_show(void)
{
	return angel_timeout;
}

/*
 * proc handler: select @angel_core as the monitor core and build its
 * section.  Serialized against concurrent stores by
 * angel_core_proc_lock.
 *
 * Fixes: (1) on boot_angel_core() failure the old code wrote
 * section->cpu = 0 unconditionally, but boot_angel_core() returns
 * -EFAULT precisely when @section is NULL -- a guaranteed NULL deref
 * on that path; (2) misspelled warning message ("retore ... has not
 * ready").
 *
 * Returns 0 on success, -EPERM for bad preconditions or core ids,
 * -EFAULT when a core already exists or the section cannot be built.
 */
int angel_core_store(unsigned int angel_core)
{
	int err;
	struct angel_core_section *section;
	struct angel_core_section *section_phys = angel_core_section_phys();

	if (!is_kexec_extend_res_ready()) {
		pr_warn("Kexec restore res is not ready, can't set angel core.\n");
		return -EPERM;
	}
	if (!section_phys) {
		pr_warn("Angel core or cpu park section is null.\n");
		return -EPERM;
	}

	mutex_lock(&angel_core_proc_lock);
	section = angel_core_section_virt();
	/* A section with no recorded cpu means no live angel core. */
	if (section && !section->cpu)
		set_angel_core(INVALID_ANGEL_CORE);
	if (has_angel_core()) {
		mutex_unlock(&angel_core_proc_lock);
		pr_warn("Angel_core exist.\n");
		return -EFAULT;
	}

	if (!angel_core || !cpu_online(angel_core)) {
		mutex_unlock(&angel_core_proc_lock);
		pr_warn("Core %u is invalid\n", angel_core);
		return -EPERM;
	}
	set_angel_core(angel_core);
	err = boot_angel_core(angel_core, section, section_phys);
	if (err) {
		set_angel_core(INVALID_ANGEL_CORE);
		if (section)
			section->cpu = 0;
	}
	mutex_unlock(&angel_core_proc_lock);
	return err;
}

/*
 * proc handler: set the watchdog timeout (milliseconds).  Rejected
 * once an angel core is already running, since the section's TSC
 * threshold was computed from the old value.
 *
 * Fix: the out-of-range warning said "Angel core is invalid" -- it is
 * the *timeout* being rejected on that path.
 *
 * Returns 0 on success, -EPERM when changing is not allowed, -EINVAL
 * for an out-of-range timeout.
 */
int angel_timeout_store(unsigned int timeout)
{
	struct angel_core_section *section;

	if (!angel_core_section_phys()) {
		pr_warn("Angel core section is null.\n");
		return -EPERM;
	}

	mutex_lock(&angel_core_proc_lock);
	section = angel_core_section_virt();
	/* A section with no recorded cpu means no live angel core. */
	if (section && !section->cpu)
		set_angel_core(INVALID_ANGEL_CORE);
	mutex_unlock(&angel_core_proc_lock);

	if (has_angel_core()) {
		pr_warn("The angel core has been set, timeout value cannot change.\n");
		return -EPERM;
	}

	if (timeout < MIN_ANGEL_MONITOR_TIMEOUT_MS ||
		timeout > MAX_ANGEL_MONITOR_TIMEOUT_MS) {
		pr_warn("Angel timeout is invalid.\n");
		return -EINVAL;
	}
	angel_timeout = timeout;
	return 0;
}

/* kup_monitor hooks backed by the angel-core implementation. */
struct kup_monitor_operations angel_core_monitor_ops = {
	.kup_monitor_core_show = angel_core_show,
	.kup_monitor_core_store = angel_core_store,
	.kup_monitor_timeout_show = angel_timeout_show,
	.kup_monitor_timeout_store = angel_timeout_store,
};

/* Accessor for the kup_monitor framework to obtain our ops table. */
struct kup_monitor_operations *get_angel_core_monitor_ops(void)
{
	return &angel_core_monitor_ops;
}

#endif /* CONFIG_EULEROS_ANGEL_CORE_MONITOR */
