// SPDX-License-Identifier: GPL-2.0-only
/*
 * quick_kexec.c - quick kexec self-developed interfaces
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/efi.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/sysfs.h>
#include <crypto/sha2.h>

#include "kexec_internal.h"

#ifdef CONFIG_EULEROS_ANGEL_CORE_MONITOR
#include <asm/angel_core.h>
#endif

/* True once smp_init() has completed (set via set_smp_init_finished()). */
static bool smp_init_finished;
/* Non-zero while a restored quick-rollback image is available this boot. */
static int quick_rollback_enable;
/* Non-zero when the platform provides quick-rollback support. */
static int quick_rollback_ability;
/* Non-zero after the kexec extend resources were successfully restored. */
static int kexec_restore_succ;
/* Set by the "kexec_restore" early param; enables extend-res restoring. */
int kexec_restore_res_enable __read_mostly;

/* Return true once set_smp_init_finished(true) has been called. */
bool is_smp_init_finished(void)
{
	return smp_init_finished;
}

/* Record whether smp_init() has completed. */
void set_smp_init_finished(bool init_finished)
{
	smp_init_finished = init_finished;
}

/* Set (non-zero) or clear the platform's quick-rollback capability flag. */
void set_quick_rollback_ability(int is_enable)
{
	quick_rollback_ability = is_enable;
}

/* Return true if the platform supports quick rollback. */
bool has_quick_rollback_ability(void)
{
	return quick_rollback_ability != 0;
}

/* Early param "kexec_restore": turn on restoring of kexec extend resources. */
static int __init enable_kexec_restore_res(char *str)
{
	kexec_restore_res_enable = 1;
	return 0;
}
early_param("kexec_restore", enable_kexec_restore_res);

/* Return true if the "kexec_restore" early param was given. */
bool is_kexec_restore_res_enable(void)
{
	return kexec_restore_res_enable != 0;
}

/* Return true while a restored quick-rollback image is available. */
bool is_quick_rollback_enable(void)
{
	return quick_rollback_enable != 0;
}

/* Return true if the kexec extend resources were restored successfully. */
bool is_kexec_restore_succ(void)
{
	return kexec_restore_succ != 0;
}

/* Canonical const name strings for restorable extend resources. */
static char *kexec_res_names[KEXEC_RES_RESTORE_NUM] = {
	QUICK_ROLLBACK_RES_NAME, ANGEL_MONITOR_RES_NAME, CPU_PARK_RES_NAME};
/* Registered extend resources; kexec_extend_index counts the slots in use. */
static struct resource kexec_extend_res_array[KEXEC_MAX_EXTEND_RES_NUM];
static unsigned int kexec_extend_index;

/* The function is called before smp_init, so there exist no concurrent problem. */
struct resource *get_kexec_extend_res(char *res_name)
{
	unsigned int i;

	for (i = 0; i < kexec_extend_index; i++) {
		if (!strcmp(kexec_extend_res_array[i].name, res_name))
			return &(kexec_extend_res_array[i]);
	}
	return NULL;
}

/* Attach every registered extend resource fully contained in @res as a child. */
void request_kexec_extend_res(struct resource *res)
{
	unsigned int idx;

	for (idx = 0; idx < kexec_extend_index; idx++) {
		struct resource *child = &kexec_extend_res_array[idx];

		/* Only containment (start and end inside @res) qualifies. */
		if (child->start >= res->start && child->end <= res->end)
			request_resource(res, child);
	}
}

/* The function is called before smp_init, so there exist no concurrent problem. */
/*
 * Hand out the next free slot of kexec_extend_res_array.
 * Returns 0 and stores the slot in *res, -EINVAL for a NULL @res,
 * or -ENOMEM when the array is exhausted.
 */
int __init get_kexec_extend_res_space(struct resource **res)
{
	if (!res)
		return -EINVAL;

	if (kexec_extend_index >= KEXEC_MAX_EXTEND_RES_NUM) {
		pr_warn("Add kexec extend res fail.\n");
		return -ENOMEM;
	}

	*res = &kexec_extend_res_array[kexec_extend_index++];
	return 0;
}

/* The function is called before smp_init, so there exist no concurrent problem. */
/*
 * Release the most recently allocated slot from get_kexec_extend_res_space().
 * Guard against unsigned underflow on an unbalanced call: decrementing an
 * index of zero would make every bounded loop over the array walk the whole
 * (wrapped) range.
 */
void __init free_kexec_extend_res_space(void)
{
	if (kexec_extend_index)
		kexec_extend_index--;
	else
		pr_warn("unbalanced free of kexec extend res slot\n");
}

/* The function is called before smp_init, so there exist no concurrent problem. */
/*
 * Register one restored extend area as a struct resource.  The area's name
 * must match one of the const strings in kexec_res_names[] so the resource
 * can reference stable storage rather than the (soon wiped) restore image.
 */
static void __init add_kexec_extend_res(struct kexec_extend_res_area *extend_res)
{
	unsigned int i;
	struct resource *res;

	if (get_kexec_extend_res_space(&res))
		return;

	/* find const name string for extend_res */
	for (i = 0; i < KEXEC_RES_RESTORE_NUM; i++) {
		if (!strcmp(extend_res->res_name, kexec_res_names[i]))
			break;
	}
	if (i == KEXEC_RES_RESTORE_NUM) {
		/* Return the slot through the matching helper, not a raw decrement. */
		free_kexec_extend_res_space();
		pr_warn("Find no match kexec extend res name:%s.\n", extend_res->res_name);
		return;
	}

	res->start = extend_res->start;
	res->end = extend_res->end;
	res->name = kexec_res_names[i];
	res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	res->desc = IORES_DESC_QUICK_KEXEC;
#ifdef CONFIG_X86
	insert_resource(&iomem_resource, res);
#endif
	pr_info("add kexec extend resource:%s succ.\n", res->name);
}

/*
 * Validate and consume the extend-res image at virtual address @start:
 * check the magic and the SHA-256 digest, reserve each described memory
 * region in memblock, register it as an extend resource, then wipe the
 * image and mark rollback/restore as available.
 */
static void __init read_kexec_extend_res(unsigned long start)
{
	int ret;
	unsigned int i;
	unsigned char hash[SHA256_DIGEST_SIZE] = {0};
	struct kexec_extend_res *extend_res = (struct kexec_extend_res *)start;

	if (extend_res->magic != KEXEC_EXTEND_RES_MAGIC) {
		pr_warn("kexec extend res magic match error:%lx!\n", extend_res->magic);
		return;
	}
	/* The digest covers everything from res_num to the end of the struct. */
	generate_sha_digest((unsigned char const *)(&extend_res->res_num),
		sizeof(struct kexec_extend_res) - KEXEC_RES_NUM_OFFSET, hash);
	if (memcmp(extend_res->digest, hash, SHA256_DIGEST_SIZE)) {
		pr_warn("sha256 sum of kexec extend res error!\n");
		return;
	}
	pr_emerg("kexec extend res sha256 match succ!\n");
	for (i = 0; i < extend_res->res_num; i++) {
		/* Skip areas that are not plain memory or that overlap reservations. */
		if (!memblock_is_region_memory(extend_res->area[i].start,
		    extend_res->area[i].end - extend_res->area[i].start)) {
			pr_warn("Extend memblock:%u region is not memory!\n", i);
			continue;
		}

		if (memblock_is_region_reserved(extend_res->area[i].start,
		    extend_res->area[i].end - extend_res->area[i].start)) {
			pr_warn("Extend memblock:%u region overlaps reserved memory!\n", i);
			continue;
		}
		ret = memblock_reserve(extend_res->area[i].start,
			extend_res->area[i].end - extend_res->area[i].start);
		if (ret) {
			pr_err("%s: Error reserving memblock for res:%s.\n", __func__,
				extend_res->area[i].res_name);
			continue;
		}

		add_kexec_extend_res(&(extend_res->area[i]));
	}
	/* Wipe the image so a later boot cannot re-consume stale data. */
	memset(extend_res, 0, sizeof(struct kexec_extend_res));
	pr_info("kexec extend res has been done\n");
	quick_rollback_enable = 1;
	kexec_restore_succ = 1;
}

/*
 * Map the extend-res image at physical @mem_start with early_memremap()
 * and hand the (possibly page-misaligned) virtual address to
 * read_kexec_extend_res().
 */
static void __init read_kexec_extend_res_by_mmap(u64 mem_start)
{
	void *p;
	unsigned long slop;

	/* early_memremap() wants a page-aligned base; carry the offset as slop. */
	slop = offset_in_page(mem_start);
	p = early_memremap(mem_start & PAGE_MASK, slop + sizeof(struct kexec_extend_res));
	if (!p)
		return;
	pr_info("kexec extend res by mmap\n");
	read_kexec_extend_res((unsigned long)p + slop);
	/*
	 * Unmap the same length that was mapped: the original omitted the
	 * slop here, leaving part of the early mapping in place.
	 */
	early_memunmap(p, slop + sizeof(struct kexec_extend_res));
}

/*
 * Restore the kexec extend-res image left by the previous kernel at
 * physical address @res_start.  On x86 the region is reserved in memblock,
 * read either through the direct map (when already mapped) or via
 * early_memremap(), and released again afterwards.
 */
void __init restore_kexec_extend_res(u64 res_start)
{
#ifdef CONFIG_X86
	int ret;
	unsigned long res_len = PAGE_ALIGN(sizeof(struct kexec_extend_res));

	if (!memblock_is_region_memory(res_start, res_len))
		return;
	if (memblock_is_region_reserved(res_start, res_len))
		return;

	/* reserve quick kexec extend res segment */
	ret = memblock_reserve(res_start, res_len);
	if (ret) {
		pr_warn("reserve for restore kexec res fail\n");
		return;
	}

	if (pfn_range_is_mapped(PFN_DOWN(res_start),
		PFN_DOWN(res_start + sizeof(struct kexec_extend_res)))) {
		/*
		 * Read through the direct map.  Keep res_start physical: the
		 * original overwrote it with the virtual address and then
		 * passed that to memblock_free(), which must see the same
		 * physical address that memblock_reserve() got.
		 */
		read_kexec_extend_res((unsigned long)(res_start + PAGE_OFFSET));
	} else {
		read_kexec_extend_res_by_mmap(res_start);
	}
	memblock_free(res_start, res_len);
#else
	read_kexec_extend_res_by_mmap(PAGE_ALIGN(res_start));
#endif
}

/*
 * Compute the SHA-256 digest of @size bytes at @string into @digest
 * (SHA256_DIGEST_SIZE bytes).  Uses the lib/crypto one-shot sha256()
 * helper from <crypto/sha2.h> instead of open-coding init/update/final.
 */
void generate_sha_digest(const unsigned char *string, unsigned long size, unsigned char *digest)
{
	/* NOTE: length narrows to unsigned int, same as the update-based original. */
	sha256(string, size, digest);
}

/*
 * Build the extend-res image at @start from the registered extend resources,
 * then seal it with a SHA-256 digest so the next kernel can validate it.
 * When @is_rollback_segments is set, only the angel-monitor resource is
 * included and resource memory is left untouched.
 * Returns 0 on success or -EFAULT when a registered resource has no name.
 */
int fill_kexec_extend_res(void *start, unsigned int is_rollback_segments)
{
	unsigned int i, j, len;
	void *virt_start;
	struct kexec_extend_res *extend_res = (struct kexec_extend_res *)start;

	memset(extend_res, 0, sizeof(struct kexec_extend_res));
	extend_res->magic = KEXEC_EXTEND_RES_MAGIC;
	j = 0;
	/*
	 * kexec_extend_index indicates the max member number of kexec_extend_res_array.
	 * kexec_extend_index and kexec_extend_res_array cannot be changed after smp_init.
	 * There exist no concurrent problem.
	 */
	for (i = 0; i < kexec_extend_index; i++) {
		if (!kexec_extend_res_array[i].name) {
			pr_warn("extend_res res name is null, kexec_extend_index:%u\n",
					kexec_extend_index);
			return -EFAULT;
		}

		/* Names that would not fit the fixed-size field are skipped. */
		len = strlen(kexec_extend_res_array[i].name);
		if (len > (RES_NAME_MAX_LEN - 1)) {
			pr_warn("kexec extend resource name(%s) longer than:%u.\n",
				kexec_extend_res_array[i].name, RES_NAME_MAX_LEN - 1);
			continue;
		}
		/* Regions smaller than one page are not carried over. */
		if (kexec_extend_res_array[i].end < kexec_extend_res_array[i].start + PAGE_SIZE - 1)
			continue;

		/* For rollback segments only the angel-monitor region is kept. */
		if (is_rollback_segments &&
		    strcmp(kexec_extend_res_array[i].name, ANGEL_MONITOR_RES_NAME))
			continue;

		/* len <= RES_NAME_MAX_LEN - 1 here, so manual termination is safe. */
		strncpy(extend_res->area[j].res_name, kexec_extend_res_array[i].name, len);
		extend_res->area[j].res_name[len] = 0;
		extend_res->area[j].start = kexec_extend_res_array[i].start;
		extend_res->area[j].end = kexec_extend_res_array[i].end;
		/* the reserved memblock size always larger than PAGE_SIZE */
		virt_start = phys_to_virt(extend_res->area[j].start);
		if (virt_start && !is_rollback_segments)
			memset(virt_start, 0, PAGE_SIZE);
		pr_info("fill extend_res: %s\n", extend_res->area[j].res_name);
		j++;
	}
	extend_res->res_num = j;
	/* Digest covers everything from res_num onward; checked on restore. */
	generate_sha_digest((unsigned char const *)(&extend_res->res_num),
		sizeof(struct kexec_extend_res) - KEXEC_RES_NUM_OFFSET, extend_res->digest);
	quick_rollback_enable = 0;
	return 0;
}

/* Non-zero once fill_extend_res_in_segments() has populated the image. */
int kexec_extend_res_ready;

/* Return non-zero when the extend-res image in the segments is ready. */
int is_kexec_extend_res_ready(void)
{
	return kexec_extend_res_ready;
}

/* Mark the extend-res image as filled in. */
void set_kexec_extend_res_ready(void)
{
	kexec_extend_res_ready = 1;
}

/*
 * Place the extend-res image into the spare tail of the segment located
 * SEGMENT_INITRD_INDEX_OFFSET before the last kexec segment.  Returns 0 on
 * success or -EFAULT when there are too few segments, no spare room after
 * the page-aligned payload, or the segment memory cannot be addressed.
 */
int fill_extend_res_in_segments(unsigned long nr_segments,
		struct kexec_segment *segments, unsigned int is_rollback_segments)
{
	int ret;
	void *virt_start;
	unsigned long extend_res_size;
	unsigned long index;

	/*
	 * nr_segments is unsigned, so nr_segments == 0 wraps here; that case
	 * is still rejected by the index >= nr_segments check below.
	 */
	if (nr_segments - 1 < SEGMENT_INITRD_INDEX_OFFSET) {
		pr_warn("There exist too few nr_segments.\n");
		return -EFAULT;
	}
	index = nr_segments - 1 - SEGMENT_INITRD_INDEX_OFFSET;
	if (index >= nr_segments)
		return -EFAULT;
	extend_res_size = sizeof(struct kexec_extend_res);
	/* Need room between the page-aligned payload end and memsz. */
	if ((segments[index].memsz <= segments[index].bufsz)
		|| (segments[index].memsz - PAGE_ALIGN(segments[index].bufsz) < extend_res_size)) {
		pr_warn("There exist no sufficient space for kexec extend res:%lx.\n",
				extend_res_size);
		return -EFAULT;
	}
	virt_start = phys_to_virt(segments[index].mem + PAGE_ALIGN(segments[index].bufsz));
	if (virt_start) {
		ret = fill_kexec_extend_res(virt_start, is_rollback_segments);
		if (ret)
			return ret;
		pr_info("kexec_extend_res_ready!\n");
		set_kexec_extend_res_ready();
		return 0;
	}

	return -EFAULT;
}

/* sysfs show: 1 when quick rollback is both enabled and supported, else 0. */
static ssize_t quick_rollback_enable_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	/* sysfs_emit() is the required, bounds-checked formatter for show(). */
	return sysfs_emit(buf, "%d\n", is_quick_rollback_enable() &&
			has_quick_rollback_ability() ? 1 : 0);
}
/*
 * sysfs store: only 0 ("disable") is accepted; re-enabling from userspace
 * is rejected with -EPERM.  Disabling also clears the cached rollback
 * image state so a stale image cannot be used later.
 */
static ssize_t quick_rollback_enable_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int enable_val;
	int err;
	struct resource *res;
	struct kexec_rollback_image_state *image_state;

	err = kstrtoint(buf, 0, &enable_val);
	if (err)
		return -EINVAL;
	if (enable_val)
		return -EPERM;

	quick_rollback_enable = 0;
	res = get_quick_rollback_res();
	if (res) {
		/* clear quick rollback data */
		image_state = (struct kexec_rollback_image_state *)((unsigned long)
				phys_to_virt(res->start) + RES_START_OFFSET);
		/* defensive: phys_to_virt() plus offset is normally non-NULL */
		if (image_state)
			memset(image_state, 0, sizeof(struct kexec_rollback_image_state));
	}
	return count;
}

static struct kobj_attribute quick_rollback_enable_attr =
	__ATTR(quick_rollback_enable, 0600, quick_rollback_enable_show,
			quick_rollback_enable_store);

#ifdef CONFIG_EULEROS_ANGEL_CORE_MONITOR
/* Callback table supplied by the angel-core monitor module. */
struct kup_monitor_operations kup_monitor_ops;

/* Copy the monitor callbacks into the file-local ops table. */
void register_kup_monitor_operations(struct kup_monitor_operations *ops)
{
	kup_monitor_ops.kup_monitor_core_show = ops->kup_monitor_core_show;
	kup_monitor_ops.kup_monitor_core_store = ops->kup_monitor_core_store;
	kup_monitor_ops.kup_monitor_timeout_show = ops->kup_monitor_timeout_show;
	kup_monitor_ops.kup_monitor_timeout_store = ops->kup_monitor_timeout_store;
}

/* sysfs show: report the monitor core, or the invalid sentinel when unset. */
static ssize_t kup_monitor_core_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	unsigned int core = 0;

	if (kup_monitor_ops.kup_monitor_core_show)
		core = kup_monitor_ops.kup_monitor_core_show();
	/* sysfs_emit() is the required, bounds-checked formatter for show(). */
	return sysfs_emit(buf, "0x%x\n", (core == 0) ? INVALID_ANGEL_CORE : core);
}
/*
 * sysfs store: forward the requested core to the monitor callback.
 * NOTE(review): when no callback is registered the write silently
 * succeeds (returns count) — confirm this is intentional.
 */
static ssize_t kup_monitor_core_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned int core;

	err = kstrtouint(buf, 0, &core);
	if (err)
		return -EINVAL;
	if (kup_monitor_ops.kup_monitor_core_store)
		err = kup_monitor_ops.kup_monitor_core_store(core);

	return (err < 0) ? err : count;
}

static struct kobj_attribute kup_monitor_core_attr =
	__ATTR(kup_monitor_core, 0600, kup_monitor_core_show,
			kup_monitor_core_store);

/* sysfs show: report the monitor timeout (0 when no callback registered). */
static ssize_t kup_monitor_timeout_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	unsigned int timeout = 0;

	if (kup_monitor_ops.kup_monitor_timeout_show)
		timeout = kup_monitor_ops.kup_monitor_timeout_show();
	/* sysfs_emit() is the required, bounds-checked formatter for show(). */
	return sysfs_emit(buf, "%u\n", timeout);
}
/*
 * sysfs store: forward the requested timeout to the monitor callback.
 * NOTE(review): like the core store, a missing callback silently succeeds.
 */
static ssize_t kup_monitor_timeout_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned int timeout;

	err = kstrtouint(buf, 0, &timeout);
	if (err)
		return -EINVAL;
	if (kup_monitor_ops.kup_monitor_timeout_store)
		err = kup_monitor_ops.kup_monitor_timeout_store(timeout);

	return (err < 0) ? err : count;
}

static struct kobj_attribute kup_monitor_timeout_attr =
	__ATTR(kup_monitor_timeout, 0600, kup_monitor_timeout_show,
			kup_monitor_timeout_store);
#endif

/* Attributes published under /sys/kernel/kup. */
static struct attribute *kup_attrs[] = {
	&quick_rollback_enable_attr.attr,
#ifdef CONFIG_EULEROS_ANGEL_CORE_MONITOR
	&kup_monitor_core_attr.attr,
	&kup_monitor_timeout_attr.attr,
#endif
	NULL
};

static const struct attribute_group kup_attr_group = {
	.attrs = kup_attrs,
};

/*
 * Create /sys/kernel/kup and publish the upgrade control attributes.
 * The kobject is dropped again if group creation fails.
 */
static int __init kernel_upgrade_sysfs_init(void)
{
	struct kobject *kup_kobj;
	int ret;

	kup_kobj = kobject_create_and_add("kup", kernel_kobj);
	if (!kup_kobj)
		return -ENOMEM;

	ret = sysfs_create_group(kup_kobj, &kup_attr_group);
	if (ret)
		kobject_put(kup_kobj);

	return ret;
}
core_initcall(kernel_upgrade_sysfs_init);

/*
 * Append a "size$start," memmap entry for the quick-rollback region.
 * (Function name typo is kept: it is referenced by the items[] table.)
 */
static void increate_memmap_rollback(char *buf, size_t len)
{
	struct resource *res = get_quick_rollback_res();
	size_t region_size;

	if (!res)
		return;

	region_size = res->end - res->start + 1;
	snprintf(buf, len, "%#lx$%#018llx,", region_size, res->start);
}

/* Append a "size$start," memmap entry for the angel-monitor region. */
static void increase_memmap_angelcore(char *buf, size_t len)
{
	struct resource *res = get_kexec_extend_res(ANGEL_MONITOR_RES_NAME);
	size_t region_size;

	if (!res)
		return;

	region_size = res->end - res->start + 1;
	snprintf(buf, len, "%#lx$%#018llx,", region_size, res->start);
}

/* Append a "size$start," memmap entry for the cpu-park region. */
static void increase_memmap_cpupark(char *buf, size_t len)
{
	struct resource *res = get_kexec_extend_res(CPU_PARK_RES_NAME);
	size_t region_size;

	if (!res)
		return;

	region_size = res->end - res->start + 1;
	snprintf(buf, len, "%#lx$%#018llx,", region_size, res->start);
}

/*
 * Rewrite @cmdline (capacity @len) so that a "kexecmem=" option sits at the
 * end, carrying one "size$start," entry per present item (quick_rollback,
 * angel_monitor, cpuparkmem).  An existing kexecmem= option is removed,
 * its still-valid entries re-appended, and entries tagged "$...0x0" are
 * dropped so they can be replaced.  Returns the number of bytes added,
 * 0/negative when nothing changed, or a -errno on failure.
 */
int increase_memmap_items(char *cmdline, size_t len)
{
	struct {
		char *item;
		void (*fn)(char *buf, size_t len);
	} items[] = {
		{ "quick_rollback=", increate_memmap_rollback },
		{ "angel_monitor=", increase_memmap_angelcore },
		{ "cpuparkmem=", increase_memmap_cpupark },
	};
	const char *memmap = "kexecmem=";
	char *new, *start;
	int i, count, old_count, num = 0;

	new = kzalloc(sizeof(char)*(len + 1), GFP_KERNEL);
	if (new == NULL)
		return -ENOMEM;

	start = strstr(cmdline, memmap);
	if (start == NULL) {
		/* No existing option: just append "kexecmem=" to the cmdline. */
		count = snprintf(new, len, "%s %s", cmdline, memmap);
		if (count == len) {
			pr_warn("no space left to fill 'memmap'\n");
			count = -EINVAL;
			goto out;
		}
		old_count = strlen(new);
	} else {
		char *end, *next, *symbol, *tmp;

		/* Work on a copy so the option can be cut out of the middle. */
		tmp = kzalloc((strlen(cmdline) + 1), GFP_KERNEL);
		if (tmp == NULL) {
			count = -ENOMEM;
			goto out;
		}
		strcat(tmp, cmdline);

		/* Remove the old kexecmem= option (start/middle/end cases). */
		start = strstr(tmp, memmap);
		end = strstr(start, " ");
		if (start != tmp && end == NULL) {
			*(start - 1) = '\0';
			snprintf(new, len, "%s", tmp);
		} else if (start != tmp && end != NULL) {
			*(start - 1) = '\0';
			*end = '\0';
			snprintf(new, len, "%s %s", tmp, end + 1);
		} else if (start == tmp && end != NULL) {
			*end = '\0';
			snprintf(new, len, "%s", end + 1);
		}

		memcpy(cmdline, new, strlen(new));
		sprintf(new + strlen(new), " %s", memmap);
		old_count = strlen(new);

		/* Re-append the old comma-separated entries, filtering stale ones. */
		start += strlen(memmap);
		do {
			next = strstr(start, ",");
			if (next != NULL)
				*next = '\0';

			/* items with flag will be replaced with the following new items */
			symbol = strstr(start, "$");
			if ((symbol != NULL)
				&& (strstr(symbol, "0x0") != NULL))
				continue;

			/* flag '#' '$' '!' is used by kaslr to tag this region should be
			 * avoid for ramdomization address,
			 */
			if (strstr(start, "#") != NULL
					|| strstr(start, "$") != NULL
					|| strstr(start, "!") != NULL)
				num++;

			sprintf(new + strlen(new), "%s,", start);
		} while (next != NULL && (start = next + 1));

		kfree(tmp);
	}

	/* Add one fresh entry per item present on the cmdline, capped by num. */
	for (i = 0; i < ARRAY_SIZE(items); i++) {
		if (strstr(cmdline, items[i].item) && num < MAX_QUICK_KEXEC_REGIONS) {
			count = strlen(new);
			items[i].fn(new + count, len - count);
			if (new[len] != '\0') {
				pr_warn("no space left to fill memmap item\n");
				count = -EINVAL;
				goto out;
			}
			num++;
		}
	}

	count = strlen(new) - old_count;
	if (count > 0) {
		/* Drop the trailing comma before writing back. */
		new[strlen(new) - 1] = '\0';
		memcpy(cmdline, new, strlen(new));
	}
out:
	kfree(new);
	return count;
}

#ifdef CONFIG_X86
/*
 * Patch the kernel command line embedded after the x86 setup sectors of a
 * kexec boot_params image: locate the cmdline, then let
 * increase_memmap_items() append the kexecmem= entries.
 * Returns the byte count added, or a negative errno on a malformed image.
 */
int do_modify_cmdline(struct boot_params *boot_params, size_t size)
{
	struct setup_header *hdr;
	char *cmdline;
	u64 cmdline_offset;
	int retval = 0;

	if (size < sizeof(struct boot_params)) {
		retval = -EFAULT;
		goto out;
	}

	/* "HdrS" is the x86 boot-protocol signature; reject anything else. */
	hdr = &boot_params->hdr;
	if (memcmp((char *)&hdr->header, "HdrS", strlen("HdrS")) != 0) {
		retval = -EFAULT;
		goto out;
	}

	/* offset is from kexec-tools source,
	 * 512 is sector size, where is from arch/x86/boot/tools/build.c
	 */
	cmdline_offset = (hdr->setup_sects + 1) * 512;
	cmdline = (char *)boot_params + cmdline_offset;

	pr_info("original cmdline: %s\n", cmdline);

	retval = increase_memmap_items(cmdline, hdr->cmdline_size);
	if (retval < 0)
		goto out;

	pr_info("new cmdline: %s\n", cmdline);
out:
	return retval;
}

/* skip the last kexecmem cmdstr which added by kexec cmd */
/* skip the last kexecmem cmdstr which added by kexec cmd */
void skip_kexecmem_cmd(char *cmdline)
{
	const char *kexec_cmd = "kexecmem=";
	char *pos = cmdline + strlen(cmdline) - 1;

	/* Walk back to the space that precedes the final token. */
	while (pos >= cmdline && *pos != ' ')
		pos--;

	if (pos < cmdline)
		return;

	/* Cut the trailing token off when it carries a kexecmem= entry. */
	if (strstr(pos, kexec_cmd))
		*pos = '\0';
}
#endif

#ifdef CONFIG_EULEROS_ANGEL_CORE_MONITOR
/* CPU id of the dedicated angel monitor core; INVALID_ANGEL_CORE when unset. */
static unsigned int angel_core = INVALID_ANGEL_CORE;

/* Return true if @cpu is the configured angel core. */
bool is_angel_core(unsigned int cpu)
{
	return (cpu == angel_core);
}

/* True when a non-zero, valid angel core has been configured. */
bool has_angel_core(void)
{
	return angel_core != 0 && angel_core != INVALID_ANGEL_CORE;
}

/* Return the configured angel core CPU id. */
unsigned int get_angel_core(void)
{
	return angel_core;
}

/* Configure @cpu as the angel monitor core. */
void set_angel_core(unsigned int cpu)
{
	angel_core = cpu;
}

/* Pointer to the shared angel-core event word; NULL until registered. */
unsigned int *angel_core_event;

/* Return the current event value, or 0 if no event word is registered. */
unsigned int get_angel_core_event(void)
{
	/* Snapshot the pointer so a concurrent clear of it cannot race the read. */
	unsigned int *tmp = angel_core_event;

	if (tmp)
		return *tmp;
	return 0;
}

/* Register @addr as the shared angel-core event word. */
void set_angel_core_event(unsigned int *addr)
{
	angel_core_event = addr;
}

/*
 * Reset the shared event word.  Tolerate the pointer not being set yet:
 * the original dereferenced unconditionally and would oops if called
 * before set_angel_core_event(), unlike get_angel_core_event() which
 * already guards against NULL.
 */
void clear_angel_core_event(void)
{
	unsigned int *tmp = angel_core_event;

	if (tmp)
		*tmp = 0;
}

#ifdef CONFIG_X86
/* NMI handler: attempt a boot-time rollback; always reports the NMI handled. */
int quick_rollback_handler(unsigned int cmd, struct pt_regs *regs)
{
	boot_try_rollback();
	return NMI_DONE;
}
#endif

/* record pre-upgrade angel timeout configuration */
static unsigned  long last_angel_timeout = DEFAULT_ANGEL_MONITOR_TIMEOUT_MS;

/*
 * Record the pre-upgrade angel timeout, converting @timeout from timer
 * ticks to milliseconds with the per-arch counter frequency.
 * NOTE(review): on x86 a zero tsc_khz would divide by zero — presumably
 * callers only run after TSC calibration; confirm.
 */
void set_last_angel_timeout(unsigned long timeout)
{
#ifdef CONFIG_X86
	/* tsc_khz is ticks per millisecond, so ticks / tsc_khz = ms. */
	last_angel_timeout = timeout / tsc_khz;
#elif defined CONFIG_ARM64
	unsigned int tsc_rate;

	/*
	* timeout value range: [0.5 * tsc_rate, 10 * tsc_rate],
	* tsc_rate is u32, last_angel_timeout is u64,
	* last_angel_timeout * 1000 never overflow.
	*/
	tsc_rate = arch_timer_get_cntfrq();
	last_angel_timeout = timeout * 1000 / tsc_rate;
#endif
}

/* Return the recorded pre-upgrade angel timeout in milliseconds. */
unsigned long get_last_angel_timeout(void)
{
	return last_angel_timeout;
}

#endif
