// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include "misc.h"
#include "error.h"

#include <../../../../lib/vpmem.c>

/* Macros used by the included decompressor code below. */
#define STATIC
#include <linux/decompress/mm.h>

/* True when the vpmem cmdline requests automatic sizing (set by vpmem_memparse). */
static bool vpmem_os_mem_auto;
/* OS memory size parsed from the cmdline, or derived when auto mode is on. */
static unsigned long vpmem_os_mem_size;
/* Ranges KASLR must avoid; filled from the cmdline or the ACPI SRAT table. */
struct vpmem_early_table avoid_table_vpmem;

/* vpmem avoid mem */
/* vpmem avoid mem */
/*
 * Validate one vpmem range from the command line.
 *
 * Both endpoints must be 2M (PMD) aligned and the range must be non-empty.
 * Returns 0 when valid, -EINVAL otherwise (after printing a diagnostic).
 * Used as the check callback passed to vpmem_memparse().
 */
static int vpmem_memparse_check(u64 from, u64 to)
{
	if (!IS_ALIGNED(from, PMD_SIZE)) {
		/* "0x" prefix added for consistency with the other messages below. */
		error_putstr("VPMEM start address 0x");
		error_puthex(from);
		error_putstr(" not aligned with 2M\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(to, PMD_SIZE)) {
		error_putstr("VPMEM end address 0x");
		error_puthex(to);
		error_putstr(" not aligned with 2M\n");
		return -EINVAL;
	}
	/* Empty or inverted range is rejected. */
	if (from >= to) {
		error_putstr("vpmem mem [0x");
		error_puthex(from);
		error_putstr("-0x");
		error_puthex(to);
		error_putstr("] Invalid VPMEM range\n");
		return -EINVAL;
	}
	return 0;
}

/*
 * Parse the vpmem command-line string @p and (re)populate the avoid table,
 * the auto flag and the OS memory size. A parse failure is fatal.
 */
void vpmem_avoid_memmap(char *p)
{
	int err;

	/* Start from an empty table; parsing fills in any user-given ranges. */
	avoid_table_vpmem.nr_entries = 0;
	err = vpmem_memparse(p, &vpmem_os_mem_auto, &vpmem_os_mem_size,
			     &avoid_table_vpmem, vpmem_memparse_check);
	if (err)
		error("vpmem args memparse failed");
}

static int vpmem_init_table_kaslr(void)
{
	unsigned long table_addr;
	struct acpi_table_header *table_header;
	char *arg;
	int ret = 0;
	int len = get_max_acpi_arg_length();

	arg = (char *)malloc(sizeof(char) * len);
	if (avoid_table_vpmem.nr_entries > 0)
		goto cleanup;

	if (cmdline_find_option("acpi", arg, sizeof(arg)) == 3 &&
	    !strncmp(arg, "off", 3)) {
		ret = -ENODEV;
		goto cleanup;
	}

	if (cmdline_find_option("numa", arg, sizeof(arg)) == 3 &&
	    !strncmp(arg, "off", 3)) {
		ret = -ENODEV;
		goto cleanup;
	}

	table_addr = get_acpi_srat_table();
	if (!table_addr)
		goto cleanup;

	table_header = (struct acpi_table_header *)table_addr;
	if (vpmem_init_table(table_header, &avoid_table_vpmem))
		error("too many vpmem entries.");

cleanup:
	free(arg);
	return ret;
}

/*
 * Fallback layout: reserve everything above vpmem_os_mem_size with a single
 * avoid entry, leaving only [0, vpmem_os_mem_size) for the OS/KASLR.
 */
static void vpmem_single_numa_exclusive(void)
{
	debug_putstr("vpmem single numa\n");
	/* One entry spanning [os_mem_size, ULLONG_MAX). */
	avoid_table_vpmem.entries[0].addr = vpmem_os_mem_size;
	avoid_table_vpmem.entries[0].size = ULLONG_MAX - vpmem_os_mem_size;
	/* Publish the entry count last. */
	avoid_table_vpmem.nr_entries = 1;
}

/*
 * Resolve the final vpmem avoid regions for KASLR.
 *
 * Auto mode: build the table from the SRAT, then derive the OS memory size
 * from it. Manual size: try to equalize the size across NUMA nodes, falling
 * back to a single exclusive region. User-defined explicit ranges (size 0,
 * not auto) were already recorded by the cmdline parser and are kept as-is.
 */
static void __handle_avoid_vpmem_mem_regions(void)
{
	if (vpmem_os_mem_auto) {
		if (vpmem_init_table_kaslr() < 0) {
			/* error() likely halts; return kept defensively — NOTE(review): confirm. */
			error("vpmem auto but init table failed");
			return;
		}
		/* Derive the OS memory size from the SRAT-built table. */
		vpmem_os_mem_size = vpmem_get_auto_os_mem_size(&avoid_table_vpmem);
		debug_putaddr(vpmem_os_mem_size);
		if (!vpmem_os_mem_size) {
			error("vpmem auto but vpmem_os_mem_size is 0");
			return;
		}
	}

	/*
	 * Not auto and os_mem_size is 0, means userdef range,
	 * which has been saved in handle_mem_options
	 */
	if (!vpmem_os_mem_size)
		return;

	/* Short-circuit order matters: only equalize after a successful init. */
	if (vpmem_init_table_kaslr() < 0 ||
	    vpmem_mult_numa_equalize(&avoid_table_vpmem, vpmem_os_mem_size) < 0) {
		vpmem_single_numa_exclusive();
		return;
	}
}

/* Dump every avoid-table entry (address then size) to the debug console. */
static void vpmem_debug_print_mem_region(void)
{
	int idx = 0;

	while (idx < avoid_table_vpmem.nr_entries) {
		debug_putaddr(avoid_table_vpmem.entries[idx].addr);
		debug_putaddr(avoid_table_vpmem.entries[idx].size);
		idx++;
	}
}

/*
 * Public entry point: compute the vpmem avoid regions for KASLR,
 * then dump the resulting table for debugging.
 */
void handle_avoid_vpmem_mem_regions(void)
{
	__handle_avoid_vpmem_mem_regions();
	vpmem_debug_print_mem_region();
}

/* for vpmem mem avoid */
/* for vpmem mem avoid */
/*
 * Check the candidate image placement @img against every vpmem avoid entry.
 * On overlap with an entry starting below *earliest, record that entry in
 * *overlap, lower *earliest to its start, and set *is_overlapping.
 */
void vpmem_mem_avoid(struct mem_vector *img, struct mem_vector *overlap,
					 u64 *earliest, bool *is_overlapping)
{
	int idx;

	for (idx = 0; idx < avoid_table_vpmem.nr_entries; idx++) {
		struct mem_vector cur;

		cur.start = avoid_table_vpmem.entries[idx].addr;
		cur.size = avoid_table_vpmem.entries[idx].size;

		/* Skip empty entries and ones that don't beat the current best. */
		if (!cur.size)
			continue;
		if (!mem_overlaps(img, &cur))
			continue;
		if (cur.start >= *earliest)
			continue;

		*overlap = cur;
		*earliest = cur.start;
		*is_overlapping = true;
	}
}
