/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021.
 * Description: support dynamic-reserved-memory feature
 * Author: cheng chao
 * Create: 2021-8-14
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/of_address.h>

/* pme lack memory */
/* Name of the procfs entry (/proc/dynmem) used to list/free regions. */
#define DYNMEM_PROC_MEM "dynmem"
/* Worst-case text size for one read: ~64 bytes per region line. */
#define MAX_DYNMEM_LEN (MAX_DYN_MEM_REGIONS*64)
/* Upper bound on regions accepted from cmdline + device tree combined. */
#define MAX_DYN_MEM_REGIONS 16

/* procfs entry handle created by dynmem_proc_init(). */
static struct proc_dir_entry *dynmem_proc_free;
/* Number of slots currently populated in dynamic_res[]. */
u32 dynamic_regions_n;

/* Serializes readers/writers of dynamic_res[] via the proc interface. */
struct mutex dyn_mem_lock;
/* Region table; flags == 0 means unused, IORESOURCE_DISABLED means freed. */
struct resource dynamic_res[MAX_DYN_MEM_REGIONS] = {
	[0 ... MAX_DYN_MEM_REGIONS-1].name = "Dyn reserved mem",
	[0 ... MAX_DYN_MEM_REGIONS-1].flags = 0,
};

/*
 * add_reserve_memory - record one candidate dynamic-reserved region.
 * @start: physical base address, must be PAGE_SIZE aligned
 * @size:  length in bytes, must be PAGE_SIZE aligned and non-zero
 *
 * Validates the region and stores it in dynamic_res[]; the actual
 * memblock reservation happens later in dynamic_reserve_memory().
 * Invalid regions are logged at error level and dropped.
 */
static void __init add_reserve_memory(phys_addr_t start, phys_addr_t size)
{
	/* reject the degenerate region up front */
	if (size == 0) {
		pr_err("DYN_MEM_REGIONS size:0x%llx must > 0.\n", (u64)size);
		return;
	}

	/* phys_addr_t is unsigned, so wrap-around signals overflow */
	if (start + size < start) {
		pr_err("start + size overflow.\n");
		return;
	}

	if (dynamic_regions_n >= MAX_DYN_MEM_REGIONS) {
		/* this is a rejection, not progress info: log as error */
		pr_err("too many DYN_MEM_REGIONS(0x%llx).\n", (u64)start);
		return;
	}

	if ((start & (~PAGE_MASK)) || (size & (~PAGE_MASK))) {
		pr_err("DYN_MEM_REGIONS start:0x%llx size:0x%llx not align.\n",
			(u64)start, (u64)size);
		return;
	}

	dynamic_res[dynamic_regions_n].start = start;
	dynamic_res[dynamic_regions_n].end = start + size - 1;	/* inclusive */
	dynamic_res[dynamic_regions_n].flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	dynamic_regions_n += 1;
}

/*
 * dynamic_reserve_memory - commit all queued regions to memblock.
 *
 * For each region recorded by add_reserve_memory(), verify it lies
 * within real memory and does not overlap an existing reservation,
 * then reserve it and publish it in /proc/iomem.  Regions failing
 * any check are cleared so later lookups cannot match them.
 */
static void __init dynamic_reserve_memory(void)
{
	int i, err;

	for (i = 0; i < dynamic_regions_n; i++) {
		/* .end is inclusive, so the byte count is end - start + 1 */
		resource_size_t size = resource_size(&dynamic_res[i]);

		/* both endpoints must be backed by actual memory */
		if (memblock_is_memory(dynamic_res[i].start) == false ||
				memblock_is_memory(dynamic_res[i].end) == false) {
			pr_err("start:0x%llx end:0x%llx overrun\n",
				(u64)dynamic_res[i].start, (u64)dynamic_res[i].end);
			dynamic_res[i].flags = 0;
			dynamic_res[i].start = 0;
			dynamic_res[i].end = 0;
			continue;
		}

		/*
		 * Check the full inclusive range.  The previous code passed
		 * end - start, leaving the final byte of the region
		 * unchecked for overlap.
		 */
		err = memblock_is_region_reserved(dynamic_res[i].start, size);
		if (err) {
			pr_err("start:0x%llx end:0x%llx overlap with others\n",
				(u64)dynamic_res[i].start, (u64)dynamic_res[i].end);
			dynamic_res[i].flags = 0;
			dynamic_res[i].start = 0;
			dynamic_res[i].end = 0;
			continue;
		}

		err = memblock_reserve(dynamic_res[i].start, size);
		if (err < 0) {
			/*
			 * Log before clearing the slot; the old code zeroed
			 * .start first, so the message always printed 0.
			 */
			pr_err("reservation failed - memory in use (0x%llx)\n",
				(u64)dynamic_res[i].start);
			dynamic_res[i].start = 0;
			dynamic_res[i].end = 0;
			dynamic_res[i].flags = IORESOURCE_DISABLED;
			continue;
		}

		insert_resource(&iomem_resource, &dynamic_res[i]);
	}
}

/*
 * dts_dynamic_reserve_memory - queue regions declared in the device tree.
 *
 * Reads the "dynamic_memory" compatible node: "#dynamic-mem-regions"
 * gives the region count, and each "reg" entry supplies one region,
 * which is handed to add_reserve_memory().  The count is clamped to
 * the free slots remaining in dynamic_res[].
 */
static void __init dts_dynamic_reserve_memory(void)
{
	struct device_node *node;
	u32 reserve_memblock;
	u32 i;
	int err;

	node = of_find_compatible_node(NULL, NULL, "dynamic_memory");
	if (node == NULL)
		return;

	if (of_property_read_u32(node, "#dynamic-mem-regions", &reserve_memblock)) {
		pr_warn("DYN_MEM_REGIONS get dts data err.\n");
		goto out;
	}

	/* clamp to the free slots left in dynamic_res[] */
	if (reserve_memblock > (MAX_DYN_MEM_REGIONS - dynamic_regions_n)) {
		reserve_memblock = MAX_DYN_MEM_REGIONS - dynamic_regions_n;
		pr_warn("DYN_MEM_REGIONS too many,%u regions valid.\n",
			reserve_memblock);
	}

	for (i = 0; i < reserve_memblock; i++) {
		struct resource res;

		err = of_address_to_resource(node, i, &res);
		if (err) {
			pr_err("%s: get regions %u err!\n", node->full_name, i);
			goto out;
		}

		add_reserve_memory(res.start, resource_size(&res));
	}

out:
	/* of_find_compatible_node() took a reference; drop it */
	of_node_put(node);
}

/*
 * dynmem - boot-time entry point for the dynamic-reserved-memory feature.
 *
 * Gathers regions from the device tree (cmdline regions were already
 * queued by the "dynmem=" early_param), then reserves them all.
 * Must run while memblock is still active (__init, before free_initmem).
 */
void __init dynmem(void)
{
	pr_info("dynamic memory enable.\n");
	dts_dynamic_reserve_memory();
	dynamic_reserve_memory();
}

/*
 * early_dynmem - parse the "dynmem=size[@start]" kernel parameter.
 *
 * Both fields use memparse() suffixes (K/M/G).  The parsed region is
 * queued via add_reserve_memory(); actual reservation happens later.
 * Returns 0 on success, -EINVAL if the values exceed a 32-bit
 * physical address space on !CONFIG_PHYS_ADDR_T_64BIT builds.
 */
static int __init early_dynmem(char *p)
{
	char *at = NULL;
	unsigned long long base = 0;
	unsigned long long len = memparse(p, &at);

	if (*at == '@')
		base = memparse(at + 1, NULL);

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (base > UINT_MAX || len > UINT_MAX) {
		pr_err("start or size large than 32bit physical address.");
		return -EINVAL;
	}
#endif

	add_reserve_memory((phys_addr_t)base, (phys_addr_t)len);

	return 0;
}
early_param("dynmem", early_dynmem);

/*
 * find_dyn_reserve_memory - locate a live region by exact (start, len).
 * @start: physical base the caller wants to free
 * @len:   length in bytes (must equal the region's full size)
 *
 * Skips unused (flags == 0) and already-freed (IORESOURCE_DISABLED)
 * slots.  Without that filter, a freed slot zeroed to {0, 0} would
 * match a request of start=0 len=1 and allow a double free.
 *
 * Caller must hold dyn_mem_lock.  Returns the slot index, or -EINVAL
 * if no live region matches.
 */
static int find_dyn_reserve_memory(phys_addr_t start, phys_addr_t len)
{
	int i;

	for (i = 0; i < dynamic_regions_n; i++) {
		if (dynamic_res[i].flags == 0 ||
				dynamic_res[i].flags == IORESOURCE_DISABLED)
			continue;
		if ((dynamic_res[i].start == start) &&
			(dynamic_res[i].end - dynamic_res[i].start + 1) == len)
			return i;
	}

	return -EINVAL;
}

/*
 * dynmem_proc_free_write - free a previously reserved region.
 *
 * User space writes "<start> <size>" (memparse syntax) to /proc/dynmem.
 * If the pair exactly matches a live region, every page in it is
 * released to the page allocator and the resource slot is retired.
 * Always consumes the full write; unmatched input is only logged.
 */
static ssize_t dynmem_proc_free_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
{
	/* for address and size 64byte is enough */
	char buffer[64] = {0};
	char *tmp = NULL;
	phys_addr_t value[2] = {0};
	unsigned long long phys;
	int n;

	/* leave room for the NUL terminator provided by the {0} init */
	if (count >= (sizeof(buffer) - 1))
		return -EINVAL;

	if (copy_from_user(buffer, buf, count))
		return -EFAULT;

	value[0] = memparse(buffer, &tmp);
	if (*tmp == ' ')
		value[1] = memparse(tmp + 1, NULL);

	mutex_lock(&dyn_mem_lock);
	n = find_dyn_reserve_memory(value[0], value[1]);
	if (n >= 0) {
		/*
		 * .end is inclusive, so iterate with <=; the previous
		 * '<' comparison leaked the final page of every region.
		 */
		for (phys = dynamic_res[n].start; phys <= dynamic_res[n].end;
				phys += PAGE_SIZE)
			free_reserved_page(phys_to_page(phys));

		release_resource(&dynamic_res[n]);
		dynamic_res[n].start = 0;
		dynamic_res[n].end = 0;
		dynamic_res[n].flags = IORESOURCE_DISABLED;
	} else {
		pr_warn("DYN_MEM_REGIONS invalid input %pa %pa\n",
			&value[0], &value[1]);
	}
	mutex_unlock(&dyn_mem_lock);

	return count;
}

static int dynmem_proc_free_read(struct seq_file *m, void *v)
{
	int i;

	mutex_lock(&dyn_mem_lock);
	for (i = 0; i < dynamic_regions_n; i++) {
		if ((dynamic_res[i].flags == 0) || (dynamic_res[i].flags == IORESOURCE_DISABLED))
			continue;
		seq_printf(m, "0x%llx 0x%llx\n", (u64)dynamic_res[i].start,
			(u64)dynamic_res[i].end - (u64)dynamic_res[i].start + 1);
	}
	mutex_unlock(&dyn_mem_lock);

	return 0;
}

/* proc open hook: bind the single-shot seq_file show routine. */
static int dynmem_proc_free_open(struct inode *inode, struct file *file)
{
	return single_open(file, dynmem_proc_free_read, NULL);
}

/* /proc/dynmem file operations: read lists regions, write frees one. */
static const struct proc_ops dynmem_ops = {
	.proc_open         = dynmem_proc_free_open,
	.proc_write        = dynmem_proc_free_write,
	.proc_read         = seq_read,
	.proc_lseek        = seq_lseek,
	.proc_release      = single_release,
};

/*
 * dynmem_proc_init - create the /proc/dynmem control file.
 *
 * Returns 0 on success, -EINVAL if the procfs entry cannot be created.
 */
static int __init dynmem_proc_init(void)
{
	/*
	 * Initialize the lock before the proc entry becomes visible;
	 * the old order let an early open/write race mutex_init().
	 */
	mutex_init(&dyn_mem_lock);

	dynmem_proc_free = proc_create(DYNMEM_PROC_MEM, 0640, NULL,
			&dynmem_ops);
	if (dynmem_proc_free == NULL) {
		pr_err("cannot create %s procfs entry\n", DYNMEM_PROC_MEM);
		return -EINVAL;
	}

	return 0;
}

late_initcall(dynmem_proc_init);
