// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
 * Description: support store aml in reserved vram memory
 * Author: gaozhekang
 * Create: 2021.04.28
 */

#define pr_fmt(fmt) "vaml: " fmt

#include <linux/acpi.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vpmem_metadata.h>

#define VSTORE_OFFSET	(METADATA_SIZE)
#define VSTORE_SIZE		(_AC(1, UL) << 21) //2M
#define VAML_OFFSET		(VSTORE_OFFSET + VSTORE_SIZE)
#define VAML_SIZE		(_AC(1, UL) << 20) //1M

#define VAML_SIG	"NANO"

static struct acpi_table_header g_vaml_header;
static char *g_vaml_start;
static unsigned int g_vaml_len;

static DEFINE_RAW_SPINLOCK(vaml_lock);

static struct proc_dir_entry *vaml_entry;

extern int first_valid_vpmem_start(u64 *start);

/*
 * Dump the current AML buffer contents to the seq_file, one byte at a
 * time, under the lock so a concurrent write cannot change g_vaml_len
 * or the buffer mid-dump.
 */
static int vaml_show(struct seq_file *m, void *p)
{
	unsigned long flags;
	unsigned int pos = 0;

	raw_spin_lock_irqsave(&vaml_lock, flags);
	while (pos < g_vaml_len) {
		seq_putc(m, g_vaml_start[pos]);
		pos++;
	}
	raw_spin_unlock_irqrestore(&vaml_lock, flags);
	return 0;
}

/* procfs open hook: hand everything off to the single_open() helper. */
static int vaml_open(struct inode *inode, struct file *file)
{
	pr_info("start %s\n", __func__);

	/* No per-open private data is needed, hence NULL. */
	return single_open(file, vaml_show, NULL);
}

/*
 * Write user data into the reserved AML buffer at *ppos.
 *
 * Returns the number of bytes written, -EINVAL on a bad user pointer or
 * negative offset, -ENOBUFS when *ppos is already at/past the end of the
 * buffer, -ENOMEM if the bounce buffer cannot be allocated, or -EFAULT
 * if the user copy fails.
 *
 * Fix: the original called copy_from_user() while holding vaml_lock as a
 * raw spinlock with interrupts disabled.  copy_from_user() may fault and
 * sleep, which is forbidden in atomic context.  We now copy into a
 * kernel bounce buffer first, then take the lock only for the (non-
 * faulting) memcpy into the mapped region.
 */
static ssize_t vaml_write(struct file *file, const char __user *ubuf,
						  size_t count, loff_t *ppos)
{
	unsigned long free;
	unsigned long need_write;
	unsigned long flags;
	char *kbuf;

	if (!ubuf || *ppos < 0)
		return -EINVAL;

	free = (*ppos < VAML_SIZE) ? (VAML_SIZE - *ppos) : 0;
	pr_info("start %s free: %lu, ppos: %lld\n", __func__, free, *ppos);
	if (free == 0)
		return -ENOBUFS;

	if (count == 0)
		return 0;

	need_write = free > count ? count : free;

	/* Bounce buffer: user copy must happen outside the raw spinlock. */
	kbuf = kvmalloc(need_write, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (copy_from_user(kbuf, ubuf, need_write) != 0) {
		pr_err("copy_from_user failed, need_write %lu bytes\n",
			   need_write);
		kvfree(kbuf);
		return -EFAULT;
	}

	raw_spin_lock_irqsave(&vaml_lock, flags);
	memcpy(g_vaml_start + *ppos, kbuf, need_write);
	/* Track the end of valid data and advance the file position. */
	g_vaml_len = *ppos + need_write;
	*ppos = g_vaml_len;
	raw_spin_unlock_irqrestore(&vaml_lock, flags);

	pr_info("vaml_len: %u\n", g_vaml_len);
	kvfree(kbuf);
	return (ssize_t)need_write;
}

static struct proc_ops vaml_ops = {
	.proc_open		= vaml_open,
	.proc_read		= seq_read,
	.proc_write		= vaml_write,
	.proc_lseek		= seq_lseek,
	.proc_release	= single_release,
};

/*
 * Map the reserved AML region of the first valid vpmem range and expose
 * it through /proc/vaml.
 *
 * Returns 0 on success (also when no vram entry exists, so boot is not
 * blocked), -ENOMEM if the mapping or the proc entry cannot be created.
 *
 * Fix: the header was previously read with memcpy_toio(), whose copy
 * direction is kernel -> IO memory.  Here the source is the mapped
 * region and the destination is the kernel-resident g_vaml_header, so
 * memcpy_fromio() is the correct accessor.  Bare "return -1" (-EPERM by
 * accident) is replaced with proper errno values.
 */
static int __init vaml_init(void)
{
	u64 start = 0;
	u64 vaml_base = 0;

	pr_info("starting %s\n", __func__);
	if (first_valid_vpmem_start(&start) < 0) {
		pr_warn("no valid vram entry, failed to init vaml\n");
		return 0;
	}

	vaml_base = start + VAML_OFFSET;
	g_vaml_start = ioremap(vaml_base, VAML_SIZE);

	if (g_vaml_start == NULL) {
		pr_err("ioremap 0x%pK failed, size: 0x%lx\n", (void *)vaml_base, VAML_SIZE);
		return -ENOMEM;
	}

	pr_info("ioremap 0x%pK to 0x%pK success, size: 0x%lx\n",
			(void *)vaml_base, (void *)g_vaml_start, VAML_SIZE);

	if (strncmp(g_vaml_start, VAML_SIG, strlen(VAML_SIG))) {
		/* Signature missing: region was never populated; wipe it. */
		g_vaml_len = 0;
		memset_io(g_vaml_start, 0, VAML_SIZE);
		pr_info("vaml buffer has not been initialized before\n");
	} else {
		/* Read the ACPI table header out of the mapped region. */
		memcpy_fromio(&g_vaml_header, g_vaml_start, sizeof(g_vaml_header));
		/* len mustn't greater than VAML_SIZE, in case kernel crash */
		g_vaml_len = g_vaml_header.length > VAML_SIZE ? VAML_SIZE : g_vaml_header.length;

		pr_info("vaml buffer has been already initialized, len is %u\n", g_vaml_len);
	}

	vaml_entry = proc_create("vaml", 0600, NULL, &vaml_ops);
	if (!vaml_entry) {
		iounmap(g_vaml_start);
		pr_err("proc vaml entry create failed, unmap 0x%pK\n", (void *)g_vaml_start);
		return -ENOMEM;
	}

	pr_info("%s success\n", __func__);
	return 0;
}

core_initcall(vaml_init);
