// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 */

#include <linux/memory_patrol.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/rwsem.h>
#include <linux/completion.h>

#define SCAN_PAGES_BASEMINOR 0
#define SCAN_PAGES_COUNT 1
#define SCAN_PAGES_NAME "scan_pages"

#define NODE_NAME_LEN 50

/*
 * Bias value (-1U << 31 == 0x80000000, i.e. the sign bit of the 32-bit
 * atomic counter) added to page_manager->in_use by unregistration.
 * The users of page_manager (e.g. mmap, ioctl) drop
 * page_manager->in_use when they finish. If the value
 * becomes UNREGISTER_COUNT, it means no user is using
 * the page_manager and unregistration process is waiting
 * for all users to finish, so the last user calls complete()
 * to wake up unregistration process to free page_manager.
 */
enum {UNREGISTER_COUNT = -1U<<31};

/* Per-(manager, node) private data handed to the proc show handler. */
struct scannable_mem_priv {
	pg_data_t *pgdat;	/* the NUMA node this proc entry reports on */
	void (*sprint_free_pages)(struct seq_file *m, int nid);	/* copied from ops */
	struct mutex lock;	/* serializes concurrent seq_file readers */
};

/* One registered memory scanner: its ops plus per-node proc state. */
struct page_manager {
	struct list_head list;	/* linked on page_manager_list */
	struct scan_mem_operations *ops;
	struct scannable_mem_priv *privs[MAX_NUMNODES];	/* per-node proc data */
	atomic_t in_use;	/* user refcount; biased by UNREGISTER_COUNT on teardown */
	struct completion unregister;	/* signaled by the last user at teardown */
};

/* User/kernel exchange structure for ALLOC_PAGES_PFN_FD_CMD. */
struct scan_pages_ioctl_arg {
	unsigned long start_pfn;	/* in: first PFN of the requested range */
	unsigned long nr_pages;		/* in: number of pages requested */
	int fd;				/* out: fd referring to the allocated pages */
};

#define ALLOC_PAGES_PFN_FD_CMD	_IOWR('p', 1, struct scan_pages_ioctl_arg)

static struct proc_dir_entry *scannable_mem_root;	/* /proc/scannable_memory */
static struct proc_dir_entry *scannable_mem_pdes[MAX_NUMNODES];	/* per-node subdirs */
static struct cdev scan_pages_cdev;
LIST_HEAD(page_manager_list);		/* all registered page managers */
DECLARE_RWSEM(page_manager_list_rwsem);	/* protects page_manager_list */

/* Take a usage reference on @page_manager; paired with put_page_manager(). */
static inline void get_page_manager(struct page_manager *page_manager)
{
	atomic_inc(&page_manager->in_use);
}

/*
 * Drop a usage reference. When the counter lands exactly on the
 * UNREGISTER_COUNT bias, this was the last user of a manager that is
 * being torn down, so wake the waiting unregistration thread.
 */
static inline void put_page_manager(struct page_manager *page_manager)
{
	int remaining = atomic_dec_return(&page_manager->in_use);

	if (remaining == UNREGISTER_COUNT)
		complete(&page_manager->unregister);
}

/* seq_file show handler: print the manager's free pages for this node. */
static int scannable_mem_show(struct seq_file *m, void *v)
{
	struct scannable_mem_priv *priv = m->private;
	int nid = priv->pgdat->node_id;

	/* Serialize concurrent readers of the same per-node proc file. */
	mutex_lock(&priv->lock);
	priv->sprint_free_pages(m, nid);
	mutex_unlock(&priv->lock);

	return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(scannable_mem);

/*
 * Create /proc/scannable_memory/node<nid>/<ops->name> for @page_manager.
 * Best-effort: allocation or proc-creation failure is logged and the node
 * slot is simply left unregistered (privs[nid] untouched).
 */
static void __register_scannable_mem(struct page_manager *page_manager, int nid)
{
	struct proc_dir_entry *node_pde = scannable_mem_pdes[nid];
	struct scan_mem_operations *ops = page_manager->ops;
	struct scannable_mem_priv *priv;

	/*
	 * If the node directory was never created, bail out: passing a NULL
	 * parent to proc_create_data() would create the entry at /proc root.
	 */
	if (!node_pde)
		return;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (unlikely(!priv)) {
		pr_warn("Fail to alloc scannable_mem metadata for node%d/%s\n", nid, ops->name);
		return;
	}

	priv->pgdat = NODE_DATA(nid);
	priv->sprint_free_pages = ops->sprint_free_pages;
	mutex_init(&priv->lock);

	/*
	 * Publish priv only after the proc entry exists; otherwise a failed
	 * proc_create_data() would leave a priv that is unreachable through
	 * /proc yet still kfree'd (and its entry removed) on unregister.
	 */
	if (!proc_create_data(ops->name, 0400, node_pde, &scannable_mem_proc_ops, priv)) {
		pr_warn("Fail to alloc scannable_mem metadata for node%d/%s\n", nid, ops->name);
		kfree(priv);
		return;
	}
	page_manager->privs[nid] = priv;
}

/* Create one proc entry per currently-online NUMA node for @page_manager. */
static void register_scannable_mem(struct page_manager *page_manager)
{
	pg_data_t *node;

	for_each_online_pgdat(node)
		__register_scannable_mem(page_manager, node->node_id);
}

static void __unregister_scannable_mem(struct page_manager *page_manager, int nid)
{
	struct proc_dir_entry *node_pde = scannable_mem_pdes[nid];
	struct scan_mem_operations *ops = page_manager->ops;

	remove_proc_entry(ops->name, node_pde);
	kfree(page_manager->privs[nid]);
	page_manager->privs[nid] = NULL;
}

/* Tear down @page_manager's proc entries on every online NUMA node. */
static void unregister_scannable_mem(struct page_manager *page_manager)
{
	pg_data_t *node;

	for_each_online_pgdat(node)
		__unregister_scannable_mem(page_manager, node->node_id);
}

/**
 * register_mem_patrol - register a memory scanner's operations table
 * @ops: callbacks describing the scannable memory pool; must provide
 *       name, sprint_free_pages and is_page_managed, plus either
 *       alloc_pages_pfn_fd alone or the alloc_pages_pfn/free_pages_pfn pair
 *
 * Allocates a page_manager for @ops, links it on the global list and
 * creates a /proc/scannable_memory/nodeX/<name> entry per online node.
 *
 * Return: 0 on success, -EINVAL for invalid or duplicate @ops,
 * -ENOMEM on allocation failure.
 */
int register_mem_patrol(struct scan_mem_operations *ops)
{
	struct page_manager *page_manager, *iter;
	int ret = -EINVAL;

	if (!ops || !ops->name || !ops->sprint_free_pages ||
		!ops->is_page_managed)
		return ret;

	/*
	 * The supported combinations of alloc_pages_pfn, free_pages_pfn and
	 * alloc_pages_pfn_fd:
	 * 1. ops->alloc_pages_pfn_fd && !ops->alloc_pages_pfn && !ops->free_pages_pfn
	 * 2. !ops->alloc_pages_pfn_fd && ops->alloc_pages_pfn && ops->free_pages_pfn
	 */
	if (ops->alloc_pages_pfn_fd) {
		if (ops->alloc_pages_pfn || ops->free_pages_pfn)
			return ret;
	} else {
		if (!ops->alloc_pages_pfn || !ops->free_pages_pfn)
			return ret;
	}

	down_write(&page_manager_list_rwsem);
	list_for_each_entry(iter, &page_manager_list, list)
		if (iter->ops == ops || !strcmp(iter->ops->name, ops->name))
			goto err;

	/*
	 * kzalloc, not kmalloc: every privs[] slot must start out NULL,
	 * otherwise the unregister paths kfree() uninitialized pointers for
	 * nodes that are offline or whose proc registration failed.
	 */
	page_manager = kzalloc(sizeof(*page_manager), GFP_KERNEL);
	if (!page_manager) {
		ret = -ENOMEM;
		goto err;
	}

	page_manager->ops = ops;
	atomic_set(&page_manager->in_use, 0);
	init_completion(&page_manager->unregister);
	list_add_tail(&page_manager->list, &page_manager_list);
	up_write(&page_manager_list_rwsem);

	register_scannable_mem(page_manager);
	return 0;
err:
	up_write(&page_manager_list_rwsem);
	return ret;
}
EXPORT_SYMBOL(register_mem_patrol);

/**
 * unregister_mem_patrol - remove a previously registered scanner
 * @ops: the operations table passed to register_mem_patrol()
 *
 * Unlinks the matching page_manager, tears down its per-node proc entries,
 * then waits until every in-flight user (mmap/ioctl) has dropped its
 * reference before freeing it. No-op if @ops is NULL or was never
 * registered.
 */
void unregister_mem_patrol(struct scan_mem_operations *ops)
{
	struct page_manager *page_manager = NULL, *iter;

	if (!ops)
		return;

	down_write(&page_manager_list_rwsem);
	list_for_each_entry(iter, &page_manager_list, list)
		if (iter->ops == ops) {
			page_manager = iter;
			break;
		}

	if (!page_manager) {
		up_write(&page_manager_list_rwsem);
		return;
	}

	/* Unlink first so find_page_manager() can take no new references. */
	list_del(&page_manager->list);
	up_write(&page_manager_list_rwsem);
	unregister_scannable_mem(page_manager);

	/*
	 * Bias in_use by UNREGISTER_COUNT. If the sum differs, users are
	 * still active and the last put_page_manager() will complete() us;
	 * otherwise the manager can be freed immediately.
	 */
	if (atomic_add_return(UNREGISTER_COUNT, &page_manager->in_use) != UNREGISTER_COUNT)
		wait_for_completion(&page_manager->unregister);
	kfree(page_manager);
}
EXPORT_SYMBOL(unregister_mem_patrol);

/*
 * Node hot-add hook: create the node's proc directory and, for every
 * registered scanner, its per-node scannable_memory interface.
 */
void mem_patrol_register_node(struct node *node)
{
	struct page_manager *pm;
	char name[NODE_NAME_LEN];
	int nid = node->dev.id;

	/* Nothing to attach to if the root directory never came up. */
	if (!scannable_mem_root)
		return;

	snprintf(name, sizeof(name), "node%d", nid);
	scannable_mem_pdes[nid] = proc_mkdir(name, scannable_mem_root);
	if (!scannable_mem_pdes[nid]) {
		pr_warn("Fail to create /proc/scannable_memory/%s\n", name);
		return;
	}

	down_read(&page_manager_list_rwsem);
	list_for_each_entry(pm, &page_manager_list, list)
		__register_scannable_mem(pm, nid);
	up_read(&page_manager_list_rwsem);
}

void mem_patrol_unregister_node(struct node *node)
{
	int nid = node->dev.id;
	struct proc_dir_entry *pde = scannable_mem_pdes[nid];
	struct page_manager *page_manager;

	proc_remove(pde);
	down_read(&page_manager_list_rwsem);
	list_for_each_entry(page_manager, &page_manager_list, list)
		kfree(page_manager->privs[nid]);
	up_read(&page_manager_list_rwsem);
}

/*
 * vma close hook: hand the pages taken at mmap time back to their
 * manager, then drop the reference taken by scan_pages_mmap().
 */
static void scan_pages_vma_close(struct vm_area_struct *vma)
{
	struct page_manager *pm = vma->vm_private_data;
	unsigned long first_pfn = vma->vm_pgoff;
	unsigned long page_count = (vma->vm_end - vma->vm_start) / PAGE_SIZE;

	pm->ops->free_pages_pfn(first_pfn, page_count);
	put_page_manager(pm);
}

/*
 * don't support vma split, i.e. don't support to unmap a subrange —
 * the close hook frees the whole [vm_pgoff, vm_pgoff + nr_pages) range,
 * so a partial unmap would free pages that are still mapped.
 */
static int scan_pages_vma_split(struct vm_area_struct *vma, unsigned long addr)
{
	return -EINVAL;
}

/*
 * Patrol mappings are fully populated by remap_pfn_range() at mmap time,
 * so a fault here is unexpected.
 * NOTE(review): returning 0 claims the fault was handled without
 * installing a page; VM_FAULT_SIGBUS may be the intended response —
 * confirm before changing.
 */
static vm_fault_t scan_pages_fault(struct vm_fault *vmf)
{
	pr_warn("Patrol page %lx should not be swapped\n", vmf->address);
	return 0;
}

/* NOTE(review): only referenced in this file — could likely be static. */
const struct vm_operations_struct scan_pages_vmops = {
	.split = scan_pages_vma_split,	/* forbid partial unmap */
	.close = scan_pages_vma_close,	/* frees pages, drops manager ref */
	.fault = scan_pages_fault,	/* pages premapped; faults unexpected */
};

/*
 * Find the manager that owns @start_pfn. On success, a reference is
 * taken (caller must put_page_manager()); returns NULL if no registered
 * manager claims the PFN.
 */
static struct page_manager *find_page_manager(unsigned long start_pfn)
{
	struct page_manager *found = NULL, *pm;

	down_read(&page_manager_list_rwsem);
	list_for_each_entry(pm, &page_manager_list, list) {
		if (!pm->ops->is_page_managed(start_pfn))
			continue;
		/* Pin the manager before the list lock is released. */
		get_page_manager(pm);
		found = pm;
		break;
	}
	up_read(&page_manager_list_rwsem);

	return found;
}

/*
 * mmap handler for the scan_pages device: allocate the PFN range named by
 * vm_pgoff (interpreted as a start PFN) from its owning manager, zero it,
 * and map it into the caller's address space.
 * Returns 0 on success or a negative errno.
 */
static int scan_pages_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long start_pfn = vma->vm_pgoff;	/* vm_pgoff carries the PFN */
	unsigned long nr_pages;
	struct page *start_page;
	struct page_manager *page_manager;
	struct scan_mem_operations *scan_memory_ops;
	int ret = -EINVAL;

	/* Both the start PFN and the length must be pageblock-aligned. */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return ret;

	if (!IS_ALIGNED(size, PAGE_SIZE * pageblock_nr_pages))
		return ret;

	/* Takes a manager reference; dropped on error or in vma close. */
	page_manager = find_page_manager(start_pfn);
	if (!page_manager)
		return ret;

	/* Managers implementing only the fd-based ioctl path cannot mmap. */
	scan_memory_ops = page_manager->ops;
	if (!scan_memory_ops->alloc_pages_pfn)
		goto err;

	nr_pages = size / PAGE_SIZE;
	ret = scan_memory_ops->alloc_pages_pfn(start_pfn, nr_pages);
	if (ret)
		goto err;

	/* close() on this vma frees the pages and drops the reference. */
	vma->vm_ops = &scan_pages_vmops;
	vma->vm_private_data = page_manager;

	/*
	 * NOTE(review): zeroes via page_address(), i.e. the kernel linear
	 * map — assumes the PFN range is direct-mapped (no highmem);
	 * confirm for 32-bit configurations.
	 */
	start_page = pfn_to_page(start_pfn);
	memset(page_address(start_page), 0, size);

	if (file->f_flags & O_SYNC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret = remap_pfn_range(vma, vma->vm_start, start_pfn,
		size, vma->vm_page_prot);
	if (ret) {
		/* Undo the allocation and detach vm_ops so a later close()
		 * cannot double-free the range. */
		scan_memory_ops->free_pages_pfn(start_pfn, nr_pages);
		vma->vm_ops = NULL;
		vma->vm_private_data = NULL;
		goto err;
	}

	return 0;
err:
	put_page_manager(page_manager);
	return ret;
}

/*
 * open handler: mark the opening task as a memory-patrol task via
 * PF_MEMORY_PATROL. A task may hold at most one open handle.
 */
static int scan_pages_open(struct inode *inode, struct file *file)
{
	if (current->flags & PF_MEMORY_PATROL)
		return -EINVAL;	/* already opened by this task */

	current->flags |= PF_MEMORY_PATROL;
	return 0;
}

/* release handler: clear the patrol flag set by scan_pages_open(). */
static int scan_pages_release(struct inode *inode, struct file *file)
{
	current->flags &= ~PF_MEMORY_PATROL;
	return 0;
}

/*
 * Ask the manager owning ioctl_arg->start_pfn to allocate the range and
 * return it as a file descriptor in ioctl_arg->fd.
 * Returns 0 on success or a negative errno.
 */
static long alloc_pages_pfn_fd(struct scan_pages_ioctl_arg *ioctl_arg)
{
	struct page_manager *pm;
	struct scan_mem_operations *ops;
	int ret = -EINVAL;

	/* Takes a reference; dropped before returning. */
	pm = find_page_manager(ioctl_arg->start_pfn);
	if (!pm)
		return ret;

	ops = pm->ops;
	if (!ops->alloc_pages_pfn_fd)
		goto out;

	ret = ops->alloc_pages_pfn_fd(ioctl_arg->start_pfn, ioctl_arg->nr_pages);
	if (ret < 0)
		goto out;

	/* Success: the callback returned the new fd. */
	ioctl_arg->fd = ret;
	ret = 0;
out:
	put_page_manager(pm);
	return ret;
}

/*
 * ioctl handler: currently only ALLOC_PAGES_PFN_FD_CMD is supported.
 * Copies the arg struct in, performs the allocation, and copies the
 * resulting fd back to userspace.
 */
static long scan_pages_ioctl(struct file *filep, unsigned int cmd,
		unsigned long arg)
{
	struct scan_pages_ioctl_arg ioctl_arg;
	int ret;

	/* Reject unsupported commands before touching user memory. */
	if (cmd != ALLOC_PAGES_PFN_FD_CMD)
		return -EINVAL;

	if (copy_from_user(&ioctl_arg, (void __user *)arg, sizeof(ioctl_arg)))
		return -EFAULT;

	ret = alloc_pages_pfn_fd(&ioctl_arg);
	if (ret)
		return ret;

	/* Hand the new fd back to userspace. */
	if (copy_to_user((void __user *)arg, &ioctl_arg, sizeof(ioctl_arg)))
		return -EFAULT;

	return 0;
}

/*
 * File operations for the scan_pages char device.
 * NOTE(review): no .owner set — fine for built-in code, but needed if
 * this is ever built as a module.
 */
static const struct file_operations scan_pages_fops = {
	.mmap = scan_pages_mmap,
	.open = scan_pages_open,
	.release = scan_pages_release,
	.unlocked_ioctl = scan_pages_ioctl,
};

/*
 * Create the scan_pages char device: dev number, class, cdev and device
 * node. On any step's failure, the prior steps are unwound in reverse
 * order. The class/region/cdev persist for the life of the kernel — no
 * teardown path exists in this file (built in via device_initcall).
 */
static int __init scan_pages_chr_dev_init(void)
{
	struct class *scan_pages_class;
	struct device *scan_pages_dev;
	dev_t dev;
	int ret;

	/* Reserve one dynamically-assigned major/minor pair. */
	ret = alloc_chrdev_region(&dev, SCAN_PAGES_BASEMINOR,
								SCAN_PAGES_COUNT, SCAN_PAGES_NAME);
	if (ret)
		return ret;

	scan_pages_class = class_create(THIS_MODULE, SCAN_PAGES_NAME);
	if (IS_ERR(scan_pages_class)) {
		ret = PTR_ERR(scan_pages_class);
		goto unregister_chrdev;
	}

	cdev_init(&scan_pages_cdev, &scan_pages_fops);
	ret = cdev_add(&scan_pages_cdev, dev, SCAN_PAGES_COUNT);
	if (ret)
		goto destroy_class;

	/* Create /dev/scan_pages via the device model. */
	scan_pages_dev = device_create(scan_pages_class, NULL,
				dev, NULL, SCAN_PAGES_NAME);
	if (IS_ERR(scan_pages_dev)) {
		ret = PTR_ERR(scan_pages_dev);
		goto delete_cdev;
	}

	return 0;

delete_cdev:
	cdev_del(&scan_pages_cdev);
destroy_class:
	class_destroy(scan_pages_class);
unregister_chrdev:
	unregister_chrdev_region(dev, SCAN_PAGES_COUNT);

	return ret;
}
device_initcall(scan_pages_chr_dev_init);

/*
 * Create /proc/scannable_memory, the root under which per-node
 * directories are added. Failure is logged only; later registration
 * paths check scannable_mem_root for NULL and bail out.
 */
static void __init scannable_mem_init(void)
{
	scannable_mem_root = proc_mkdir("scannable_memory", NULL);
	if (!scannable_mem_root)
		pr_warn("Fail to create /proc/scannable_memory\n");
}
core_initcall(scannable_mem_init);
