// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 * Description: Euler Hybrid Memory Management.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/mm_types.h>
#include <linux/uaccess.h>
#include <linux/hpmm.h>
#include <linux/gfp.h>

#include "hpmm_main.h"

static DEFINE_SPINLOCK(hpmm_lock);
static LIST_HEAD(hpmm_head);

/*
 * hpmm_register_memory_driver - add a backend memory driver to the global list.
 * @driver: driver descriptor; its ->list node is linked under hpmm_lock.
 *
 * Drivers are added at the head of hpmm_head, so a later registration with
 * the same ->type takes precedence in hpmm_driver_lookup().
 */
void hpmm_register_memory_driver(struct hpmm_driver *driver)
{
	spin_lock(&hpmm_lock);
	list_add(&driver->list, &hpmm_head);
	spin_unlock(&hpmm_lock);
}

/*
 * hpmm_unregister_memory_driver - remove a backend driver from the list.
 * @driver: driver previously passed to hpmm_register_memory_driver().
 *
 * NOTE(review): hpmm_driver_lookup() returns driver pointers after dropping
 * hpmm_lock, so callers appear to rely on drivers not unregistering while
 * still in use — confirm the unregister path guarantees this.
 */
void hpmm_unregister_memory_driver(struct hpmm_driver *driver)
{
	spin_lock(&hpmm_lock);
	list_del(&driver->list);
	spin_unlock(&hpmm_lock);
}

static struct hpmm_driver *hpmm_driver_lookup(int type)
{
	struct hpmm_driver *driver;

	spin_lock(&hpmm_lock);
	list_for_each_entry(driver, &hpmm_head, list)
		if (driver->type == type) {
			spin_unlock(&hpmm_lock);
			return driver;
		}
	spin_unlock(&hpmm_lock);

	return NULL;
}

/*
 * .name hook of hpmm_vm_ops: label hpmm VMAs (e.g. in /proc/<pid>/maps)
 * with the owning device's name. Only valid on VM_HYBRID_MM mappings.
 */
static const char *hpmm_name(struct vm_area_struct *vma)
{
	struct hpmm_device *dev = vma->vm_private_data;

	WARN_ON(!(vma->vm_flags & VM_HYBRID_MM));

	return dev->name;
}

/* VMA callbacks installed by hpmm_mmap(): fault handler plus maps name. */
static const struct vm_operations_struct hpmm_vm_ops = {
	.fault = hpmm_fault,
	.name  = hpmm_name,
};

/*
 * Allocate the per-open device context. The memory type is not chosen here;
 * userspace selects it later through the HPMM_SET_MEMORY_TYPE ioctl.
 */
static int hpmm_open(struct inode *inode, struct file *file)
{
	struct hpmm_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return -ENOMEM;

	mutex_init(&dev->hpmm_lock);
	INIT_LIST_HEAD(&dev->p_dev);
	file->private_data = dev;

	return 0;
}

/*
 * Tear down driver-side state attached to @hpmm_dev by dispatching to the
 * driver that matches the device's memory type.
 *
 * Returns 0 on success, -EINVAL when no driver handles the type.
 */
static int hpmm_release_dev_info(struct hpmm_device *hpmm_dev)
{
	struct hpmm_driver *drv = hpmm_driver_lookup(hpmm_dev->mem_type);

	if (!drv) {
		pr_warn("Unsupported memory type:%d\n", hpmm_dev->mem_type);
		return -EINVAL;
	}

	drv->destroy(hpmm_dev);

	return 0;
}

/*
 * File release: undo driver state (only if a memory type was ever set)
 * and free the per-open context allocated in hpmm_open().
 */
static int hpmm_release(struct inode *inode, struct file *file)
{
	struct hpmm_device *dev = file->private_data;

	if (!dev) {
		pr_err("release failed, the file is not right");
		return -EINVAL;
	}

	/* A zero mem_type means no driver create() ever ran. */
	if (dev->mem_type)
		hpmm_release_dev_info(dev);

	kfree(dev);

	return 0;
}

/*
 * mmap handler: mark the VMA as a hybrid-memory mapping and install the
 * hpmm fault handler. Requires a memory type to have been set first and
 * rejects shared mappings.
 */
static int hpmm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct hpmm_device *dev = file->private_data;
	loff_t vma_len, total;

	if (!dev || !dev->mem_type) {
		pr_warn("Please set memory type first\n");
		return -EINVAL;
	}

	if (vma->vm_flags & VM_SHARED) {
		pr_warn("Shared hpmm mmap is not supported yet\n");
		return -EINVAL;
	}

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	total = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* Check for overflow. */
	if (total < vma_len)
		return -EINVAL;

	/* VM_DONTEXPAND will naturally disable ksm and khugepaged. */
	vma->vm_flags |= VM_HYBRID_MM | VM_DONTEXPAND;
	vma->vm_ops = &hpmm_vm_ops;
	vma->vm_private_data = dev;

	return 0;
}

/*
 * Validate a userspace-supplied memory type: it must be non-zero, use only
 * defined bits, and not combine mutually exclusive attributes.
 */
static bool hpmm_mem_type_valid(int type)
{
	/* Must be set, and must use only well-defined bits. */
	if (!type || (type & ~HPMM_MEMORY_TYPE_MASK))
		return false;

	/* Persistent and volatile are mutually exclusive. */
	if ((type & HPMM_PERSISTENT_MEMORY) && (type & HPMM_VOLATILE_MEMORY))
		return false;

	/* High and low reliability are mutually exclusive. */
	return !((type & HPMM_HIGH_RELIABLE_MEMORY) &&
		 (type & HPMM_LOW_RELIABLE_MEMORY));
}

/*
 * Bind @hpmm_dev to the driver handling @hpmm_info->type and let the driver
 * initialize the device. A device whose type is already set is left alone
 * (returns 0 without calling create()).
 */
static int hpmm_fill_dev_info(struct hpmm_device *hpmm_dev, struct hpmm_mem_info *hpmm_info)
{
	struct hpmm_driver *drv;
	int ret = 0;

	drv = hpmm_driver_lookup(hpmm_info->type);
	if (!drv) {
		pr_warn("Unsupported memory type:%d\n", hpmm_info->type);
		return -EINVAL;
	}

	mutex_lock(&hpmm_dev->hpmm_lock);
	if (hpmm_dev->mem_type & HPMM_MEMORY_TYPE_MASK)
		pr_info("hpmm_dev is aleardy set memory type");
	else
		ret = drv->create(hpmm_dev, hpmm_info);
	mutex_unlock(&hpmm_dev->hpmm_lock);

	return ret;
}

/*
 * Copy the user hpmm_mem_info at @arg into @hpmm_info and check that the
 * requested memory type is well-formed.
 *
 * Returns 0 on success, -EINVAL on a faulting user pointer or invalid type.
 *
 * The previous separate access_ok() call was redundant: copy_from_user()
 * already performs the access_ok() range check internally, so a bad pointer
 * simply makes the copy fail.
 */
static int hpmm_mem_info_valid(unsigned long arg, struct hpmm_mem_info *hpmm_info)
{
	void __user *buf = (void __user *)arg;

	if (copy_from_user(hpmm_info, buf, sizeof(*hpmm_info))) {
		pr_warn("Failed to copy from user\n");
		return -EINVAL;
	}

	if (!hpmm_mem_type_valid(hpmm_info->type)) {
		pr_warn("Invalid hpmm memory type:%d\n", hpmm_info->type);
		return -EINVAL;
	}

	return 0;
}

static int hpmm_set_memory_type(struct hpmm_device *hpmm_dev, unsigned long arg)
{
	struct hpmm_mem_info hpmm_info;

	if (hpmm_mem_info_valid(arg, &hpmm_info) < 0) {
		pr_warn("Invalid hpmm memory info");
		return -EINVAL;
	}

	return hpmm_fill_dev_info(hpmm_dev, &hpmm_info);
}

static int hpmm_clear_memory(struct hpmm_device *hpmm_dev, unsigned long arg)
{
	struct hpmm_mem_info hpmm_info;
	void __user *buf = (void __user *)arg;
	struct hpmm_driver *driver;

	if (!access_ok(buf, sizeof(struct hpmm_mem_info))) {
		pr_warn("Failed to access user data\n");
		return -EINVAL;
	}

	if (copy_from_user(&hpmm_info, buf, sizeof(struct hpmm_mem_info))) {
		pr_warn("Failed to copy from user\n");
		return -EINVAL;
	}

	if (hpmm_info.type != HPMM_PERSISTENT_MEMORY) {
		pr_err("hpmm type : %d is invalid", hpmm_info.type);
		return -EINVAL;
	}

	driver = hpmm_driver_lookup(hpmm_info.type);
	if (!driver) {
		pr_err("no valid driver, memory type: %d\n", hpmm_info.type);
		return -EINVAL;
	}

	return driver->clear(hpmm_dev, &hpmm_info);
}

static int hpmm_recover_memory(struct hpmm_device *hpmm_dev, unsigned long arg)
{
	struct hpmm_mem_info hpmm_info;
	void __user *buf = (void __user *)arg;
	struct hpmm_driver *driver;

	if (!access_ok(buf, sizeof(struct hpmm_mem_info))) {
		pr_warn("Failed to access user data\n");
		return -EINVAL;
	}

	if (copy_from_user(&hpmm_info, buf, sizeof(struct hpmm_mem_info))) {
		pr_warn("Failed to copy from user\n");
		return -EINVAL;
	}

	if (hpmm_info.type != HPMM_PERSISTENT_MEMORY) {
		pr_err("hpmm type : %d is invalid", hpmm_info.type);
		return -EINVAL;
	}

	driver = hpmm_driver_lookup(hpmm_info.type);
	if (!driver) {
		pr_err("no valid driver, memory type: %d\n", hpmm_info.type);
		return -EINVAL;
	}

	return driver->recover(hpmm_dev, &hpmm_info);
}

static int hpmm_thp_memory_mapping(struct hpmm_device *hpmm_dev, unsigned long arg)
{
	struct hpmm_mem_info hpmm_info;
	void __user *buf = (void __user *)arg;
	struct hpmm_driver *driver;

	if (!access_ok(buf, sizeof(struct hpmm_mem_info))) {
		pr_warn("Failed to access user data\n");
		return -EINVAL;
	}

	if (copy_from_user(&hpmm_info, buf, sizeof(struct hpmm_mem_info))) {
		pr_warn("Failed to copy from user\n");
		return -EINVAL;
	}

	if (hpmm_info.type != HPMM_PERSISTENT_MEMORY) {
		pr_err("hpmm type : %d is invalid", hpmm_info.type);
		return -EINVAL;
	}

	driver = hpmm_driver_lookup(hpmm_info.type);
	if (!driver) {
		pr_err("no valid driver, memory type: %d\n", hpmm_info.type);
		return -EINVAL;
	}

	return driver->linear_memory_mapping(hpmm_dev, &hpmm_info);
}

static int hpmm_query_uce_record(struct hpmm_device *hpmm_dev, unsigned long arg)
{
	struct hpmm_uce_records *records;
	void __user *buf = (void __user *)arg;
	struct hpmm_driver *driver;
	int ret = -EFAULT;

	if (!access_ok(buf, sizeof(struct hpmm_uce_records))) {
		pr_warn("Failed to access user data\n");
		return ret;
	}

	records = kzalloc(sizeof(struct hpmm_uce_records), GFP_KERNEL);
	if (!records)
		return -ENOMEM;

	if (copy_from_user(records, buf, sizeof(struct hpmm_uce_records))) {
		pr_warn("Failed to copy from user\n");
		goto out;
	}

	driver = hpmm_driver_lookup(records->type);
	if (!driver) {
		pr_warn("Unsupported memory type:%d\n", records->type);
		goto out;
	}

	mutex_lock(&hpmm_dev->hpmm_lock);
	ret = driver->query_uce(records);
	mutex_unlock(&hpmm_dev->hpmm_lock);

	if (copy_to_user(buf, records, sizeof(struct hpmm_uce_records))) {
		pr_warn("Failed to copy to user\n");
		ret = -EFAULT;
	}
out:
	kfree(records);
	return ret;
}

static int hpmm_clear_uce_record(struct hpmm_device *hpmm_dev, unsigned long arg)
{
	struct hpmm_uce_records *records;
	void __user *buf = (void __user *)arg;
	struct hpmm_driver *driver;
	int ret = -EFAULT;

	if (!access_ok(buf, sizeof(struct hpmm_uce_records))) {
		pr_warn("Failed to access user data\n");
		return -EFAULT;
	}

	records = kzalloc(sizeof(struct hpmm_uce_records), GFP_KERNEL);
	if (!records)
		return -ENOMEM;

	if (copy_from_user(records, buf, sizeof(struct hpmm_uce_records))) {
		pr_warn("Failed to copy from user\n");
		goto out;
	}

	driver = hpmm_driver_lookup(records->type);
	if (!driver) {
		pr_warn("Unsupported memory type:%d\n", records->type);
		goto out;
	}

	mutex_lock(&hpmm_dev->hpmm_lock);
	ret = driver->clear_uce(records);
	mutex_unlock(&hpmm_dev->hpmm_lock);

out:
	kfree(records);
	return ret;
}


/*
 * Unlocked ioctl dispatcher for /dev/hpmm. Verifies the command's magic
 * number and the per-open context, then routes to the handler for each
 * supported command.
 *
 * Fix: the second format specifier in the magic-mismatch message was
 * written as '#%x' (prints a literal '#' followed by bare hex); it is now
 * '%#x' to match the first specifier and print the 0x-prefixed value.
 */
static long hpmm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpmm_device *hpmm_dev;
	long ret = 0;

	if (_IOC_TYPE(cmd) != HPMM_MAGIC) {
		pr_warn("Invalid cmd magic number '%#x', should be '%#x'\n",
			_IOC_TYPE(cmd), HPMM_MAGIC);
		return -EINVAL;
	}

	hpmm_dev = file->private_data;
	if (!hpmm_dev) {
		pr_warn("Unexpected null hpmm_device\n");
		return -ENOENT;
	}

	switch (cmd) {
	case HPMM_SET_MEMORY_TYPE:
		ret = hpmm_set_memory_type(hpmm_dev, arg);
		break;
	case HPMM_CLEAR_MEMORY:
		ret = hpmm_clear_memory(hpmm_dev, arg);
		break;
	case HPMM_RECOVER_MEMORY:
		ret = hpmm_recover_memory(hpmm_dev, arg);
		break;
	case HPMM_THP_MEMORY_MAPPING:
		ret = hpmm_thp_memory_mapping(hpmm_dev, arg);
		break;
	case HPMM_QUERY_UCE_RECORD:
		ret = hpmm_query_uce_record(hpmm_dev, arg);
		break;
	case HPMM_CLEAR_UCE_RECORD:
		ret = hpmm_clear_uce_record(hpmm_dev, arg);
		break;
	default:
		pr_warn("Invalid cmd '%#x'\n", cmd);
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * Populate hpmm_fops (declared elsewhere) at module-init time.
 *
 * Fix: removed the stray ';' after the function's closing brace.
 *
 * NOTE(review): compat_ioctl reuses hpmm_ioctl directly; since every hpmm
 * ioctl argument is a user pointer, compat_ptr_ioctl would be the
 * conventional choice for 32-bit userspace on 64-bit kernels — confirm
 * against the target kernel version.
 */
static void set_hpmm_ops(void)
{
	hpmm_fops.owner			= THIS_MODULE;
	hpmm_fops.open			= hpmm_open;
	hpmm_fops.release		= hpmm_release;
	hpmm_fops.mmap			= hpmm_mmap;
	hpmm_fops.unlocked_ioctl	= hpmm_ioctl;
	hpmm_fops.compat_ioctl		= hpmm_ioctl;
}

static struct miscdevice hpmm_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "hpmm",
	.fops	= &hpmm_fops,
	/* Mode 0600: only the device owner (root by default) may access /dev/hpmm. */
	.mode	= 0600,
};

/*
 * Module init: require BBU-backed memory to be enabled, then register the
 * /dev/hpmm misc device and the persistent-memory backend driver.
 *
 * Fix: the BBU-disabled path returned -1, which aliases -EPERM and is
 * misleading in dmesg/modprobe output; return -ENODEV instead, the
 * conventional errno for "required hardware not present".
 */
static int __init hpmm_init(void)
{
	int err;

	if (!bbu_memory_on()) {
		pr_err("the bbu memory is not enabled\n");
		return -ENODEV;
	}

	set_hpmm_ops();
	err = misc_register(&hpmm_miscdev);
	if (err != 0) {
		pr_err("Cannot register miscdev on minor=%d (err=%d)\n",
		       MISC_DYNAMIC_MINOR, err);
		return err;
	}
	hpmm_persist_register();
	pr_info("Module initialization success\n");

	return 0;
}

/*
 * Module exit: unregister the persistent-memory backend before removing
 * the misc device, mirroring the init order in reverse.
 */
static void __exit hpmm_exit(void)
{
	hpmm_persist_unregister();
	misc_deregister(&hpmm_miscdev);
	pr_info("Module Unloaded\n");
}

module_init(hpmm_init);
module_exit(hpmm_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Euler");
MODULE_DESCRIPTION("Euler Hybrid Memory Management");
